// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package bgp
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new bgp API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	c := &Client{
		transport: transport,
		formats:   formats,
	}
	return c
}
// NewClientWithBasicAuth creates a new bgp API client with basic auth credentials.
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - user: user for basic authentication header.
//   - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: transport, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new bgp API client with a bearer token for authentication.
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: transport, formats: strfmt.Default}
}
/*
Client for bgp API
*/
type Client struct {
	// transport submits the built ClientOperation to the server.
	transport runtime.ClientTransport
	// formats is the strfmt registry used when decoding responses.
	formats strfmt.Registry
}

// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)

// ClientService is the interface for Client methods
type ClientService interface {
	GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (*GetBgpPeersOK, error)

	GetBgpRoutePolicies(params *GetBgpRoutePoliciesParams, opts ...ClientOption) (*GetBgpRoutePoliciesOK, error)

	GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error)

	// SetTransport replaces the transport used by subsequent calls.
	SetTransport(transport runtime.ClientTransport)
}
/*
GetBgpPeers lists operational state of b g p peers

Retrieves current operational state of BGP peers created by
Cilium BGP virtual router. This includes session state, uptime,
information per address family, etc.
*/
func (a *Client) GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (*GetBgpPeersOK, error) {
	if params == nil {
		params = NewGetBgpPeersParams()
	}
	// Describe the HTTP operation for the transport layer.
	operation := &runtime.ClientOperation{
		ID:                 "GetBgpPeers",
		Method:             "GET",
		PathPattern:        "/bgp/peers",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetBgpPeersReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}

	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if resp, ok := raw.(*GetBgpPeersOK); ok {
		return resp, nil
	}
	// Unknown success responses are normally turned into errors by the
	// reader, so reaching this point indicates a code generation issue.
	msg := fmt.Sprintf("unexpected success response for GetBgpPeers: API contract not enforced by server. Client expected to get an error, but got: %T", raw)
	panic(msg)
}
/*
GetBgpRoutePolicies lists b g p route policies configured in b g p control plane

Retrieves route policies from BGP Control Plane.
*/
func (a *Client) GetBgpRoutePolicies(params *GetBgpRoutePoliciesParams, opts ...ClientOption) (*GetBgpRoutePoliciesOK, error) {
	if params == nil {
		params = NewGetBgpRoutePoliciesParams()
	}
	// Describe the HTTP operation for the transport layer.
	operation := &runtime.ClientOperation{
		ID:                 "GetBgpRoutePolicies",
		Method:             "GET",
		PathPattern:        "/bgp/route-policies",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetBgpRoutePoliciesReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}

	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if resp, ok := raw.(*GetBgpRoutePoliciesOK); ok {
		return resp, nil
	}
	// Unknown success responses are normally turned into errors by the
	// reader, so reaching this point indicates a code generation issue.
	msg := fmt.Sprintf("unexpected success response for GetBgpRoutePolicies: API contract not enforced by server. Client expected to get an error, but got: %T", raw)
	panic(msg)
}
/*
GetBgpRoutes lists b g p routes from b g p control plane r i b

Retrieves routes from BGP Control Plane RIB filtered by parameters you specify
*/
func (a *Client) GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error) {
	if params == nil {
		params = NewGetBgpRoutesParams()
	}
	// Describe the HTTP operation for the transport layer.
	operation := &runtime.ClientOperation{
		ID:                 "GetBgpRoutes",
		Method:             "GET",
		PathPattern:        "/bgp/routes",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetBgpRoutesReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}

	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if resp, ok := raw.(*GetBgpRoutesOK); ok {
		return resp, nil
	}
	// Unknown success responses are normally turned into errors by the
	// reader, so reaching this point indicates a code generation issue.
	msg := fmt.Sprintf("unexpected success response for GetBgpRoutes: API contract not enforced by server. Client expected to get an error, but got: %T", raw)
	panic(msg)
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
	a.transport = transport
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package bgp
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetBgpPeersParams creates a new GetBgpPeersParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetBgpPeersParams() *GetBgpPeersParams {
	p := new(GetBgpPeersParams)
	p.timeout = cr.DefaultTimeout
	return p
}

// NewGetBgpPeersParamsWithTimeout creates a new GetBgpPeersParams object
// with the ability to set a timeout on a request.
func NewGetBgpPeersParamsWithTimeout(timeout time.Duration) *GetBgpPeersParams {
	p := new(GetBgpPeersParams)
	p.timeout = timeout
	return p
}

// NewGetBgpPeersParamsWithContext creates a new GetBgpPeersParams object
// with the ability to set a context for a request.
func NewGetBgpPeersParamsWithContext(ctx context.Context) *GetBgpPeersParams {
	p := new(GetBgpPeersParams)
	p.Context = ctx
	return p
}

// NewGetBgpPeersParamsWithHTTPClient creates a new GetBgpPeersParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetBgpPeersParamsWithHTTPClient(client *http.Client) *GetBgpPeersParams {
	p := new(GetBgpPeersParams)
	p.HTTPClient = client
	return p
}
/*
GetBgpPeersParams contains all the parameters to send to the API endpoint

	for the get bgp peers operation.

	Typically these are written to a http.Request.
*/
type GetBgpPeersParams struct {
	// timeout bounds the request; applied to the request in WriteToRequest.
	timeout time.Duration
	// Context, when non-nil, controls cancellation of the request.
	Context context.Context
	// HTTPClient optionally overrides the client used for this request.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get bgp peers params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetBgpPeersParams) WithDefaults() *GetBgpPeersParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get bgp peers params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetBgpPeersParams) SetDefaults() {
	// no default values defined for this parameter
}
// WithTimeout adds the timeout to the get bgp peers params
func (o *GetBgpPeersParams) WithTimeout(timeout time.Duration) *GetBgpPeersParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get bgp peers params
func (o *GetBgpPeersParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get bgp peers params
func (o *GetBgpPeersParams) WithContext(ctx context.Context) *GetBgpPeersParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get bgp peers params
func (o *GetBgpPeersParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get bgp peers params
func (o *GetBgpPeersParams) WithHTTPClient(client *http.Client) *GetBgpPeersParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get bgp peers params
func (o *GetBgpPeersParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}
// WriteToRequest writes these params to a swagger request
func (o *GetBgpPeersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	// This operation has no query/path/body parameters, so res never
	// accumulates entries; the block below is generated-code scaffolding
	// kept for symmetry with parameterized operations (it also keeps the
	// errors import in use for this file).
	var res []error

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package bgp
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetBgpPeersReader is a Reader for the GetBgpPeers structure.
type GetBgpPeersReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *GetBgpPeersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	code := response.Code()
	switch code {
	case 200:
		out := NewGetBgpPeersOK()
		if err := out.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return out, nil
	case 500:
		out := NewGetBgpPeersInternalServerError()
		if err := out.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		// Error responses are returned as the error value.
		return nil, out
	case 501:
		out := NewGetBgpPeersDisabled()
		if err := out.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, out
	}
	// Undocumented status codes become generic API errors.
	return nil, runtime.NewAPIError("[GET /bgp/peers] GetBgpPeers", response, code)
}
// NewGetBgpPeersOK creates a GetBgpPeersOK with default headers values
func NewGetBgpPeersOK() *GetBgpPeersOK {
	return new(GetBgpPeersOK)
}

/*
GetBgpPeersOK describes a response with status code 200, with default header values.

Success
*/
type GetBgpPeersOK struct {
	Payload []*models.BgpPeer
}

// IsSuccess returns true when this get bgp peers o k response has a 2xx status code
func (o *GetBgpPeersOK) IsSuccess() bool { return true }

// IsRedirect returns true when this get bgp peers o k response has a 3xx status code
func (o *GetBgpPeersOK) IsRedirect() bool { return false }

// IsClientError returns true when this get bgp peers o k response has a 4xx status code
func (o *GetBgpPeersOK) IsClientError() bool { return false }

// IsServerError returns true when this get bgp peers o k response has a 5xx status code
func (o *GetBgpPeersOK) IsServerError() bool { return false }

// IsCode returns true when this get bgp peers o k response a status code equal to that given
func (o *GetBgpPeersOK) IsCode(code int) bool { return code == o.Code() }

// Code gets the status code for the get bgp peers o k response
func (o *GetBgpPeersOK) Code() int { return 200 }

// Error renders the same representation as String.
func (o *GetBgpPeersOK) Error() string {
	return o.String()
}

func (o *GetBgpPeersOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersOK %s", 200, payload)
}

func (o *GetBgpPeersOK) GetPayload() []*models.BgpPeer {
	return o.Payload
}

// readResponse decodes the response body into the payload; EOF means an empty body.
func (o *GetBgpPeersOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetBgpPeersInternalServerError creates a GetBgpPeersInternalServerError with default headers values
func NewGetBgpPeersInternalServerError() *GetBgpPeersInternalServerError {
	return new(GetBgpPeersInternalServerError)
}

/*
GetBgpPeersInternalServerError describes a response with status code 500, with default header values.

Internal Server Error
*/
type GetBgpPeersInternalServerError struct {
	Payload models.Error
}

// IsSuccess returns true when this get bgp peers internal server error response has a 2xx status code
func (o *GetBgpPeersInternalServerError) IsSuccess() bool { return false }

// IsRedirect returns true when this get bgp peers internal server error response has a 3xx status code
func (o *GetBgpPeersInternalServerError) IsRedirect() bool { return false }

// IsClientError returns true when this get bgp peers internal server error response has a 4xx status code
func (o *GetBgpPeersInternalServerError) IsClientError() bool { return false }

// IsServerError returns true when this get bgp peers internal server error response has a 5xx status code
func (o *GetBgpPeersInternalServerError) IsServerError() bool { return true }

// IsCode returns true when this get bgp peers internal server error response a status code equal to that given
func (o *GetBgpPeersInternalServerError) IsCode(code int) bool { return code == o.Code() }

// Code gets the status code for the get bgp peers internal server error response
func (o *GetBgpPeersInternalServerError) Code() int { return 500 }

// Error renders the same representation as String.
func (o *GetBgpPeersInternalServerError) Error() string {
	return o.String()
}

func (o *GetBgpPeersInternalServerError) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersInternalServerError %s", 500, payload)
}

func (o *GetBgpPeersInternalServerError) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into the payload; EOF means an empty body.
func (o *GetBgpPeersInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetBgpPeersDisabled creates a GetBgpPeersDisabled with default headers values
func NewGetBgpPeersDisabled() *GetBgpPeersDisabled {
	return new(GetBgpPeersDisabled)
}

/*
GetBgpPeersDisabled describes a response with status code 501, with default header values.

BGP Control Plane disabled
*/
type GetBgpPeersDisabled struct {
	Payload models.Error
}

// IsSuccess returns true when this get bgp peers disabled response has a 2xx status code
func (o *GetBgpPeersDisabled) IsSuccess() bool { return false }

// IsRedirect returns true when this get bgp peers disabled response has a 3xx status code
func (o *GetBgpPeersDisabled) IsRedirect() bool { return false }

// IsClientError returns true when this get bgp peers disabled response has a 4xx status code
func (o *GetBgpPeersDisabled) IsClientError() bool { return false }

// IsServerError returns true when this get bgp peers disabled response has a 5xx status code
func (o *GetBgpPeersDisabled) IsServerError() bool { return true }

// IsCode returns true when this get bgp peers disabled response a status code equal to that given
func (o *GetBgpPeersDisabled) IsCode(code int) bool { return code == o.Code() }

// Code gets the status code for the get bgp peers disabled response
func (o *GetBgpPeersDisabled) Code() int { return 501 }

// Error renders the same representation as String.
func (o *GetBgpPeersDisabled) Error() string {
	return o.String()
}

func (o *GetBgpPeersDisabled) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersDisabled %s", 501, payload)
}

func (o *GetBgpPeersDisabled) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into the payload; EOF means an empty body.
func (o *GetBgpPeersDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package bgp
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewGetBgpRoutePoliciesParams creates a new GetBgpRoutePoliciesParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetBgpRoutePoliciesParams() *GetBgpRoutePoliciesParams {
	p := new(GetBgpRoutePoliciesParams)
	p.timeout = cr.DefaultTimeout
	return p
}

// NewGetBgpRoutePoliciesParamsWithTimeout creates a new GetBgpRoutePoliciesParams object
// with the ability to set a timeout on a request.
func NewGetBgpRoutePoliciesParamsWithTimeout(timeout time.Duration) *GetBgpRoutePoliciesParams {
	p := new(GetBgpRoutePoliciesParams)
	p.timeout = timeout
	return p
}

// NewGetBgpRoutePoliciesParamsWithContext creates a new GetBgpRoutePoliciesParams object
// with the ability to set a context for a request.
func NewGetBgpRoutePoliciesParamsWithContext(ctx context.Context) *GetBgpRoutePoliciesParams {
	p := new(GetBgpRoutePoliciesParams)
	p.Context = ctx
	return p
}

// NewGetBgpRoutePoliciesParamsWithHTTPClient creates a new GetBgpRoutePoliciesParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetBgpRoutePoliciesParamsWithHTTPClient(client *http.Client) *GetBgpRoutePoliciesParams {
	p := new(GetBgpRoutePoliciesParams)
	p.HTTPClient = client
	return p
}
/*
GetBgpRoutePoliciesParams contains all the parameters to send to the API endpoint

	for the get bgp route policies operation.

	Typically these are written to a http.Request.
*/
type GetBgpRoutePoliciesParams struct {

	/* RouterAsn.

	   Autonomous System Number (ASN) identifying a BGP virtual router instance.
	   If not specified, all virtual router instances are selected.
	*/
	RouterAsn *int64

	// timeout bounds the request; applied to the request in WriteToRequest.
	timeout time.Duration
	// Context, when non-nil, controls cancellation of the request.
	Context context.Context
	// HTTPClient optionally overrides the client used for this request.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get bgp route policies params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetBgpRoutePoliciesParams) WithDefaults() *GetBgpRoutePoliciesParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get bgp route policies params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetBgpRoutePoliciesParams) SetDefaults() {
	// no default values defined for this parameter
}
// WithTimeout adds the timeout to the get bgp route policies params
func (o *GetBgpRoutePoliciesParams) WithTimeout(timeout time.Duration) *GetBgpRoutePoliciesParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get bgp route policies params
func (o *GetBgpRoutePoliciesParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get bgp route policies params
func (o *GetBgpRoutePoliciesParams) WithContext(ctx context.Context) *GetBgpRoutePoliciesParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get bgp route policies params
func (o *GetBgpRoutePoliciesParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get bgp route policies params
func (o *GetBgpRoutePoliciesParams) WithHTTPClient(client *http.Client) *GetBgpRoutePoliciesParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get bgp route policies params
func (o *GetBgpRoutePoliciesParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithRouterAsn adds the routerAsn to the get bgp route policies params
func (o *GetBgpRoutePoliciesParams) WithRouterAsn(routerAsn *int64) *GetBgpRoutePoliciesParams {
	o.RouterAsn = routerAsn
	return o
}

// SetRouterAsn adds the routerAsn to the get bgp route policies params
func (o *GetBgpRoutePoliciesParams) SetRouterAsn(routerAsn *int64) {
	o.RouterAsn = routerAsn
}
// WriteToRequest writes these params to a swagger request
func (o *GetBgpRoutePoliciesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	// res never accumulates entries for this operation; the final check is
	// kept for symmetry with other generated WriteToRequest implementations.
	var res []error

	if o.RouterAsn != nil {
		// query param router_asn: the inner nil re-check emitted by the
		// generator was redundant inside this branch and has been removed.
		qRouterAsn := swag.FormatInt64(*o.RouterAsn)
		if qRouterAsn != "" {
			if err := r.SetQueryParam("router_asn", qRouterAsn); err != nil {
				return err
			}
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package bgp
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetBgpRoutePoliciesReader is a Reader for the GetBgpRoutePolicies structure.
type GetBgpRoutePoliciesReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *GetBgpRoutePoliciesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	code := response.Code()
	switch code {
	case 200:
		out := NewGetBgpRoutePoliciesOK()
		if err := out.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return out, nil
	case 500:
		out := NewGetBgpRoutePoliciesInternalServerError()
		if err := out.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		// Error responses are returned as the error value.
		return nil, out
	case 501:
		out := NewGetBgpRoutePoliciesDisabled()
		if err := out.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, out
	}
	// Undocumented status codes become generic API errors.
	return nil, runtime.NewAPIError("[GET /bgp/route-policies] GetBgpRoutePolicies", response, code)
}
// NewGetBgpRoutePoliciesOK creates a GetBgpRoutePoliciesOK with default headers values
func NewGetBgpRoutePoliciesOK() *GetBgpRoutePoliciesOK {
	return new(GetBgpRoutePoliciesOK)
}

/*
GetBgpRoutePoliciesOK describes a response with status code 200, with default header values.

Success
*/
type GetBgpRoutePoliciesOK struct {
	Payload []*models.BgpRoutePolicy
}

// IsSuccess returns true when this get bgp route policies o k response has a 2xx status code
func (o *GetBgpRoutePoliciesOK) IsSuccess() bool { return true }

// IsRedirect returns true when this get bgp route policies o k response has a 3xx status code
func (o *GetBgpRoutePoliciesOK) IsRedirect() bool { return false }

// IsClientError returns true when this get bgp route policies o k response has a 4xx status code
func (o *GetBgpRoutePoliciesOK) IsClientError() bool { return false }

// IsServerError returns true when this get bgp route policies o k response has a 5xx status code
func (o *GetBgpRoutePoliciesOK) IsServerError() bool { return false }

// IsCode returns true when this get bgp route policies o k response a status code equal to that given
func (o *GetBgpRoutePoliciesOK) IsCode(code int) bool { return code == o.Code() }

// Code gets the status code for the get bgp route policies o k response
func (o *GetBgpRoutePoliciesOK) Code() int { return 200 }

// Error renders the same representation as String.
func (o *GetBgpRoutePoliciesOK) Error() string {
	return o.String()
}

func (o *GetBgpRoutePoliciesOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesOK %s", 200, payload)
}

func (o *GetBgpRoutePoliciesOK) GetPayload() []*models.BgpRoutePolicy {
	return o.Payload
}

// readResponse decodes the response body into the payload; EOF means an empty body.
func (o *GetBgpRoutePoliciesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetBgpRoutePoliciesInternalServerError creates a GetBgpRoutePoliciesInternalServerError with default headers values
func NewGetBgpRoutePoliciesInternalServerError() *GetBgpRoutePoliciesInternalServerError {
	return new(GetBgpRoutePoliciesInternalServerError)
}

/*
GetBgpRoutePoliciesInternalServerError describes a response with status code 500, with default header values.

Internal Server Error
*/
type GetBgpRoutePoliciesInternalServerError struct {
	Payload models.Error
}

// IsSuccess returns true when this get bgp route policies internal server error response has a 2xx status code
func (o *GetBgpRoutePoliciesInternalServerError) IsSuccess() bool { return false }

// IsRedirect returns true when this get bgp route policies internal server error response has a 3xx status code
func (o *GetBgpRoutePoliciesInternalServerError) IsRedirect() bool { return false }

// IsClientError returns true when this get bgp route policies internal server error response has a 4xx status code
func (o *GetBgpRoutePoliciesInternalServerError) IsClientError() bool { return false }

// IsServerError returns true when this get bgp route policies internal server error response has a 5xx status code
func (o *GetBgpRoutePoliciesInternalServerError) IsServerError() bool { return true }

// IsCode returns true when this get bgp route policies internal server error response a status code equal to that given
func (o *GetBgpRoutePoliciesInternalServerError) IsCode(code int) bool { return code == o.Code() }

// Code gets the status code for the get bgp route policies internal server error response
func (o *GetBgpRoutePoliciesInternalServerError) Code() int { return 500 }

// Error renders the same representation as String.
func (o *GetBgpRoutePoliciesInternalServerError) Error() string {
	return o.String()
}

func (o *GetBgpRoutePoliciesInternalServerError) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesInternalServerError %s", 500, payload)
}

func (o *GetBgpRoutePoliciesInternalServerError) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into the payload; EOF means an empty body.
func (o *GetBgpRoutePoliciesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetBgpRoutePoliciesDisabled creates a GetBgpRoutePoliciesDisabled with default headers values
func NewGetBgpRoutePoliciesDisabled() *GetBgpRoutePoliciesDisabled {
	return new(GetBgpRoutePoliciesDisabled)
}

/*
GetBgpRoutePoliciesDisabled describes a response with status code 501, with default header values.

BGP Control Plane disabled
*/
type GetBgpRoutePoliciesDisabled struct {
	Payload models.Error
}

// IsSuccess returns true when this get bgp route policies disabled response has a 2xx status code
func (o *GetBgpRoutePoliciesDisabled) IsSuccess() bool { return false }

// IsRedirect returns true when this get bgp route policies disabled response has a 3xx status code
func (o *GetBgpRoutePoliciesDisabled) IsRedirect() bool { return false }

// IsClientError returns true when this get bgp route policies disabled response has a 4xx status code
func (o *GetBgpRoutePoliciesDisabled) IsClientError() bool { return false }

// IsServerError returns true when this get bgp route policies disabled response has a 5xx status code
func (o *GetBgpRoutePoliciesDisabled) IsServerError() bool { return true }

// IsCode returns true when this get bgp route policies disabled response a status code equal to that given
func (o *GetBgpRoutePoliciesDisabled) IsCode(code int) bool { return code == o.Code() }

// Code gets the status code for the get bgp route policies disabled response
func (o *GetBgpRoutePoliciesDisabled) Code() int { return 501 }

// Error renders the same representation as String.
func (o *GetBgpRoutePoliciesDisabled) Error() string {
	return o.String()
}

func (o *GetBgpRoutePoliciesDisabled) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesDisabled %s", 501, payload)
}

func (o *GetBgpRoutePoliciesDisabled) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into the payload; EOF means an empty body.
func (o *GetBgpRoutePoliciesDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package bgp
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewGetBgpRoutesParams creates a new GetBgpRoutesParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetBgpRoutesParams() *GetBgpRoutesParams {
	p := new(GetBgpRoutesParams)
	p.timeout = cr.DefaultTimeout
	return p
}

// NewGetBgpRoutesParamsWithTimeout creates a new GetBgpRoutesParams object
// with the ability to set a timeout on a request.
func NewGetBgpRoutesParamsWithTimeout(timeout time.Duration) *GetBgpRoutesParams {
	p := new(GetBgpRoutesParams)
	p.timeout = timeout
	return p
}

// NewGetBgpRoutesParamsWithContext creates a new GetBgpRoutesParams object
// with the ability to set a context for a request.
func NewGetBgpRoutesParamsWithContext(ctx context.Context) *GetBgpRoutesParams {
	p := new(GetBgpRoutesParams)
	p.Context = ctx
	return p
}

// NewGetBgpRoutesParamsWithHTTPClient creates a new GetBgpRoutesParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetBgpRoutesParamsWithHTTPClient(client *http.Client) *GetBgpRoutesParams {
	p := new(GetBgpRoutesParams)
	p.HTTPClient = client
	return p
}
/*
GetBgpRoutesParams contains all the parameters to send to the API endpoint

	for the get bgp routes operation.

	Typically these are written to a http.Request.
*/
type GetBgpRoutesParams struct {

	/* Afi.

	   Address Family Indicator (AFI) of a BGP route
	*/
	Afi string

	/* Neighbor.

	   IP address specifying a BGP neighbor.
	   Has to be specified only when table type is adj-rib-in or adj-rib-out.
	*/
	Neighbor *string

	/* RouterAsn.

	   Autonomous System Number (ASN) identifying a BGP virtual router instance.
	   If not specified, all virtual router instances are selected.
	*/
	RouterAsn *int64

	/* Safi.

	   Subsequent Address Family Indicator (SAFI) of a BGP route
	*/
	Safi string

	/* TableType.

	   BGP Routing Information Base (RIB) table type
	*/
	TableType string

	// timeout bounds the request; applied to the request in WriteToRequest.
	timeout time.Duration
	// Context, when non-nil, controls cancellation of the request.
	Context context.Context
	// HTTPClient optionally overrides the client used for this request.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get bgp routes params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetBgpRoutesParams) WithDefaults() *GetBgpRoutesParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get bgp routes params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetBgpRoutesParams) SetDefaults() {
	// no default values defined for this parameter
}
// WithTimeout adds the timeout to the get bgp routes params
func (o *GetBgpRoutesParams) WithTimeout(timeout time.Duration) *GetBgpRoutesParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get bgp routes params
func (o *GetBgpRoutesParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get bgp routes params
func (o *GetBgpRoutesParams) WithContext(ctx context.Context) *GetBgpRoutesParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get bgp routes params
func (o *GetBgpRoutesParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get bgp routes params
func (o *GetBgpRoutesParams) WithHTTPClient(client *http.Client) *GetBgpRoutesParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get bgp routes params
func (o *GetBgpRoutesParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithAfi adds the afi to the get bgp routes params
func (o *GetBgpRoutesParams) WithAfi(afi string) *GetBgpRoutesParams {
o.SetAfi(afi)
return o
}
// SetAfi adds the afi to the get bgp routes params
func (o *GetBgpRoutesParams) SetAfi(afi string) {
o.Afi = afi
}
// WithNeighbor adds the neighbor to the get bgp routes params
func (o *GetBgpRoutesParams) WithNeighbor(neighbor *string) *GetBgpRoutesParams {
o.SetNeighbor(neighbor)
return o
}
// SetNeighbor adds the neighbor to the get bgp routes params
func (o *GetBgpRoutesParams) SetNeighbor(neighbor *string) {
o.Neighbor = neighbor
}
// WithRouterAsn adds the routerAsn to the get bgp routes params
func (o *GetBgpRoutesParams) WithRouterAsn(routerAsn *int64) *GetBgpRoutesParams {
o.SetRouterAsn(routerAsn)
return o
}
// SetRouterAsn adds the routerAsn to the get bgp routes params
func (o *GetBgpRoutesParams) SetRouterAsn(routerAsn *int64) {
o.RouterAsn = routerAsn
}
// WithSafi adds the safi to the get bgp routes params
func (o *GetBgpRoutesParams) WithSafi(safi string) *GetBgpRoutesParams {
o.SetSafi(safi)
return o
}
// SetSafi adds the safi to the get bgp routes params
func (o *GetBgpRoutesParams) SetSafi(safi string) {
o.Safi = safi
}
// WithTableType adds the tableType to the get bgp routes params
func (o *GetBgpRoutesParams) WithTableType(tableType string) *GetBgpRoutesParams {
o.SetTableType(tableType)
return o
}
// SetTableType adds the tableType to the get bgp routes params
func (o *GetBgpRoutesParams) SetTableType(tableType string) {
o.TableType = tableType
}
// WriteToRequest serializes these params onto a swagger request.
//
// The afi, safi and table_type query parameters are always considered;
// neighbor and router_asn are only written when their pointers are set.
// Empty serialized values are skipped entirely.
func (o *GetBgpRoutesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	// query param afi
	if qAfi := o.Afi; qAfi != "" {
		if err := r.SetQueryParam("afi", qAfi); err != nil {
			return err
		}
	}

	if o.Neighbor != nil {
		// query param neighbor; the guard above makes the dereference safe,
		// so the generated inner nil re-check has been removed.
		if qNeighbor := *o.Neighbor; qNeighbor != "" {
			if err := r.SetQueryParam("neighbor", qNeighbor); err != nil {
				return err
			}
		}
	}

	if o.RouterAsn != nil {
		// query param router_asn; same simplification as for neighbor.
		if qRouterAsn := swag.FormatInt64(*o.RouterAsn); qRouterAsn != "" {
			if err := r.SetQueryParam("router_asn", qRouterAsn); err != nil {
				return err
			}
		}
	}

	// query param safi
	if qSafi := o.Safi; qSafi != "" {
		if err := r.SetQueryParam("safi", qSafi); err != nil {
			return err
		}
	}

	// query param table_type
	if qTableType := o.TableType; qTableType != "" {
		if err := r.SetQueryParam("table_type", qTableType); err != nil {
			return err
		}
	}

	// res is never appended to for this endpoint; the tail is kept for
	// parity with other generated WriteToRequest implementations.
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package bgp
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetBgpRoutesReader is a Reader for the GetBgpRoutes structure.
type GetBgpRoutesReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// A 200 yields a typed success result; 500 and 501 yield typed error
// results; any other status produces a generic runtime API error.
func (o *GetBgpRoutesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	code := response.Code()
	switch code {
	case 200:
		result := NewGetBgpRoutesOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 500:
		result := NewGetBgpRoutesInternalServerError()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 501:
		result := NewGetBgpRoutesDisabled()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	}
	return nil, runtime.NewAPIError("[GET /bgp/routes] GetBgpRoutes", response, code)
}
// NewGetBgpRoutesOK creates a GetBgpRoutesOK with default headers values
func NewGetBgpRoutesOK() *GetBgpRoutesOK {
	return new(GetBgpRoutesOK)
}

/*
GetBgpRoutesOK describes a response with status code 200, with default header values.

Success
*/
type GetBgpRoutesOK struct {
	Payload []*models.BgpRoute
}

// IsSuccess reports whether this get bgp routes o k response has a 2xx status code.
func (o *GetBgpRoutesOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this get bgp routes o k response has a 3xx status code.
func (o *GetBgpRoutesOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this get bgp routes o k response has a 4xx status code.
func (o *GetBgpRoutesOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this get bgp routes o k response has a 5xx status code.
func (o *GetBgpRoutesOK) IsServerError() bool {
	return false
}

// IsCode reports whether this get bgp routes o k response has a status code equal to that given.
func (o *GetBgpRoutesOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get bgp routes o k response
func (o *GetBgpRoutesOK) Code() int {
	return 200
}

// Error renders the response for use as an error value; identical to String.
func (o *GetBgpRoutesOK) Error() string {
	return o.String()
}

// String renders the status line and the JSON-encoded payload.
func (o *GetBgpRoutesOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesOK %s", 200, payload)
}

// GetPayload returns the decoded payload of the 200 response.
func (o *GetBgpRoutesOK) GetPayload() []*models.BgpRoute {
	return o.Payload
}

// readResponse decodes the response body into the payload; io.EOF denotes an
// empty body and is not treated as an error.
func (o *GetBgpRoutesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err == nil || err == io.EOF {
		return nil
	}
	return err
}
// NewGetBgpRoutesInternalServerError creates a GetBgpRoutesInternalServerError with default headers values
func NewGetBgpRoutesInternalServerError() *GetBgpRoutesInternalServerError {
	return new(GetBgpRoutesInternalServerError)
}

/*
GetBgpRoutesInternalServerError describes a response with status code 500, with default header values.

Internal Server Error
*/
type GetBgpRoutesInternalServerError struct {
	Payload models.Error
}

// IsSuccess reports whether this get bgp routes internal server error response has a 2xx status code.
func (o *GetBgpRoutesInternalServerError) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this get bgp routes internal server error response has a 3xx status code.
func (o *GetBgpRoutesInternalServerError) IsRedirect() bool {
	return false
}

// IsClientError reports whether this get bgp routes internal server error response has a 4xx status code.
func (o *GetBgpRoutesInternalServerError) IsClientError() bool {
	return false
}

// IsServerError reports whether this get bgp routes internal server error response has a 5xx status code.
func (o *GetBgpRoutesInternalServerError) IsServerError() bool {
	return true
}

// IsCode reports whether this get bgp routes internal server error response has a status code equal to that given.
func (o *GetBgpRoutesInternalServerError) IsCode(code int) bool {
	return code == 500
}

// Code gets the status code for the get bgp routes internal server error response
func (o *GetBgpRoutesInternalServerError) Code() int {
	return 500
}

// Error renders the response for use as an error value; identical to String.
func (o *GetBgpRoutesInternalServerError) Error() string {
	return o.String()
}

// String renders the status line and the JSON-encoded payload.
func (o *GetBgpRoutesInternalServerError) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesInternalServerError %s", 500, payload)
}

// GetPayload returns the decoded error payload of the 500 response.
func (o *GetBgpRoutesInternalServerError) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into the payload; io.EOF denotes an
// empty body and is not treated as an error.
func (o *GetBgpRoutesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err == nil || err == io.EOF {
		return nil
	}
	return err
}
// NewGetBgpRoutesDisabled creates a GetBgpRoutesDisabled with default headers values
func NewGetBgpRoutesDisabled() *GetBgpRoutesDisabled {
	return new(GetBgpRoutesDisabled)
}

/*
GetBgpRoutesDisabled describes a response with status code 501, with default header values.

BGP Control Plane disabled
*/
type GetBgpRoutesDisabled struct {
	Payload models.Error
}

// IsSuccess reports whether this get bgp routes disabled response has a 2xx status code.
func (o *GetBgpRoutesDisabled) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this get bgp routes disabled response has a 3xx status code.
func (o *GetBgpRoutesDisabled) IsRedirect() bool {
	return false
}

// IsClientError reports whether this get bgp routes disabled response has a 4xx status code.
func (o *GetBgpRoutesDisabled) IsClientError() bool {
	return false
}

// IsServerError reports whether this get bgp routes disabled response has a 5xx status code.
func (o *GetBgpRoutesDisabled) IsServerError() bool {
	return true
}

// IsCode reports whether this get bgp routes disabled response has a status code equal to that given.
func (o *GetBgpRoutesDisabled) IsCode(code int) bool {
	return code == 501
}

// Code gets the status code for the get bgp routes disabled response
func (o *GetBgpRoutesDisabled) Code() int {
	return 501
}

// Error renders the response for use as an error value; identical to String.
func (o *GetBgpRoutesDisabled) Error() string {
	return o.String()
}

// String renders the status line and the JSON-encoded payload.
func (o *GetBgpRoutesDisabled) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesDisabled %s", 501, payload)
}

// GetPayload returns the decoded error payload of the 501 response.
func (o *GetBgpRoutesDisabled) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into the payload; io.EOF denotes an
// empty body and is not treated as an error.
func (o *GetBgpRoutesDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err == nil || err == io.EOF {
		return nil
	}
	return err
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package client
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/client/bgp"
"github.com/cilium/cilium/api/v1/client/daemon"
"github.com/cilium/cilium/api/v1/client/endpoint"
"github.com/cilium/cilium/api/v1/client/ipam"
"github.com/cilium/cilium/api/v1/client/policy"
"github.com/cilium/cilium/api/v1/client/prefilter"
"github.com/cilium/cilium/api/v1/client/service"
)
// Default is a cilium API HTTP client preconfigured with the spec's
// default host, base path and schemes.
var Default = NewHTTPClient(nil)

const (
	// DefaultHost is the default Host
	// found in Meta (info) section of spec file
	DefaultHost string = "localhost"
	// DefaultBasePath is the default BasePath
	// found in Meta (info) section of spec file
	DefaultBasePath string = "/v1"
)

// DefaultSchemes are the default schemes found in Meta (info) section of spec file
var DefaultSchemes = []string{"http"}
// NewHTTPClient creates a new cilium API HTTP client using the default
// transport configuration.
func NewHTTPClient(formats strfmt.Registry) *CiliumAPI {
	var cfg *TransportConfig // nil selects DefaultTransportConfig downstream
	return NewHTTPClientWithConfig(formats, cfg)
}
// NewHTTPClientWithConfig creates a new cilium API HTTP client,
// using a customizable transport config. A nil cfg is replaced with the
// defaults from the spec's meta section.
func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *CiliumAPI {
	if cfg == nil {
		cfg = DefaultTransportConfig()
	}
	transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes)
	return New(transport, formats)
}
// New creates a new cilium API client on top of the given transport.
// A nil formats registry falls back to strfmt.Default.
func New(transport runtime.ClientTransport, formats strfmt.Registry) *CiliumAPI {
	if formats == nil {
		formats = strfmt.Default
	}
	return &CiliumAPI{
		Transport: transport,
		Bgp:       bgp.New(transport, formats),
		Daemon:    daemon.New(transport, formats),
		Endpoint:  endpoint.New(transport, formats),
		Ipam:      ipam.New(transport, formats),
		Policy:    policy.New(transport, formats),
		Prefilter: prefilter.New(transport, formats),
		Service:   service.New(transport, formats),
	}
}
// DefaultTransportConfig creates a TransportConfig with the
// default settings taken from the meta section of the spec file.
func DefaultTransportConfig() *TransportConfig {
	cfg := new(TransportConfig)
	cfg.Host = DefaultHost
	cfg.BasePath = DefaultBasePath
	cfg.Schemes = DefaultSchemes
	return cfg
}
// TransportConfig contains the transport related info,
// found in the meta section of the spec file.
type TransportConfig struct {
	Host     string
	BasePath string
	Schemes  []string
}

// WithHost overrides the default host and returns the config for chaining.
func (cfg *TransportConfig) WithHost(host string) *TransportConfig {
	cfg.Host = host
	return cfg
}

// WithBasePath overrides the default basePath and returns the config for chaining.
func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig {
	cfg.BasePath = basePath
	return cfg
}

// WithSchemes overrides the default schemes and returns the config for chaining.
func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig {
	cfg.Schemes = schemes
	return cfg
}
// CiliumAPI is a client for cilium API, bundling one sub-client per API group.
type CiliumAPI struct {
	Bgp       bgp.ClientService
	Daemon    daemon.ClientService
	Endpoint  endpoint.ClientService
	Ipam      ipam.ClientService
	Policy    policy.ClientService
	Prefilter prefilter.ClientService
	Service   service.ClientService
	Transport runtime.ClientTransport
}

// SetTransport changes the transport on the client and all its subresources
func (c *CiliumAPI) SetTransport(t runtime.ClientTransport) {
	c.Transport = t
	c.Bgp.SetTransport(t)
	c.Daemon.SetTransport(t)
	c.Endpoint.SetTransport(t)
	c.Ipam.SetTransport(t)
	c.Policy.SetTransport(t)
	c.Prefilter.SetTransport(t)
	c.Service.SetTransport(t)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new daemon API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	c := &Client{transport: transport, formats: formats}
	return c
}
// NewClientWithBasicAuth creates a new daemon API client with basic auth
// credentials. (The generated comment said "New", which violated the Go doc
// convention of starting with the declared name.)
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - user: user for basic authentication header.
//   - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	tr := httptransport.New(host, basePath, []string{scheme})
	tr.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: tr, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new daemon API client with a bearer
// token for authentication. (The generated comment said "New", which violated
// the Go doc convention of starting with the declared name.)
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	tr := httptransport.New(host, basePath, []string{scheme})
	tr.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: tr, formats: strfmt.Default}
}
/*
Client for daemon API
*/
type Client struct {
	// transport submits the operations built by the Client methods.
	transport runtime.ClientTransport
	// formats is the strfmt registry handed to each response reader.
	formats strfmt.Registry
}

// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)

// ClientService is the interface for Client methods
type ClientService interface {
	GetCgroupDumpMetadata(params *GetCgroupDumpMetadataParams, opts ...ClientOption) (*GetCgroupDumpMetadataOK, error)
	GetClusterNodes(params *GetClusterNodesParams, opts ...ClientOption) (*GetClusterNodesOK, error)
	GetConfig(params *GetConfigParams, opts ...ClientOption) (*GetConfigOK, error)
	GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error)
	GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error)
	GetMap(params *GetMapParams, opts ...ClientOption) (*GetMapOK, error)
	GetMapName(params *GetMapNameParams, opts ...ClientOption) (*GetMapNameOK, error)
	GetMapNameEvents(params *GetMapNameEventsParams, writer io.Writer, opts ...ClientOption) (*GetMapNameEventsOK, error)
	GetNodeIds(params *GetNodeIdsParams, opts ...ClientOption) (*GetNodeIdsOK, error)
	PatchConfig(params *PatchConfigParams, opts ...ClientOption) (*PatchConfigOK, error)
	// SetTransport swaps the transport used to submit operations.
	SetTransport(transport runtime.ClientTransport)
}
/*
GetCgroupDumpMetadata retrieves cgroup metadata for all pods
*/
func (a *Client) GetCgroupDumpMetadata(params *GetCgroupDumpMetadataParams, opts ...ClientOption) (*GetCgroupDumpMetadataOK, error) {
	if params == nil {
		params = NewGetCgroupDumpMetadataParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetCgroupDumpMetadata",
		Method:             "GET",
		PathPattern:        "/cgroup-dump-metadata",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetCgroupDumpMetadataReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetCgroupDumpMetadataOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetCgroupDumpMetadata: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetClusterNodes gets nodes information stored in the cilium agent
*/
func (a *Client) GetClusterNodes(params *GetClusterNodesParams, opts ...ClientOption) (*GetClusterNodesOK, error) {
	if params == nil {
		params = NewGetClusterNodesParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetClusterNodes",
		Method:             "GET",
		PathPattern:        "/cluster/nodes",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetClusterNodesReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetClusterNodesOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetClusterNodes: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetConfig gets configuration of cilium daemon

Returns the configuration of the Cilium daemon.
*/
func (a *Client) GetConfig(params *GetConfigParams, opts ...ClientOption) (*GetConfigOK, error) {
	if params == nil {
		params = NewGetConfigParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetConfig",
		Method:             "GET",
		PathPattern:        "/config",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetConfigReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetConfigOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetDebuginfo retrieves information about the agent and environment for debugging
*/
func (a *Client) GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error) {
	if params == nil {
		params = NewGetDebuginfoParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetDebuginfo",
		Method:             "GET",
		PathPattern:        "/debuginfo",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetDebuginfoReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetDebuginfoOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetDebuginfo: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetHealthz gets health of cilium daemon

Returns health and status information of the Cilium daemon and related
components such as the local container runtime, connected datastore,
Kubernetes integration and Hubble.
*/
func (a *Client) GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) {
	if params == nil {
		params = NewGetHealthzParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetHealthz",
		Method:             "GET",
		PathPattern:        "/healthz",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetHealthzReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetHealthzOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetHealthz: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetMap lists all open maps
*/
func (a *Client) GetMap(params *GetMapParams, opts ...ClientOption) (*GetMapOK, error) {
	if params == nil {
		params = NewGetMapParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetMap",
		Method:             "GET",
		PathPattern:        "/map",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetMapReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetMapOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetMap: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetMapName retrieves contents of b p f map
*/
func (a *Client) GetMapName(params *GetMapNameParams, opts ...ClientOption) (*GetMapNameOK, error) {
	if params == nil {
		params = NewGetMapNameParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetMapName",
		Method:             "GET",
		PathPattern:        "/map/{name}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetMapNameReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetMapNameOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetMapName: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetMapNameEvents retrieves the recent event logs associated with this endpoint
*/
func (a *Client) GetMapNameEvents(params *GetMapNameEventsParams, writer io.Writer, opts ...ClientOption) (*GetMapNameEventsOK, error) {
	if params == nil {
		params = NewGetMapNameEventsParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetMapNameEvents",
		Method:             "GET",
		PathPattern:        "/map/{name}/events",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		// The reader streams event output into the caller-supplied writer.
		Reader:  &GetMapNameEventsReader{formats: a.formats, writer: writer},
		Context: params.Context,
		Client:  params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetMapNameEventsOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetMapNameEvents: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetNodeIds lists information about known node i ds

Retrieves a list of node IDs allocated by the agent and their
associated node IP addresses.
*/
func (a *Client) GetNodeIds(params *GetNodeIdsParams, opts ...ClientOption) (*GetNodeIdsOK, error) {
	if params == nil {
		params = NewGetNodeIdsParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetNodeIds",
		Method:             "GET",
		PathPattern:        "/node/ids",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetNodeIdsReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetNodeIdsOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for GetNodeIds: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
PatchConfig modifies daemon configuration

Updates the daemon configuration by applying the provided
ConfigurationMap and regenerates & recompiles all required datapath
components.
*/
func (a *Client) PatchConfig(params *PatchConfigParams, opts ...ClientOption) (*PatchConfigOK, error) {
	if params == nil {
		params = NewPatchConfigParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PatchConfig",
		Method:             "PATCH",
		PathPattern:        "/config",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PatchConfigReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*PatchConfigOK); ok {
		return success, nil
	}
	// An undeclared success response reached us: absent a default response,
	// unknown successes should have errored above, so this is a codegen issue.
	panic(fmt.Sprintf("unexpected success response for PatchConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(tr runtime.ClientTransport) {
	a.transport = tr
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetCgroupDumpMetadataParams creates a new GetCgroupDumpMetadataParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetCgroupDumpMetadataParams() *GetCgroupDumpMetadataParams {
	params := new(GetCgroupDumpMetadataParams)
	params.timeout = cr.DefaultTimeout
	return params
}
// NewGetCgroupDumpMetadataParamsWithTimeout creates a new GetCgroupDumpMetadataParams object
// with the ability to set a timeout on a request.
func NewGetCgroupDumpMetadataParamsWithTimeout(timeout time.Duration) *GetCgroupDumpMetadataParams {
return &GetCgroupDumpMetadataParams{
timeout: timeout,
}
}
// NewGetCgroupDumpMetadataParamsWithContext creates a new GetCgroupDumpMetadataParams object
// with the ability to set a context for a request.
func NewGetCgroupDumpMetadataParamsWithContext(ctx context.Context) *GetCgroupDumpMetadataParams {
return &GetCgroupDumpMetadataParams{
Context: ctx,
}
}
// NewGetCgroupDumpMetadataParamsWithHTTPClient creates a new GetCgroupDumpMetadataParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetCgroupDumpMetadataParamsWithHTTPClient(client *http.Client) *GetCgroupDumpMetadataParams {
return &GetCgroupDumpMetadataParams{
HTTPClient: client,
}
}
// GetCgroupDumpMetadataParams contains all the parameters to send to the API
// endpoint for the get cgroup dump metadata operation.
//
// Typically these are written to a http.Request.
type GetCgroupDumpMetadataParams struct {
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get cgroup dump metadata params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetCgroupDumpMetadataParams) WithDefaults() *GetCgroupDumpMetadataParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get cgroup dump metadata params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetCgroupDumpMetadataParams) SetDefaults() {
	// This operation declares no parameters with default values.
}

// WithTimeout adds the timeout to the get cgroup dump metadata params.
func (o *GetCgroupDumpMetadataParams) WithTimeout(timeout time.Duration) *GetCgroupDumpMetadataParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get cgroup dump metadata params.
func (o *GetCgroupDumpMetadataParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get cgroup dump metadata params.
func (o *GetCgroupDumpMetadataParams) WithContext(ctx context.Context) *GetCgroupDumpMetadataParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get cgroup dump metadata params.
func (o *GetCgroupDumpMetadataParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get cgroup dump metadata params.
func (o *GetCgroupDumpMetadataParams) WithHTTPClient(client *http.Client) *GetCgroupDumpMetadataParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get cgroup dump metadata params.
func (o *GetCgroupDumpMetadataParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}
// WriteToRequest writes these params to a swagger request.
//
// Only the timeout is applied: this operation declares no request parameters.
func (o *GetCgroupDumpMetadataParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	// res is never appended to here; this guard is generator boilerplate
	// shared with operations that do serialize parameters.
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetCgroupDumpMetadataReader is a Reader for the GetCgroupDumpMetadata structure.
type GetCgroupDumpMetadataReader struct {
	// formats is the registry used to validate/deserialize typed fields.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// 200 is decoded into *GetCgroupDumpMetadataOK and returned as the result;
// 500 is decoded into *GetCgroupDumpMetadataFailure and returned as the error;
// any other status code is surfaced as a *runtime.APIError.
func (o *GetCgroupDumpMetadataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetCgroupDumpMetadataOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 500:
		result := NewGetCgroupDumpMetadataFailure()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[GET /cgroup-dump-metadata] GetCgroupDumpMetadata", response, response.Code())
	}
}
// NewGetCgroupDumpMetadataOK creates a GetCgroupDumpMetadataOK with default headers values.
func NewGetCgroupDumpMetadataOK() *GetCgroupDumpMetadataOK {
	return &GetCgroupDumpMetadataOK{}
}

/*
GetCgroupDumpMetadataOK describes a response with status code 200, with default header values.

Success
*/
type GetCgroupDumpMetadataOK struct {
	Payload *models.CgroupDumpMetadata
}

// IsSuccess returns true when this get cgroup dump metadata o k response has a 2xx status code.
func (o *GetCgroupDumpMetadataOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get cgroup dump metadata o k response has a 3xx status code.
func (o *GetCgroupDumpMetadataOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get cgroup dump metadata o k response has a 4xx status code.
func (o *GetCgroupDumpMetadataOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get cgroup dump metadata o k response has a 5xx status code.
func (o *GetCgroupDumpMetadataOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get cgroup dump metadata o k response a status code equal to that given.
func (o *GetCgroupDumpMetadataOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get cgroup dump metadata o k response.
func (o *GetCgroupDumpMetadataOK) Code() int {
	return 200
}

// Error renders the response for diagnostics; marshal errors are ignored
// (a nil payload simply renders as "null").
func (o *GetCgroupDumpMetadataOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %s", 200, payload)
}

// String renders the same representation as Error.
func (o *GetCgroupDumpMetadataOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %s", 200, payload)
}

// GetPayload returns the decoded response body.
func (o *GetCgroupDumpMetadataOK) GetPayload() *models.CgroupDumpMetadata {
	return o.Payload
}

func (o *GetCgroupDumpMetadataOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.CgroupDumpMetadata)
	// response payload; io.EOF is tolerated so an empty body leaves the
	// freshly allocated payload zero-valued rather than failing.
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetCgroupDumpMetadataFailure creates a GetCgroupDumpMetadataFailure with default headers values.
func NewGetCgroupDumpMetadataFailure() *GetCgroupDumpMetadataFailure {
	return &GetCgroupDumpMetadataFailure{}
}

/*
GetCgroupDumpMetadataFailure describes a response with status code 500, with default header values.

CgroupDumpMetadata get failed
*/
type GetCgroupDumpMetadataFailure struct {
	// Payload is held by value (not a pointer); readResponse decodes into its address.
	Payload models.Error
}

// IsSuccess returns true when this get cgroup dump metadata failure response has a 2xx status code.
func (o *GetCgroupDumpMetadataFailure) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get cgroup dump metadata failure response has a 3xx status code.
func (o *GetCgroupDumpMetadataFailure) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get cgroup dump metadata failure response has a 4xx status code.
func (o *GetCgroupDumpMetadataFailure) IsClientError() bool {
	return false
}

// IsServerError returns true when this get cgroup dump metadata failure response has a 5xx status code.
func (o *GetCgroupDumpMetadataFailure) IsServerError() bool {
	return true
}

// IsCode returns true when this get cgroup dump metadata failure response a status code equal to that given.
func (o *GetCgroupDumpMetadataFailure) IsCode(code int) bool {
	return code == 500
}

// Code gets the status code for the get cgroup dump metadata failure response.
func (o *GetCgroupDumpMetadataFailure) Code() int {
	return 500
}

// Error renders the response for diagnostics; marshal errors are ignored.
func (o *GetCgroupDumpMetadataFailure) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %s", 500, payload)
}

// String renders the same representation as Error.
func (o *GetCgroupDumpMetadataFailure) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %s", 500, payload)
}

// GetPayload returns the decoded error body.
func (o *GetCgroupDumpMetadataFailure) GetPayload() models.Error {
	return o.Payload
}

func (o *GetCgroupDumpMetadataFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload; io.EOF is tolerated so an empty body leaves the
	// payload zero-valued rather than failing.
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewGetClusterNodesParams creates a new GetClusterNodesParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetClusterNodesParams() *GetClusterNodesParams {
	return &GetClusterNodesParams{
		// cr.DefaultTimeout is the go-openapi runtime's shared client default.
		timeout: cr.DefaultTimeout,
	}
}

// NewGetClusterNodesParamsWithTimeout creates a new GetClusterNodesParams object
// with the ability to set a timeout on a request.
func NewGetClusterNodesParamsWithTimeout(timeout time.Duration) *GetClusterNodesParams {
	return &GetClusterNodesParams{
		timeout: timeout,
	}
}

// NewGetClusterNodesParamsWithContext creates a new GetClusterNodesParams object
// with the ability to set a context for a request.
func NewGetClusterNodesParamsWithContext(ctx context.Context) *GetClusterNodesParams {
	return &GetClusterNodesParams{
		Context: ctx,
	}
}

// NewGetClusterNodesParamsWithHTTPClient creates a new GetClusterNodesParams object
// with the ability to set a custom HTTPClient for a request.
//
// Note: this constructor does not set a timeout; combine with SetTimeout if needed.
func NewGetClusterNodesParamsWithHTTPClient(client *http.Client) *GetClusterNodesParams {
	return &GetClusterNodesParams{
		HTTPClient: client,
	}
}
// GetClusterNodesParams contains all the parameters to send to the API
// endpoint for the get cluster nodes operation.
//
// Typically these are written to a http.Request.
type GetClusterNodesParams struct {

	// ClientID.
	//
	// Client UUID should be used when the client wants to request
	// a diff of nodes added and / or removed since the last time
	// that client has made a request.
	ClientID *int64

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get cluster nodes params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetClusterNodesParams) WithDefaults() *GetClusterNodesParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get cluster nodes params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetClusterNodesParams) SetDefaults() {
	// This operation declares no parameters with default values.
}

// WithTimeout adds the timeout to the get cluster nodes params.
func (o *GetClusterNodesParams) WithTimeout(timeout time.Duration) *GetClusterNodesParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get cluster nodes params.
func (o *GetClusterNodesParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get cluster nodes params.
func (o *GetClusterNodesParams) WithContext(ctx context.Context) *GetClusterNodesParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get cluster nodes params.
func (o *GetClusterNodesParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get cluster nodes params.
func (o *GetClusterNodesParams) WithHTTPClient(client *http.Client) *GetClusterNodesParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get cluster nodes params.
func (o *GetClusterNodesParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithClientID adds the clientID to the get cluster nodes params.
func (o *GetClusterNodesParams) WithClientID(clientID *int64) *GetClusterNodesParams {
	o.ClientID = clientID
	return o
}

// SetClientID adds the clientId to the get cluster nodes params.
func (o *GetClusterNodesParams) SetClientID(clientID *int64) {
	o.ClientID = clientID
}
// WriteToRequest writes these params to a swagger request.
//
// Applies the timeout and, when set, serializes ClientID as the
// "client-id" request header.
func (o *GetClusterNodesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	// ClientID is optional: the header is only written when provided.
	if o.ClientID != nil {
		// header param client-id
		if err := r.SetHeaderParam("client-id", swag.FormatInt64(*o.ClientID)); err != nil {
			return err
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetClusterNodesReader is a Reader for the GetClusterNodes structure.
type GetClusterNodesReader struct {
	// formats is the registry used to validate/deserialize typed fields.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// 200 is decoded into *GetClusterNodesOK and returned as the result;
// any other status code is surfaced as a *runtime.APIError.
func (o *GetClusterNodesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetClusterNodesOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		return nil, runtime.NewAPIError("[GET /cluster/nodes] GetClusterNodes", response, response.Code())
	}
}
// NewGetClusterNodesOK creates a GetClusterNodesOK with default headers values.
func NewGetClusterNodesOK() *GetClusterNodesOK {
	return &GetClusterNodesOK{}
}

/*
GetClusterNodesOK describes a response with status code 200, with default header values.

Success
*/
type GetClusterNodesOK struct {
	Payload *models.ClusterNodeStatus
}

// IsSuccess returns true when this get cluster nodes o k response has a 2xx status code.
func (o *GetClusterNodesOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get cluster nodes o k response has a 3xx status code.
func (o *GetClusterNodesOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get cluster nodes o k response has a 4xx status code.
func (o *GetClusterNodesOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get cluster nodes o k response has a 5xx status code.
func (o *GetClusterNodesOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get cluster nodes o k response a status code equal to that given.
func (o *GetClusterNodesOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get cluster nodes o k response.
func (o *GetClusterNodesOK) Code() int {
	return 200
}

// Error renders the response for diagnostics; marshal errors are ignored
// (a nil payload simply renders as "null").
func (o *GetClusterNodesOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %s", 200, payload)
}

// String renders the same representation as Error.
func (o *GetClusterNodesOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %s", 200, payload)
}

// GetPayload returns the decoded response body.
func (o *GetClusterNodesOK) GetPayload() *models.ClusterNodeStatus {
	return o.Payload
}

func (o *GetClusterNodesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.ClusterNodeStatus)
	// response payload; io.EOF is tolerated so an empty body leaves the
	// freshly allocated payload zero-valued rather than failing.
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetConfigParams creates a new GetConfigParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetConfigParams() *GetConfigParams {
	return &GetConfigParams{
		// cr.DefaultTimeout is the go-openapi runtime's shared client default.
		timeout: cr.DefaultTimeout,
	}
}

// NewGetConfigParamsWithTimeout creates a new GetConfigParams object
// with the ability to set a timeout on a request.
func NewGetConfigParamsWithTimeout(timeout time.Duration) *GetConfigParams {
	return &GetConfigParams{
		timeout: timeout,
	}
}

// NewGetConfigParamsWithContext creates a new GetConfigParams object
// with the ability to set a context for a request.
func NewGetConfigParamsWithContext(ctx context.Context) *GetConfigParams {
	return &GetConfigParams{
		Context: ctx,
	}
}

// NewGetConfigParamsWithHTTPClient creates a new GetConfigParams object
// with the ability to set a custom HTTPClient for a request.
//
// Note: this constructor does not set a timeout; combine with SetTimeout if needed.
func NewGetConfigParamsWithHTTPClient(client *http.Client) *GetConfigParams {
	return &GetConfigParams{
		HTTPClient: client,
	}
}
// GetConfigParams contains all the parameters to send to the API
// endpoint for the get config operation.
//
// Typically these are written to a http.Request.
type GetConfigParams struct {
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get config params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetConfigParams) WithDefaults() *GetConfigParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get config params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetConfigParams) SetDefaults() {
	// This operation declares no parameters with default values.
}

// WithTimeout adds the timeout to the get config params.
func (o *GetConfigParams) WithTimeout(timeout time.Duration) *GetConfigParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get config params.
func (o *GetConfigParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get config params.
func (o *GetConfigParams) WithContext(ctx context.Context) *GetConfigParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get config params.
func (o *GetConfigParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get config params.
func (o *GetConfigParams) WithHTTPClient(client *http.Client) *GetConfigParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get config params.
func (o *GetConfigParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}
// WriteToRequest writes these params to a swagger request.
//
// Only the timeout is applied: this operation declares no request parameters.
func (o *GetConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	// res is never appended to here; this guard is generator boilerplate
	// shared with operations that do serialize parameters.
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetConfigReader is a Reader for the GetConfig structure.
type GetConfigReader struct {
	// formats is the registry used to validate/deserialize typed fields.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// 200 is decoded into *GetConfigOK and returned as the result;
// any other status code is surfaced as a *runtime.APIError.
func (o *GetConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetConfigOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		return nil, runtime.NewAPIError("[GET /config] GetConfig", response, response.Code())
	}
}
// NewGetConfigOK creates a GetConfigOK with default headers values.
func NewGetConfigOK() *GetConfigOK {
	return &GetConfigOK{}
}

/*
GetConfigOK describes a response with status code 200, with default header values.

Success
*/
type GetConfigOK struct {
	Payload *models.DaemonConfiguration
}

// IsSuccess returns true when this get config o k response has a 2xx status code.
func (o *GetConfigOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get config o k response has a 3xx status code.
func (o *GetConfigOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get config o k response has a 4xx status code.
func (o *GetConfigOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get config o k response has a 5xx status code.
func (o *GetConfigOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get config o k response a status code equal to that given.
func (o *GetConfigOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get config o k response.
func (o *GetConfigOK) Code() int {
	return 200
}

// Error renders the response for diagnostics; marshal errors are ignored
// (a nil payload simply renders as "null").
func (o *GetConfigOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /config][%d] getConfigOK %s", 200, payload)
}

// String renders the same representation as Error.
func (o *GetConfigOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /config][%d] getConfigOK %s", 200, payload)
}

// GetPayload returns the decoded response body.
func (o *GetConfigOK) GetPayload() *models.DaemonConfiguration {
	return o.Payload
}

func (o *GetConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.DaemonConfiguration)
	// response payload; io.EOF is tolerated so an empty body leaves the
	// freshly allocated payload zero-valued rather than failing.
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetDebuginfoParams creates a new GetDebuginfoParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetDebuginfoParams() *GetDebuginfoParams {
	return &GetDebuginfoParams{
		// cr.DefaultTimeout is the go-openapi runtime's shared client default.
		timeout: cr.DefaultTimeout,
	}
}

// NewGetDebuginfoParamsWithTimeout creates a new GetDebuginfoParams object
// with the ability to set a timeout on a request.
func NewGetDebuginfoParamsWithTimeout(timeout time.Duration) *GetDebuginfoParams {
	return &GetDebuginfoParams{
		timeout: timeout,
	}
}

// NewGetDebuginfoParamsWithContext creates a new GetDebuginfoParams object
// with the ability to set a context for a request.
func NewGetDebuginfoParamsWithContext(ctx context.Context) *GetDebuginfoParams {
	return &GetDebuginfoParams{
		Context: ctx,
	}
}

// NewGetDebuginfoParamsWithHTTPClient creates a new GetDebuginfoParams object
// with the ability to set a custom HTTPClient for a request.
//
// Note: this constructor does not set a timeout; combine with SetTimeout if needed.
func NewGetDebuginfoParamsWithHTTPClient(client *http.Client) *GetDebuginfoParams {
	return &GetDebuginfoParams{
		HTTPClient: client,
	}
}
// GetDebuginfoParams contains all the parameters to send to the API
// endpoint for the get debuginfo operation.
//
// Typically these are written to a http.Request.
type GetDebuginfoParams struct {
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get debuginfo params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetDebuginfoParams) WithDefaults() *GetDebuginfoParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get debuginfo params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetDebuginfoParams) SetDefaults() {
	// This operation declares no parameters with default values.
}

// WithTimeout adds the timeout to the get debuginfo params.
func (o *GetDebuginfoParams) WithTimeout(timeout time.Duration) *GetDebuginfoParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get debuginfo params.
func (o *GetDebuginfoParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get debuginfo params.
func (o *GetDebuginfoParams) WithContext(ctx context.Context) *GetDebuginfoParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get debuginfo params.
func (o *GetDebuginfoParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get debuginfo params.
func (o *GetDebuginfoParams) WithHTTPClient(client *http.Client) *GetDebuginfoParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get debuginfo params.
func (o *GetDebuginfoParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}
// WriteToRequest writes these params to a swagger request.
//
// Only the timeout is applied: this operation declares no request parameters.
func (o *GetDebuginfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	// res is never appended to here; this guard is generator boilerplate
	// shared with operations that do serialize parameters.
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetDebuginfoReader is a Reader for the GetDebuginfo structure.
type GetDebuginfoReader struct {
	// formats is the registry used to validate/deserialize typed fields.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// 200 is decoded into *GetDebuginfoOK and returned as the result;
// 500 is decoded into *GetDebuginfoFailure and returned as the error;
// any other status code is surfaced as a *runtime.APIError.
func (o *GetDebuginfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetDebuginfoOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 500:
		result := NewGetDebuginfoFailure()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[GET /debuginfo] GetDebuginfo", response, response.Code())
	}
}
// NewGetDebuginfoOK creates a GetDebuginfoOK with default headers values.
func NewGetDebuginfoOK() *GetDebuginfoOK {
	return &GetDebuginfoOK{}
}

/*
GetDebuginfoOK describes a response with status code 200, with default header values.

Success
*/
type GetDebuginfoOK struct {
	Payload *models.DebugInfo
}

// IsSuccess returns true when this get debuginfo o k response has a 2xx status code.
func (o *GetDebuginfoOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get debuginfo o k response has a 3xx status code.
func (o *GetDebuginfoOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get debuginfo o k response has a 4xx status code.
func (o *GetDebuginfoOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get debuginfo o k response has a 5xx status code.
func (o *GetDebuginfoOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get debuginfo o k response a status code equal to that given.
func (o *GetDebuginfoOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get debuginfo o k response.
func (o *GetDebuginfoOK) Code() int {
	return 200
}

// Error renders the response for diagnostics; marshal errors are ignored
// (a nil payload simply renders as "null").
func (o *GetDebuginfoOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %s", 200, payload)
}

// String renders the same representation as Error.
func (o *GetDebuginfoOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %s", 200, payload)
}

// GetPayload returns the decoded response body.
func (o *GetDebuginfoOK) GetPayload() *models.DebugInfo {
	return o.Payload
}

func (o *GetDebuginfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.DebugInfo)
	// response payload; io.EOF is tolerated so an empty body leaves the
	// freshly allocated payload zero-valued rather than failing.
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetDebuginfoFailure creates a GetDebuginfoFailure with default headers values.
func NewGetDebuginfoFailure() *GetDebuginfoFailure {
	return &GetDebuginfoFailure{}
}

/*
GetDebuginfoFailure describes a response with status code 500, with default header values.

DebugInfo get failed
*/
type GetDebuginfoFailure struct {
	// Payload is held by value (not a pointer); readResponse decodes into its address.
	Payload models.Error
}

// IsSuccess returns true when this get debuginfo failure response has a 2xx status code.
func (o *GetDebuginfoFailure) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get debuginfo failure response has a 3xx status code.
func (o *GetDebuginfoFailure) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get debuginfo failure response has a 4xx status code.
func (o *GetDebuginfoFailure) IsClientError() bool {
	return false
}

// IsServerError returns true when this get debuginfo failure response has a 5xx status code.
func (o *GetDebuginfoFailure) IsServerError() bool {
	return true
}

// IsCode returns true when this get debuginfo failure response a status code equal to that given.
func (o *GetDebuginfoFailure) IsCode(code int) bool {
	return code == 500
}

// Code gets the status code for the get debuginfo failure response.
func (o *GetDebuginfoFailure) Code() int {
	return 500
}

// Error renders the response for diagnostics; marshal errors are ignored.
func (o *GetDebuginfoFailure) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %s", 500, payload)
}

// String renders the same representation as Error.
func (o *GetDebuginfoFailure) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %s", 500, payload)
}

// GetPayload returns the decoded error body.
func (o *GetDebuginfoFailure) GetPayload() models.Error {
	return o.Payload
}

func (o *GetDebuginfoFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload; io.EOF is tolerated so an empty body leaves the
	// payload zero-valued rather than failing.
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewGetHealthzParams creates a new GetHealthzParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetHealthzParams() *GetHealthzParams {
	params := new(GetHealthzParams)
	params.timeout = cr.DefaultTimeout
	return params
}

// NewGetHealthzParamsWithTimeout creates a new GetHealthzParams object
// with the ability to set a timeout on a request.
func NewGetHealthzParamsWithTimeout(timeout time.Duration) *GetHealthzParams {
	params := new(GetHealthzParams)
	params.timeout = timeout
	return params
}

// NewGetHealthzParamsWithContext creates a new GetHealthzParams object
// with the ability to set a context for a request.
func NewGetHealthzParamsWithContext(ctx context.Context) *GetHealthzParams {
	params := new(GetHealthzParams)
	params.Context = ctx
	return params
}

// NewGetHealthzParamsWithHTTPClient creates a new GetHealthzParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetHealthzParamsWithHTTPClient(client *http.Client) *GetHealthzParams {
	params := new(GetHealthzParams)
	params.HTTPClient = client
	return params
}
/*
GetHealthzParams contains all the parameters to send to the API endpoint
for the get healthz operation.
Typically these are written to a http.Request.
*/
type GetHealthzParams struct {
	/* Brief.
	Brief will return a brief representation of the Cilium status.
	*/
	Brief *bool

	/* RequireK8sConnectivity.
	If set to true, failure of the agent to connect to the Kubernetes control plane will cause the agent's health status to also fail.
	Default: true
	*/
	RequireK8sConnectivity *bool

	// timeout is the per-request timeout applied in WriteToRequest.
	timeout time.Duration
	// Context carries cancellation and deadline for the request.
	Context context.Context
	// HTTPClient, when non-nil, overrides the transport's default client.
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get healthz params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetHealthzParams) WithDefaults() *GetHealthzParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get healthz params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetHealthzParams) SetDefaults() {
	requireK8sConnectivityDefault := true
	// Rebuild the params in place: fields without a default (Brief) return to
	// their zero value, while the transport settings are carried over.
	*o = GetHealthzParams{
		RequireK8sConnectivity: &requireK8sConnectivityDefault,
		timeout:                o.timeout,
		Context:                o.Context,
		HTTPClient:             o.HTTPClient,
	}
}
// WithTimeout adds the timeout to the get healthz params and returns the params for chaining.
func (o *GetHealthzParams) WithTimeout(timeout time.Duration) *GetHealthzParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get healthz params
func (o *GetHealthzParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get healthz params and returns the params for chaining.
func (o *GetHealthzParams) WithContext(ctx context.Context) *GetHealthzParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get healthz params
func (o *GetHealthzParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get healthz params and returns the params for chaining.
func (o *GetHealthzParams) WithHTTPClient(client *http.Client) *GetHealthzParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get healthz params
func (o *GetHealthzParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithBrief adds the brief to the get healthz params and returns the params for chaining.
func (o *GetHealthzParams) WithBrief(brief *bool) *GetHealthzParams {
	o.SetBrief(brief)
	return o
}

// SetBrief adds the brief to the get healthz params
func (o *GetHealthzParams) SetBrief(brief *bool) {
	o.Brief = brief
}

// WithRequireK8sConnectivity adds the requireK8sConnectivity to the get healthz params and returns the params for chaining.
func (o *GetHealthzParams) WithRequireK8sConnectivity(requireK8sConnectivity *bool) *GetHealthzParams {
	o.SetRequireK8sConnectivity(requireK8sConnectivity)
	return o
}

// SetRequireK8sConnectivity adds the requireK8sConnectivity to the get healthz params
func (o *GetHealthzParams) SetRequireK8sConnectivity(requireK8sConnectivity *bool) {
	o.RequireK8sConnectivity = requireK8sConnectivity
}
// WriteToRequest writes these params to a swagger request
func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	// header param brief
	if brief := o.Brief; brief != nil {
		if err := r.SetHeaderParam("brief", swag.FormatBool(*brief)); err != nil {
			return err
		}
	}

	// header param require-k8s-connectivity
	if rkc := o.RequireK8sConnectivity; rkc != nil {
		if err := r.SetHeaderParam("require-k8s-connectivity", swag.FormatBool(*rkc)); err != nil {
			return err
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetHealthzReader is a Reader for the GetHealthz structure.
type GetHealthzReader struct {
	// formats is the strfmt registry used when decoding response payloads.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// Only status 200 is recognized; any other code yields a runtime.APIError.
func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetHealthzOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		return nil, runtime.NewAPIError("[GET /healthz] GetHealthz", response, response.Code())
	}
}

// NewGetHealthzOK creates a GetHealthzOK with default headers values
func NewGetHealthzOK() *GetHealthzOK {
	return &GetHealthzOK{}
}

/*
GetHealthzOK describes a response with status code 200, with default header values.
Success
*/
type GetHealthzOK struct {
	Payload *models.StatusResponse
}

// IsSuccess returns true when this get healthz o k response has a 2xx status code
func (o *GetHealthzOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get healthz o k response has a 3xx status code
func (o *GetHealthzOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get healthz o k response has a 4xx status code
func (o *GetHealthzOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get healthz o k response has a 5xx status code
func (o *GetHealthzOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get healthz o k response a status code equal to that given
func (o *GetHealthzOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get healthz o k response
func (o *GetHealthzOK) Code() int {
	return 200
}

// Error renders the response as an error string containing the method, path,
// status code, and the JSON-encoded payload.
func (o *GetHealthzOK) Error() string {
	// Marshal errors are deliberately ignored: this is a best-effort rendering.
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %s", 200, payload)
}

// String returns the same human-readable representation as Error.
func (o *GetHealthzOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %s", 200, payload)
}

// GetPayload returns the status model decoded from the response body.
func (o *GetHealthzOK) GetPayload() *models.StatusResponse {
	return o.Payload
}

// readResponse decodes the HTTP response body into a freshly allocated Payload.
// io.EOF is tolerated so an empty body does not fail decoding.
func (o *GetHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.StatusResponse)
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewGetMapNameEventsParams creates a new GetMapNameEventsParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetMapNameEventsParams() *GetMapNameEventsParams {
	params := new(GetMapNameEventsParams)
	params.timeout = cr.DefaultTimeout
	return params
}

// NewGetMapNameEventsParamsWithTimeout creates a new GetMapNameEventsParams object
// with the ability to set a timeout on a request.
func NewGetMapNameEventsParamsWithTimeout(timeout time.Duration) *GetMapNameEventsParams {
	params := new(GetMapNameEventsParams)
	params.timeout = timeout
	return params
}

// NewGetMapNameEventsParamsWithContext creates a new GetMapNameEventsParams object
// with the ability to set a context for a request.
func NewGetMapNameEventsParamsWithContext(ctx context.Context) *GetMapNameEventsParams {
	params := new(GetMapNameEventsParams)
	params.Context = ctx
	return params
}

// NewGetMapNameEventsParamsWithHTTPClient creates a new GetMapNameEventsParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetMapNameEventsParamsWithHTTPClient(client *http.Client) *GetMapNameEventsParams {
	params := new(GetMapNameEventsParams)
	params.HTTPClient = client
	return params
}
/*
GetMapNameEventsParams contains all the parameters to send to the API endpoint
for the get map name events operation.
Typically these are written to a http.Request.
*/
type GetMapNameEventsParams struct {
	/* Follow.
	Whether to follow streamed requests
	*/
	Follow *bool

	/* Name.
	Name of map
	*/
	Name string

	// timeout is the per-request timeout applied in WriteToRequest.
	timeout time.Duration
	// Context carries cancellation and deadline for the request.
	Context context.Context
	// HTTPClient, when non-nil, overrides the transport's default client.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get map name events params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetMapNameEventsParams) WithDefaults() *GetMapNameEventsParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get map name events params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetMapNameEventsParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get map name events params and returns the params for chaining.
func (o *GetMapNameEventsParams) WithTimeout(timeout time.Duration) *GetMapNameEventsParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get map name events params
func (o *GetMapNameEventsParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get map name events params and returns the params for chaining.
func (o *GetMapNameEventsParams) WithContext(ctx context.Context) *GetMapNameEventsParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get map name events params
func (o *GetMapNameEventsParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get map name events params and returns the params for chaining.
func (o *GetMapNameEventsParams) WithHTTPClient(client *http.Client) *GetMapNameEventsParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get map name events params
func (o *GetMapNameEventsParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithFollow adds the follow to the get map name events params and returns the params for chaining.
func (o *GetMapNameEventsParams) WithFollow(follow *bool) *GetMapNameEventsParams {
	o.SetFollow(follow)
	return o
}

// SetFollow adds the follow to the get map name events params
func (o *GetMapNameEventsParams) SetFollow(follow *bool) {
	o.Follow = follow
}

// WithName adds the name to the get map name events params and returns the params for chaining.
func (o *GetMapNameEventsParams) WithName(name string) *GetMapNameEventsParams {
	o.SetName(name)
	return o
}

// SetName adds the name to the get map name events params
func (o *GetMapNameEventsParams) SetName(name string) {
	o.Name = name
}
// WriteToRequest writes these params to a swagger request: the optional
// "follow" query parameter and the mandatory "name" path parameter.
func (o *GetMapNameEventsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	if o.Follow != nil {
		// query param follow
		// The generator's inner `if o.Follow != nil` re-check was redundant
		// (the outer guard already holds), so dereference directly.
		qFollow := swag.FormatBool(*o.Follow)
		if qFollow != "" {
			if err := r.SetQueryParam("follow", qFollow); err != nil {
				return err
			}
		}
	}

	// path param name
	if err := r.SetPathParam("name", o.Name); err != nil {
		return err
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
)
// GetMapNameEventsReader is a Reader for the GetMapNameEvents structure.
type GetMapNameEventsReader struct {
	// formats is the strfmt registry used when decoding response payloads.
	formats strfmt.Registry
	// writer receives the streamed 200 response body (see NewGetMapNameEventsOK).
	writer io.Writer
}

// ReadResponse reads a server response into the received o.
// 200 streams the body into o.writer; 404 is returned as an error value;
// any other code yields a runtime.APIError.
func (o *GetMapNameEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetMapNameEventsOK(o.writer)
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 404:
		result := NewGetMapNameEventsNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[GET /map/{name}/events] GetMapNameEvents", response, response.Code())
	}
}

// NewGetMapNameEventsOK creates a GetMapNameEventsOK with default headers values
func NewGetMapNameEventsOK(writer io.Writer) *GetMapNameEventsOK {
	return &GetMapNameEventsOK{
		Payload: writer,
	}
}

/*
GetMapNameEventsOK describes a response with status code 200, with default header values.
Success
*/
type GetMapNameEventsOK struct {
	// Payload is the caller-supplied writer the streamed body is copied into.
	Payload io.Writer
}

// IsSuccess returns true when this get map name events o k response has a 2xx status code
func (o *GetMapNameEventsOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get map name events o k response has a 3xx status code
func (o *GetMapNameEventsOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get map name events o k response has a 4xx status code
func (o *GetMapNameEventsOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get map name events o k response has a 5xx status code
func (o *GetMapNameEventsOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get map name events o k response a status code equal to that given
func (o *GetMapNameEventsOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get map name events o k response
func (o *GetMapNameEventsOK) Code() int {
	return 200
}

// Error renders the response as an error string containing the method, path,
// and status code. The streamed payload is not included.
func (o *GetMapNameEventsOK) Error() string {
	return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK", 200)
}

// String returns the same human-readable representation as Error.
func (o *GetMapNameEventsOK) String() string {
	return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK", 200)
}

// GetPayload returns the writer the response body was streamed into.
func (o *GetMapNameEventsOK) GetPayload() io.Writer {
	return o.Payload
}

// readResponse streams the HTTP response body into o.Payload.
// io.EOF is tolerated so an empty body does not fail decoding.
func (o *GetMapNameEventsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}

// NewGetMapNameEventsNotFound creates a GetMapNameEventsNotFound with default headers values
func NewGetMapNameEventsNotFound() *GetMapNameEventsNotFound {
	return &GetMapNameEventsNotFound{}
}

/*
GetMapNameEventsNotFound describes a response with status code 404, with default header values.
Map not found
*/
type GetMapNameEventsNotFound struct {
}

// IsSuccess returns true when this get map name events not found response has a 2xx status code
func (o *GetMapNameEventsNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get map name events not found response has a 3xx status code
func (o *GetMapNameEventsNotFound) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get map name events not found response has a 4xx status code
func (o *GetMapNameEventsNotFound) IsClientError() bool {
	return true
}

// IsServerError returns true when this get map name events not found response has a 5xx status code
func (o *GetMapNameEventsNotFound) IsServerError() bool {
	return false
}

// IsCode returns true when this get map name events not found response a status code equal to that given
func (o *GetMapNameEventsNotFound) IsCode(code int) bool {
	return code == 404
}

// Code gets the status code for the get map name events not found response
func (o *GetMapNameEventsNotFound) Code() int {
	return 404
}

// Error renders the response as an error string; a 404 carries no payload.
func (o *GetMapNameEventsNotFound) Error() string {
	return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound", 404)
}

// String returns the same human-readable representation as Error.
func (o *GetMapNameEventsNotFound) String() string {
	return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound", 404)
}

// readResponse is a no-op: the 404 response defines no body to decode.
func (o *GetMapNameEventsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetMapNameParams creates a new GetMapNameParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetMapNameParams() *GetMapNameParams {
	params := new(GetMapNameParams)
	params.timeout = cr.DefaultTimeout
	return params
}

// NewGetMapNameParamsWithTimeout creates a new GetMapNameParams object
// with the ability to set a timeout on a request.
func NewGetMapNameParamsWithTimeout(timeout time.Duration) *GetMapNameParams {
	params := new(GetMapNameParams)
	params.timeout = timeout
	return params
}

// NewGetMapNameParamsWithContext creates a new GetMapNameParams object
// with the ability to set a context for a request.
func NewGetMapNameParamsWithContext(ctx context.Context) *GetMapNameParams {
	params := new(GetMapNameParams)
	params.Context = ctx
	return params
}

// NewGetMapNameParamsWithHTTPClient creates a new GetMapNameParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetMapNameParamsWithHTTPClient(client *http.Client) *GetMapNameParams {
	params := new(GetMapNameParams)
	params.HTTPClient = client
	return params
}
/*
GetMapNameParams contains all the parameters to send to the API endpoint
for the get map name operation.
Typically these are written to a http.Request.
*/
type GetMapNameParams struct {
	/* Name.
	Name of map
	*/
	Name string

	// timeout is the per-request timeout applied in WriteToRequest.
	timeout time.Duration
	// Context carries cancellation and deadline for the request.
	Context context.Context
	// HTTPClient, when non-nil, overrides the transport's default client.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get map name params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetMapNameParams) WithDefaults() *GetMapNameParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get map name params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetMapNameParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get map name params and returns the params for chaining.
func (o *GetMapNameParams) WithTimeout(timeout time.Duration) *GetMapNameParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get map name params
func (o *GetMapNameParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get map name params and returns the params for chaining.
func (o *GetMapNameParams) WithContext(ctx context.Context) *GetMapNameParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get map name params
func (o *GetMapNameParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get map name params and returns the params for chaining.
func (o *GetMapNameParams) WithHTTPClient(client *http.Client) *GetMapNameParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get map name params
func (o *GetMapNameParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithName adds the name to the get map name params and returns the params for chaining.
func (o *GetMapNameParams) WithName(name string) *GetMapNameParams {
	o.SetName(name)
	return o
}

// SetName adds the name to the get map name params
func (o *GetMapNameParams) SetName(name string) {
	o.Name = name
}

// WriteToRequest writes these params to a swagger request: the mandatory
// "name" path parameter plus the configured timeout.
func (o *GetMapNameParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	// path param name
	if err := r.SetPathParam("name", o.Name); err != nil {
		return err
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetMapNameReader is a Reader for the GetMapName structure.
type GetMapNameReader struct {
	// formats is the strfmt registry used when decoding response payloads.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// 200 decodes a BPFMap; 404 is returned as an error value; any other
// code yields a runtime.APIError.
func (o *GetMapNameReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetMapNameOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 404:
		result := NewGetMapNameNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[GET /map/{name}] GetMapName", response, response.Code())
	}
}

// NewGetMapNameOK creates a GetMapNameOK with default headers values
func NewGetMapNameOK() *GetMapNameOK {
	return &GetMapNameOK{}
}

/*
GetMapNameOK describes a response with status code 200, with default header values.
Success
*/
type GetMapNameOK struct {
	Payload *models.BPFMap
}

// IsSuccess returns true when this get map name o k response has a 2xx status code
func (o *GetMapNameOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get map name o k response has a 3xx status code
func (o *GetMapNameOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get map name o k response has a 4xx status code
func (o *GetMapNameOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get map name o k response has a 5xx status code
func (o *GetMapNameOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get map name o k response a status code equal to that given
func (o *GetMapNameOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get map name o k response
func (o *GetMapNameOK) Code() int {
	return 200
}

// Error renders the response as an error string containing the method, path,
// status code, and the JSON-encoded payload.
func (o *GetMapNameOK) Error() string {
	// Marshal errors are deliberately ignored: this is a best-effort rendering.
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %s", 200, payload)
}

// String returns the same human-readable representation as Error.
func (o *GetMapNameOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %s", 200, payload)
}

// GetPayload returns the BPF map model decoded from the response body.
func (o *GetMapNameOK) GetPayload() *models.BPFMap {
	return o.Payload
}

// readResponse decodes the HTTP response body into a freshly allocated Payload.
// io.EOF is tolerated so an empty body does not fail decoding.
func (o *GetMapNameOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.BPFMap)
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}

// NewGetMapNameNotFound creates a GetMapNameNotFound with default headers values
func NewGetMapNameNotFound() *GetMapNameNotFound {
	return &GetMapNameNotFound{}
}

/*
GetMapNameNotFound describes a response with status code 404, with default header values.
Map not found
*/
type GetMapNameNotFound struct {
}

// IsSuccess returns true when this get map name not found response has a 2xx status code
func (o *GetMapNameNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get map name not found response has a 3xx status code
func (o *GetMapNameNotFound) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get map name not found response has a 4xx status code
func (o *GetMapNameNotFound) IsClientError() bool {
	return true
}

// IsServerError returns true when this get map name not found response has a 5xx status code
func (o *GetMapNameNotFound) IsServerError() bool {
	return false
}

// IsCode returns true when this get map name not found response a status code equal to that given
func (o *GetMapNameNotFound) IsCode(code int) bool {
	return code == 404
}

// Code gets the status code for the get map name not found response
func (o *GetMapNameNotFound) Code() int {
	return 404
}

// Error renders the response as an error string; a 404 carries no payload.
func (o *GetMapNameNotFound) Error() string {
	return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound", 404)
}

// String returns the same human-readable representation as Error.
func (o *GetMapNameNotFound) String() string {
	return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound", 404)
}

// readResponse is a no-op: the 404 response defines no body to decode.
func (o *GetMapNameNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetMapParams creates a new GetMapParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetMapParams() *GetMapParams {
	params := new(GetMapParams)
	params.timeout = cr.DefaultTimeout
	return params
}

// NewGetMapParamsWithTimeout creates a new GetMapParams object
// with the ability to set a timeout on a request.
func NewGetMapParamsWithTimeout(timeout time.Duration) *GetMapParams {
	params := new(GetMapParams)
	params.timeout = timeout
	return params
}

// NewGetMapParamsWithContext creates a new GetMapParams object
// with the ability to set a context for a request.
func NewGetMapParamsWithContext(ctx context.Context) *GetMapParams {
	params := new(GetMapParams)
	params.Context = ctx
	return params
}

// NewGetMapParamsWithHTTPClient creates a new GetMapParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetMapParamsWithHTTPClient(client *http.Client) *GetMapParams {
	params := new(GetMapParams)
	params.HTTPClient = client
	return params
}
/*
GetMapParams contains all the parameters to send to the API endpoint
for the get map operation.
Typically these are written to a http.Request.
*/
type GetMapParams struct {
	// timeout is the per-request timeout applied in WriteToRequest.
	timeout time.Duration
	// Context carries cancellation and deadline for the request.
	Context context.Context
	// HTTPClient, when non-nil, overrides the transport's default client.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get map params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetMapParams) WithDefaults() *GetMapParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get map params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetMapParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get map params and returns the params for chaining.
func (o *GetMapParams) WithTimeout(timeout time.Duration) *GetMapParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get map params
func (o *GetMapParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get map params and returns the params for chaining.
func (o *GetMapParams) WithContext(ctx context.Context) *GetMapParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get map params
func (o *GetMapParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get map params and returns the params for chaining.
func (o *GetMapParams) WithHTTPClient(client *http.Client) *GetMapParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get map params
func (o *GetMapParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request.
// The operation takes no parameters beyond the request timeout.
func (o *GetMapParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetMapReader is a Reader for the GetMap structure.
type GetMapReader struct {
	// formats is the strfmt registry used when decoding response payloads.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// Only status 200 is recognized; any other code yields a runtime.APIError.
func (o *GetMapReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetMapOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		return nil, runtime.NewAPIError("[GET /map] GetMap", response, response.Code())
	}
}

// NewGetMapOK creates a GetMapOK with default headers values
func NewGetMapOK() *GetMapOK {
	return &GetMapOK{}
}

/*
GetMapOK describes a response with status code 200, with default header values.
Success
*/
type GetMapOK struct {
	Payload *models.BPFMapList
}

// IsSuccess returns true when this get map o k response has a 2xx status code
func (o *GetMapOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get map o k response has a 3xx status code
func (o *GetMapOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get map o k response has a 4xx status code
func (o *GetMapOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get map o k response has a 5xx status code
func (o *GetMapOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get map o k response a status code equal to that given
func (o *GetMapOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get map o k response
func (o *GetMapOK) Code() int {
	return 200
}

// Error renders the response as an error string containing the method, path,
// status code, and the JSON-encoded payload.
func (o *GetMapOK) Error() string {
	// Marshal errors are deliberately ignored: this is a best-effort rendering.
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /map][%d] getMapOK %s", 200, payload)
}

// String returns the same human-readable representation as Error.
func (o *GetMapOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /map][%d] getMapOK %s", 200, payload)
}

// GetPayload returns the BPF map list model decoded from the response body.
func (o *GetMapOK) GetPayload() *models.BPFMapList {
	return o.Payload
}

// readResponse decodes the HTTP response body into a freshly allocated Payload.
// io.EOF is tolerated so an empty body does not fail decoding.
func (o *GetMapOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.BPFMapList)
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetNodeIdsParams creates a new GetNodeIdsParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetNodeIdsParams() *GetNodeIdsParams {
	return &GetNodeIdsParams{timeout: cr.DefaultTimeout}
}

// NewGetNodeIdsParamsWithTimeout creates a new GetNodeIdsParams object
// with the ability to set a timeout on a request.
func NewGetNodeIdsParamsWithTimeout(timeout time.Duration) *GetNodeIdsParams {
	return &GetNodeIdsParams{timeout: timeout}
}

// NewGetNodeIdsParamsWithContext creates a new GetNodeIdsParams object
// with the ability to set a context for a request.
func NewGetNodeIdsParamsWithContext(ctx context.Context) *GetNodeIdsParams {
	return &GetNodeIdsParams{Context: ctx}
}

// NewGetNodeIdsParamsWithHTTPClient creates a new GetNodeIdsParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetNodeIdsParamsWithHTTPClient(client *http.Client) *GetNodeIdsParams {
	return &GetNodeIdsParams{HTTPClient: client}
}

/*
GetNodeIdsParams contains all the parameters to send to the API endpoint
for the get node ids operation.

Typically these are written to a http.Request.
*/
type GetNodeIdsParams struct {
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get node ids params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetNodeIdsParams) WithDefaults() *GetNodeIdsParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the get node ids params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetNodeIdsParams) SetDefaults() {
	// This operation declares no parameters with default values.
}

// WithTimeout adds the timeout to the get node ids params.
func (p *GetNodeIdsParams) WithTimeout(timeout time.Duration) *GetNodeIdsParams {
	p.SetTimeout(timeout)
	return p
}

// SetTimeout adds the timeout to the get node ids params.
func (p *GetNodeIdsParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the get node ids params.
func (p *GetNodeIdsParams) WithContext(ctx context.Context) *GetNodeIdsParams {
	p.SetContext(ctx)
	return p
}

// SetContext adds the context to the get node ids params.
func (p *GetNodeIdsParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get node ids params.
func (p *GetNodeIdsParams) WithHTTPClient(client *http.Client) *GetNodeIdsParams {
	p.SetHTTPClient(client)
	return p
}

// SetHTTPClient adds the HTTPClient to the get node ids params.
func (p *GetNodeIdsParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request.
func (p *GetNodeIdsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	// No operation parameters to serialize; res stays empty by construction.
	var res []error
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetNodeIdsReader is a Reader for the GetNodeIds structure.
type GetNodeIdsReader struct {
	formats strfmt.Registry
}

// ReadResponse demultiplexes a server response into the matching typed response object.
func (rd *GetNodeIdsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	// 200 is the only documented status for this operation; anything else
	// is surfaced as a generic API error.
	if response.Code() == 200 {
		res := NewGetNodeIdsOK()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return res, nil
	}
	return nil, runtime.NewAPIError("[GET /node/ids] GetNodeIds", response, response.Code())
}
// NewGetNodeIdsOK constructs an empty GetNodeIdsOK response object with default header values.
func NewGetNodeIdsOK() *GetNodeIdsOK {
	return &GetNodeIdsOK{}
}

/*
GetNodeIdsOK describes a response with status code 200, with default header values.

Success
*/
type GetNodeIdsOK struct {
	Payload []*models.NodeID
}

// IsSuccess reports whether this get node ids o k response has a 2xx status code.
func (g *GetNodeIdsOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this get node ids o k response has a 3xx status code.
func (g *GetNodeIdsOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this get node ids o k response has a 4xx status code.
func (g *GetNodeIdsOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this get node ids o k response has a 5xx status code.
func (g *GetNodeIdsOK) IsServerError() bool {
	return false
}

// IsCode reports whether this get node ids o k response has a status code equal to the given one.
func (g *GetNodeIdsOK) IsCode(code int) bool {
	return code == 200
}

// Code returns the HTTP status code of the get node ids o k response.
func (g *GetNodeIdsOK) Code() int {
	return 200
}

// Error renders the response as an error string; identical to String.
func (g *GetNodeIdsOK) Error() string {
	return g.String()
}

// String renders the response, including its JSON-encoded payload.
func (g *GetNodeIdsOK) String() string {
	body, _ := json.Marshal(g.Payload)
	return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %s", 200, body)
}

// GetPayload returns the decoded node ID list carried by the response.
func (g *GetNodeIdsOK) GetPayload() []*models.NodeID {
	return g.Payload
}

// readResponse decodes the HTTP response body into the payload slice.
func (g *GetNodeIdsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF simply means an empty body, which is tolerated here.
	if err := consumer.Consume(response.Body(), &g.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewPatchConfigParams creates a new PatchConfigParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewPatchConfigParams() *PatchConfigParams {
	return &PatchConfigParams{timeout: cr.DefaultTimeout}
}

// NewPatchConfigParamsWithTimeout creates a new PatchConfigParams object
// with the ability to set a timeout on a request.
func NewPatchConfigParamsWithTimeout(timeout time.Duration) *PatchConfigParams {
	return &PatchConfigParams{timeout: timeout}
}

// NewPatchConfigParamsWithContext creates a new PatchConfigParams object
// with the ability to set a context for a request.
func NewPatchConfigParamsWithContext(ctx context.Context) *PatchConfigParams {
	return &PatchConfigParams{Context: ctx}
}

// NewPatchConfigParamsWithHTTPClient creates a new PatchConfigParams object
// with the ability to set a custom HTTPClient for a request.
func NewPatchConfigParamsWithHTTPClient(client *http.Client) *PatchConfigParams {
	return &PatchConfigParams{HTTPClient: client}
}

/*
PatchConfigParams contains all the parameters to send to the API endpoint
for the patch config operation.

Typically these are written to a http.Request.
*/
type PatchConfigParams struct {
	// Configuration is the daemon configuration spec sent as the request body.
	Configuration *models.DaemonConfigurationSpec

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the patch config params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *PatchConfigParams) WithDefaults() *PatchConfigParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the patch config params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *PatchConfigParams) SetDefaults() {
	// This operation declares no parameters with default values.
}

// WithTimeout adds the timeout to the patch config params.
func (p *PatchConfigParams) WithTimeout(timeout time.Duration) *PatchConfigParams {
	p.SetTimeout(timeout)
	return p
}

// SetTimeout adds the timeout to the patch config params.
func (p *PatchConfigParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the patch config params.
func (p *PatchConfigParams) WithContext(ctx context.Context) *PatchConfigParams {
	p.SetContext(ctx)
	return p
}

// SetContext adds the context to the patch config params.
func (p *PatchConfigParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the patch config params.
func (p *PatchConfigParams) WithHTTPClient(client *http.Client) *PatchConfigParams {
	p.SetHTTPClient(client)
	return p
}

// SetHTTPClient adds the HTTPClient to the patch config params.
func (p *PatchConfigParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithConfiguration adds the configuration to the patch config params.
func (p *PatchConfigParams) WithConfiguration(configuration *models.DaemonConfigurationSpec) *PatchConfigParams {
	p.SetConfiguration(configuration)
	return p
}

// SetConfiguration adds the configuration to the patch config params.
func (p *PatchConfigParams) SetConfiguration(configuration *models.DaemonConfigurationSpec) {
	p.Configuration = configuration
}

// WriteToRequest writes these params to a swagger request.
func (p *PatchConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	var res []error

	// The configuration, when present, is serialized as the request body.
	if p.Configuration != nil {
		if err := r.SetBodyParam(p.Configuration); err != nil {
			return err
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PatchConfigReader is a Reader for the PatchConfig structure.
type PatchConfigReader struct {
	formats strfmt.Registry
}

// ReadResponse demultiplexes a server response into the matching typed
// response object. Success (200) is returned as the result; documented
// error statuses are returned as Go errors.
func (rd *PatchConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		res := NewPatchConfigOK()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return res, nil
	case 400:
		res := NewPatchConfigBadRequest()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 403:
		res := NewPatchConfigForbidden()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 500:
		res := NewPatchConfigFailure()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return nil, res
	default:
		// Any undocumented status is surfaced as a generic API error.
		return nil, runtime.NewAPIError("[PATCH /config] PatchConfig", response, response.Code())
	}
}
// NewPatchConfigOK constructs an empty PatchConfigOK response object with default header values.
func NewPatchConfigOK() *PatchConfigOK {
	return &PatchConfigOK{}
}

/*
PatchConfigOK describes a response with status code 200, with default header values.

Success
*/
type PatchConfigOK struct {
}

// IsSuccess reports whether this patch config o k response has a 2xx status code.
func (p *PatchConfigOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this patch config o k response has a 3xx status code.
func (p *PatchConfigOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this patch config o k response has a 4xx status code.
func (p *PatchConfigOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this patch config o k response has a 5xx status code.
func (p *PatchConfigOK) IsServerError() bool {
	return false
}

// IsCode reports whether this patch config o k response has a status code equal to the given one.
func (p *PatchConfigOK) IsCode(code int) bool {
	return code == 200
}

// Code returns the HTTP status code of the patch config o k response.
func (p *PatchConfigOK) Code() int {
	return 200
}

// Error renders the response as an error string; identical to String.
func (p *PatchConfigOK) Error() string {
	return p.String()
}

// String renders the response.
func (p *PatchConfigOK) String() string {
	return fmt.Sprintf("[PATCH /config][%d] patchConfigOK", 200)
}

// readResponse is a no-op: this response carries no body.
func (p *PatchConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchConfigBadRequest constructs an empty PatchConfigBadRequest response object with default header values.
func NewPatchConfigBadRequest() *PatchConfigBadRequest {
	return &PatchConfigBadRequest{}
}

/*
PatchConfigBadRequest describes a response with status code 400, with default header values.

Bad configuration parameters
*/
type PatchConfigBadRequest struct {
	Payload models.Error
}

// IsSuccess reports whether this patch config bad request response has a 2xx status code.
func (p *PatchConfigBadRequest) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this patch config bad request response has a 3xx status code.
func (p *PatchConfigBadRequest) IsRedirect() bool {
	return false
}

// IsClientError reports whether this patch config bad request response has a 4xx status code.
func (p *PatchConfigBadRequest) IsClientError() bool {
	return true
}

// IsServerError reports whether this patch config bad request response has a 5xx status code.
func (p *PatchConfigBadRequest) IsServerError() bool {
	return false
}

// IsCode reports whether this patch config bad request response has a status code equal to the given one.
func (p *PatchConfigBadRequest) IsCode(code int) bool {
	return code == 400
}

// Code returns the HTTP status code of the patch config bad request response.
func (p *PatchConfigBadRequest) Code() int {
	return 400
}

// Error renders the response as an error string; identical to String.
func (p *PatchConfigBadRequest) Error() string {
	return p.String()
}

// String renders the response, including its JSON-encoded payload.
func (p *PatchConfigBadRequest) String() string {
	body, _ := json.Marshal(p.Payload)
	return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %s", 400, body)
}

// GetPayload returns the error model carried by the response.
func (p *PatchConfigBadRequest) GetPayload() models.Error {
	return p.Payload
}

// readResponse decodes the HTTP response body into the payload.
func (p *PatchConfigBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF simply means an empty body, which is tolerated here.
	if err := consumer.Consume(response.Body(), &p.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPatchConfigForbidden constructs an empty PatchConfigForbidden response object with default header values.
func NewPatchConfigForbidden() *PatchConfigForbidden {
	return &PatchConfigForbidden{}
}

/*
PatchConfigForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PatchConfigForbidden struct {
}

// IsSuccess reports whether this patch config forbidden response has a 2xx status code.
func (p *PatchConfigForbidden) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this patch config forbidden response has a 3xx status code.
func (p *PatchConfigForbidden) IsRedirect() bool {
	return false
}

// IsClientError reports whether this patch config forbidden response has a 4xx status code.
func (p *PatchConfigForbidden) IsClientError() bool {
	return true
}

// IsServerError reports whether this patch config forbidden response has a 5xx status code.
func (p *PatchConfigForbidden) IsServerError() bool {
	return false
}

// IsCode reports whether this patch config forbidden response has a status code equal to the given one.
func (p *PatchConfigForbidden) IsCode(code int) bool {
	return code == 403
}

// Code returns the HTTP status code of the patch config forbidden response.
func (p *PatchConfigForbidden) Code() int {
	return 403
}

// Error renders the response as an error string; identical to String.
func (p *PatchConfigForbidden) Error() string {
	return p.String()
}

// String renders the response.
func (p *PatchConfigForbidden) String() string {
	return fmt.Sprintf("[PATCH /config][%d] patchConfigForbidden", 403)
}

// readResponse is a no-op: this response carries no body.
func (p *PatchConfigForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchConfigFailure constructs an empty PatchConfigFailure response object with default header values.
func NewPatchConfigFailure() *PatchConfigFailure {
	return &PatchConfigFailure{}
}

/*
PatchConfigFailure describes a response with status code 500, with default header values.

Recompilation failed
*/
type PatchConfigFailure struct {
	Payload models.Error
}

// IsSuccess reports whether this patch config failure response has a 2xx status code.
func (p *PatchConfigFailure) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this patch config failure response has a 3xx status code.
func (p *PatchConfigFailure) IsRedirect() bool {
	return false
}

// IsClientError reports whether this patch config failure response has a 4xx status code.
func (p *PatchConfigFailure) IsClientError() bool {
	return false
}

// IsServerError reports whether this patch config failure response has a 5xx status code.
func (p *PatchConfigFailure) IsServerError() bool {
	return true
}

// IsCode reports whether this patch config failure response has a status code equal to the given one.
func (p *PatchConfigFailure) IsCode(code int) bool {
	return code == 500
}

// Code returns the HTTP status code of the patch config failure response.
func (p *PatchConfigFailure) Code() int {
	return 500
}

// Error renders the response as an error string; identical to String.
func (p *PatchConfigFailure) Error() string {
	return p.String()
}

// String renders the response, including its JSON-encoded payload.
func (p *PatchConfigFailure) String() string {
	body, _ := json.Marshal(p.Payload)
	return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %s", 500, body)
}

// GetPayload returns the error model carried by the response.
func (p *PatchConfigFailure) GetPayload() models.Error {
	return p.Payload
}

// readResponse decodes the HTTP response body into the payload.
func (p *PatchConfigFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF simply means an empty body, which is tolerated here.
	if err := consumer.Consume(response.Body(), &p.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewDeleteEndpointIDParams creates a new DeleteEndpointIDParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewDeleteEndpointIDParams() *DeleteEndpointIDParams {
	return &DeleteEndpointIDParams{timeout: cr.DefaultTimeout}
}

// NewDeleteEndpointIDParamsWithTimeout creates a new DeleteEndpointIDParams object
// with the ability to set a timeout on a request.
func NewDeleteEndpointIDParamsWithTimeout(timeout time.Duration) *DeleteEndpointIDParams {
	return &DeleteEndpointIDParams{timeout: timeout}
}

// NewDeleteEndpointIDParamsWithContext creates a new DeleteEndpointIDParams object
// with the ability to set a context for a request.
func NewDeleteEndpointIDParamsWithContext(ctx context.Context) *DeleteEndpointIDParams {
	return &DeleteEndpointIDParams{Context: ctx}
}

// NewDeleteEndpointIDParamsWithHTTPClient creates a new DeleteEndpointIDParams object
// with the ability to set a custom HTTPClient for a request.
func NewDeleteEndpointIDParamsWithHTTPClient(client *http.Client) *DeleteEndpointIDParams {
	return &DeleteEndpointIDParams{HTTPClient: client}
}

/*
DeleteEndpointIDParams contains all the parameters to send to the API endpoint
for the delete endpoint ID operation.

Typically these are written to a http.Request.
*/
type DeleteEndpointIDParams struct {
	/* ID.

	   String describing an endpoint with the format ``[prefix:]id``. If no prefix
	   is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
	   will be addressable by all endpoint ID prefixes with the exception of the
	   local Cilium UUID which is assigned to all endpoints.

	   Supported endpoint id prefixes:
	     - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
	     - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
	     - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
	     - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
	     - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
	     - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
	     - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
	     - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
	*/
	ID string

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the delete endpoint ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *DeleteEndpointIDParams) WithDefaults() *DeleteEndpointIDParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the delete endpoint ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *DeleteEndpointIDParams) SetDefaults() {
	// This operation declares no parameters with default values.
}

// WithTimeout adds the timeout to the delete endpoint ID params.
func (p *DeleteEndpointIDParams) WithTimeout(timeout time.Duration) *DeleteEndpointIDParams {
	p.SetTimeout(timeout)
	return p
}

// SetTimeout adds the timeout to the delete endpoint ID params.
func (p *DeleteEndpointIDParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the delete endpoint ID params.
func (p *DeleteEndpointIDParams) WithContext(ctx context.Context) *DeleteEndpointIDParams {
	p.SetContext(ctx)
	return p
}

// SetContext adds the context to the delete endpoint ID params.
func (p *DeleteEndpointIDParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the delete endpoint ID params.
func (p *DeleteEndpointIDParams) WithHTTPClient(client *http.Client) *DeleteEndpointIDParams {
	p.SetHTTPClient(client)
	return p
}

// SetHTTPClient adds the HTTPClient to the delete endpoint ID params.
func (p *DeleteEndpointIDParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithID adds the id to the delete endpoint ID params.
func (p *DeleteEndpointIDParams) WithID(id string) *DeleteEndpointIDParams {
	p.SetID(id)
	return p
}

// SetID adds the id to the delete endpoint ID params.
func (p *DeleteEndpointIDParams) SetID(id string) {
	p.ID = id
}

// WriteToRequest writes these params to a swagger request.
func (p *DeleteEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	var res []error

	// The endpoint identifier is serialized as the {id} path parameter.
	if err := r.SetPathParam("id", p.ID); err != nil {
		return err
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// DeleteEndpointIDReader is a Reader for the DeleteEndpointID structure.
type DeleteEndpointIDReader struct {
	formats strfmt.Registry
}

// ReadResponse demultiplexes a server response into the matching typed
// response object. Success statuses (200, 206) are returned as results;
// documented error statuses are returned as Go errors.
func (rd *DeleteEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		res := NewDeleteEndpointIDOK()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return res, nil
	case 206:
		res := NewDeleteEndpointIDErrors()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return res, nil
	case 400:
		res := NewDeleteEndpointIDInvalid()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 403:
		res := NewDeleteEndpointIDForbidden()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 404:
		res := NewDeleteEndpointIDNotFound()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 429:
		res := NewDeleteEndpointIDTooManyRequests()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 503:
		res := NewDeleteEndpointIDServiceUnavailable()
		if err := res.readResponse(response, consumer, rd.formats); err != nil {
			return nil, err
		}
		return nil, res
	default:
		// Any undocumented status is surfaced as a generic API error.
		return nil, runtime.NewAPIError("[DELETE /endpoint/{id}] DeleteEndpointID", response, response.Code())
	}
}
// NewDeleteEndpointIDOK constructs an empty DeleteEndpointIDOK response object with default header values.
func NewDeleteEndpointIDOK() *DeleteEndpointIDOK {
	return &DeleteEndpointIDOK{}
}

/*
DeleteEndpointIDOK describes a response with status code 200, with default header values.

Success
*/
type DeleteEndpointIDOK struct {
}

// IsSuccess reports whether this delete endpoint Id o k response has a 2xx status code.
func (d *DeleteEndpointIDOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this delete endpoint Id o k response has a 3xx status code.
func (d *DeleteEndpointIDOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this delete endpoint Id o k response has a 4xx status code.
func (d *DeleteEndpointIDOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this delete endpoint Id o k response has a 5xx status code.
func (d *DeleteEndpointIDOK) IsServerError() bool {
	return false
}

// IsCode reports whether this delete endpoint Id o k response has a status code equal to the given one.
func (d *DeleteEndpointIDOK) IsCode(code int) bool {
	return code == 200
}

// Code returns the HTTP status code of the delete endpoint Id o k response.
func (d *DeleteEndpointIDOK) Code() int {
	return 200
}

// Error renders the response as an error string; identical to String.
func (d *DeleteEndpointIDOK) Error() string {
	return d.String()
}

// String renders the response.
func (d *DeleteEndpointIDOK) String() string {
	return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK", 200)
}

// readResponse is a no-op: this response carries no body.
func (d *DeleteEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteEndpointIDErrors constructs an empty DeleteEndpointIDErrors response object with default header values.
func NewDeleteEndpointIDErrors() *DeleteEndpointIDErrors {
	return &DeleteEndpointIDErrors{}
}

/*
DeleteEndpointIDErrors describes a response with status code 206, with default header values.

Deleted with a number of errors encountered
*/
type DeleteEndpointIDErrors struct {
	Payload int64
}

// IsSuccess reports whether this delete endpoint Id errors response has a 2xx status code.
func (d *DeleteEndpointIDErrors) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this delete endpoint Id errors response has a 3xx status code.
func (d *DeleteEndpointIDErrors) IsRedirect() bool {
	return false
}

// IsClientError reports whether this delete endpoint Id errors response has a 4xx status code.
func (d *DeleteEndpointIDErrors) IsClientError() bool {
	return false
}

// IsServerError reports whether this delete endpoint Id errors response has a 5xx status code.
func (d *DeleteEndpointIDErrors) IsServerError() bool {
	return false
}

// IsCode reports whether this delete endpoint Id errors response has a status code equal to the given one.
func (d *DeleteEndpointIDErrors) IsCode(code int) bool {
	return code == 206
}

// Code returns the HTTP status code of the delete endpoint Id errors response.
func (d *DeleteEndpointIDErrors) Code() int {
	return 206
}

// Error renders the response as an error string; identical to String.
func (d *DeleteEndpointIDErrors) Error() string {
	return d.String()
}

// String renders the response, including its JSON-encoded payload.
func (d *DeleteEndpointIDErrors) String() string {
	body, _ := json.Marshal(d.Payload)
	return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %s", 206, body)
}

// GetPayload returns the error count carried by the response.
func (d *DeleteEndpointIDErrors) GetPayload() int64 {
	return d.Payload
}

// readResponse decodes the HTTP response body into the payload.
func (d *DeleteEndpointIDErrors) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF simply means an empty body, which is tolerated here.
	if err := consumer.Consume(response.Body(), &d.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewDeleteEndpointIDInvalid constructs an empty DeleteEndpointIDInvalid response object with default header values.
func NewDeleteEndpointIDInvalid() *DeleteEndpointIDInvalid {
	return &DeleteEndpointIDInvalid{}
}

/*
DeleteEndpointIDInvalid describes a response with status code 400, with default header values.

Invalid endpoint ID format for specified type. Details in error
message
*/
type DeleteEndpointIDInvalid struct {
	Payload models.Error
}

// IsSuccess reports whether this delete endpoint Id invalid response has a 2xx status code.
func (d *DeleteEndpointIDInvalid) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this delete endpoint Id invalid response has a 3xx status code.
func (d *DeleteEndpointIDInvalid) IsRedirect() bool {
	return false
}

// IsClientError reports whether this delete endpoint Id invalid response has a 4xx status code.
func (d *DeleteEndpointIDInvalid) IsClientError() bool {
	return true
}

// IsServerError reports whether this delete endpoint Id invalid response has a 5xx status code.
func (d *DeleteEndpointIDInvalid) IsServerError() bool {
	return false
}

// IsCode reports whether this delete endpoint Id invalid response has a status code equal to the given one.
func (d *DeleteEndpointIDInvalid) IsCode(code int) bool {
	return code == 400
}

// Code returns the HTTP status code of the delete endpoint Id invalid response.
func (d *DeleteEndpointIDInvalid) Code() int {
	return 400
}

// Error renders the response as an error string; identical to String.
func (d *DeleteEndpointIDInvalid) Error() string {
	return d.String()
}

// String renders the response, including its JSON-encoded payload.
func (d *DeleteEndpointIDInvalid) String() string {
	body, _ := json.Marshal(d.Payload)
	return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %s", 400, body)
}

// GetPayload returns the error model carried by the response.
func (d *DeleteEndpointIDInvalid) GetPayload() models.Error {
	return d.Payload
}

// readResponse decodes the HTTP response body into the payload.
func (d *DeleteEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF simply means an empty body, which is tolerated here.
	if err := consumer.Consume(response.Body(), &d.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewDeleteEndpointIDForbidden constructs a DeleteEndpointIDForbidden with default headers values.
func NewDeleteEndpointIDForbidden() *DeleteEndpointIDForbidden {
	var r DeleteEndpointIDForbidden
	return &r
}

/*
DeleteEndpointIDForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type DeleteEndpointIDForbidden struct {
}

// IsSuccess reports whether this response has a 2xx status code; always false for 403.
func (o *DeleteEndpointIDForbidden) IsSuccess() bool { return false }

// IsRedirect reports whether this response has a 3xx status code; always false for 403.
func (o *DeleteEndpointIDForbidden) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always true for 403.
func (o *DeleteEndpointIDForbidden) IsClientError() bool { return true }

// IsServerError reports whether this response has a 5xx status code; always false for 403.
func (o *DeleteEndpointIDForbidden) IsServerError() bool { return false }

// IsCode reports whether the given status code equals this response's 403.
func (o *DeleteEndpointIDForbidden) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointIDForbidden) Code() int { return 403 }

// Error renders the response as an error string.
func (o *DeleteEndpointIDForbidden) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointIDForbidden) String() string {
	return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdForbidden", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteEndpointIDNotFound constructs a DeleteEndpointIDNotFound with default headers values.
func NewDeleteEndpointIDNotFound() *DeleteEndpointIDNotFound {
	var r DeleteEndpointIDNotFound
	return &r
}

/*
DeleteEndpointIDNotFound describes a response with status code 404, with default header values.

Endpoint not found
*/
type DeleteEndpointIDNotFound struct {
}

// IsSuccess reports whether this response has a 2xx status code; always false for 404.
func (o *DeleteEndpointIDNotFound) IsSuccess() bool { return false }

// IsRedirect reports whether this response has a 3xx status code; always false for 404.
func (o *DeleteEndpointIDNotFound) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always true for 404.
func (o *DeleteEndpointIDNotFound) IsClientError() bool { return true }

// IsServerError reports whether this response has a 5xx status code; always false for 404.
func (o *DeleteEndpointIDNotFound) IsServerError() bool { return false }

// IsCode reports whether the given status code equals this response's 404.
func (o *DeleteEndpointIDNotFound) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointIDNotFound) Code() int { return 404 }

// Error renders the response as an error string.
func (o *DeleteEndpointIDNotFound) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointIDNotFound) String() string {
	return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteEndpointIDTooManyRequests constructs a DeleteEndpointIDTooManyRequests with default headers values.
func NewDeleteEndpointIDTooManyRequests() *DeleteEndpointIDTooManyRequests {
	var r DeleteEndpointIDTooManyRequests
	return &r
}

/*
DeleteEndpointIDTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type DeleteEndpointIDTooManyRequests struct {
}

// IsSuccess reports whether this response has a 2xx status code; always false for 429.
func (o *DeleteEndpointIDTooManyRequests) IsSuccess() bool { return false }

// IsRedirect reports whether this response has a 3xx status code; always false for 429.
func (o *DeleteEndpointIDTooManyRequests) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always true for 429.
func (o *DeleteEndpointIDTooManyRequests) IsClientError() bool { return true }

// IsServerError reports whether this response has a 5xx status code; always false for 429.
func (o *DeleteEndpointIDTooManyRequests) IsServerError() bool { return false }

// IsCode reports whether the given status code equals this response's 429.
func (o *DeleteEndpointIDTooManyRequests) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointIDTooManyRequests) Code() int { return 429 }

// Error renders the response as an error string.
func (o *DeleteEndpointIDTooManyRequests) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointIDTooManyRequests) String() string {
	return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteEndpointIDServiceUnavailable constructs a DeleteEndpointIDServiceUnavailable with default headers values.
func NewDeleteEndpointIDServiceUnavailable() *DeleteEndpointIDServiceUnavailable {
	var r DeleteEndpointIDServiceUnavailable
	return &r
}

/*
DeleteEndpointIDServiceUnavailable describes a response with status code 503, with default header values.

Service Unavailable
*/
type DeleteEndpointIDServiceUnavailable struct {
}

// IsSuccess reports whether this response has a 2xx status code; always false for 503.
func (o *DeleteEndpointIDServiceUnavailable) IsSuccess() bool { return false }

// IsRedirect reports whether this response has a 3xx status code; always false for 503.
func (o *DeleteEndpointIDServiceUnavailable) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always false for 503.
func (o *DeleteEndpointIDServiceUnavailable) IsClientError() bool { return false }

// IsServerError reports whether this response has a 5xx status code; always true for 503.
func (o *DeleteEndpointIDServiceUnavailable) IsServerError() bool { return true }

// IsCode reports whether the given status code equals this response's 503.
func (o *DeleteEndpointIDServiceUnavailable) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointIDServiceUnavailable) Code() int { return 503 }

// Error renders the response as an error string.
func (o *DeleteEndpointIDServiceUnavailable) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointIDServiceUnavailable) String() string {
	return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdServiceUnavailable", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointIDServiceUnavailable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewDeleteEndpointParams creates a new DeleteEndpointParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewDeleteEndpointParams() *DeleteEndpointParams {
	p := DeleteEndpointParams{timeout: cr.DefaultTimeout}
	return &p
}

// NewDeleteEndpointParamsWithTimeout creates a new DeleteEndpointParams object
// with the ability to set a timeout on a request.
func NewDeleteEndpointParamsWithTimeout(timeout time.Duration) *DeleteEndpointParams {
	p := DeleteEndpointParams{timeout: timeout}
	return &p
}

// NewDeleteEndpointParamsWithContext creates a new DeleteEndpointParams object
// with the ability to set a context for a request.
func NewDeleteEndpointParamsWithContext(ctx context.Context) *DeleteEndpointParams {
	p := DeleteEndpointParams{Context: ctx}
	return &p
}

// NewDeleteEndpointParamsWithHTTPClient creates a new DeleteEndpointParams object
// with the ability to set a custom HTTPClient for a request.
func NewDeleteEndpointParamsWithHTTPClient(client *http.Client) *DeleteEndpointParams {
	p := DeleteEndpointParams{HTTPClient: client}
	return &p
}
/*
DeleteEndpointParams contains all the parameters to send to the API endpoint

	for the delete endpoint operation.

Typically these are written to a http.Request.
*/
type DeleteEndpointParams struct {

	// Endpoint.
	// Body describing which endpoints to delete in a single batch request.
	Endpoint *models.EndpointBatchDeleteRequest

	// timeout is the per-request timeout applied in WriteToRequest.
	timeout time.Duration
	// Context carries cancellation and deadline signals for the request.
	Context context.Context
	// HTTPClient optionally overrides the transport's default HTTP client.
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the delete endpoint params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DeleteEndpointParams) WithDefaults() *DeleteEndpointParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the delete endpoint params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DeleteEndpointParams) SetDefaults() {
	// This operation declares no parameter defaults, so there is nothing to hydrate.
}

// WithTimeout adds the timeout to the delete endpoint params and returns the receiver for chaining.
func (o *DeleteEndpointParams) WithTimeout(timeout time.Duration) *DeleteEndpointParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the delete endpoint params.
func (o *DeleteEndpointParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the delete endpoint params and returns the receiver for chaining.
func (o *DeleteEndpointParams) WithContext(ctx context.Context) *DeleteEndpointParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the delete endpoint params.
func (o *DeleteEndpointParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the delete endpoint params and returns the receiver for chaining.
func (o *DeleteEndpointParams) WithHTTPClient(client *http.Client) *DeleteEndpointParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the delete endpoint params.
func (o *DeleteEndpointParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithEndpoint adds the endpoint body to the delete endpoint params and returns the receiver for chaining.
func (o *DeleteEndpointParams) WithEndpoint(endpoint *models.EndpointBatchDeleteRequest) *DeleteEndpointParams {
	o.Endpoint = endpoint
	return o
}

// SetEndpoint adds the endpoint body to the delete endpoint params.
func (o *DeleteEndpointParams) SetEndpoint(endpoint *models.EndpointBatchDeleteRequest) {
	o.Endpoint = endpoint
}

// WriteToRequest writes these params to a swagger request.
func (o *DeleteEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	// res aggregates validation errors; none are produced for this operation,
	// but the generator's aggregation pattern is preserved.
	var res []error
	if o.Endpoint != nil {
		if err := r.SetBodyParam(o.Endpoint); err != nil {
			return err
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
)
// DeleteEndpointReader is a Reader for the DeleteEndpoint structure.
type DeleteEndpointReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// Known success codes (200, 206) yield a typed value; known error codes
// (400, 404, 429, 503) yield a typed error; anything else becomes a
// generic runtime.APIError.
func (o *DeleteEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewDeleteEndpointOK()
		err := result.readResponse(response, consumer, o.formats)
		if err != nil {
			return nil, err
		}
		return result, nil
	case 206:
		result := NewDeleteEndpointErrors()
		err := result.readResponse(response, consumer, o.formats)
		if err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		result := NewDeleteEndpointInvalid()
		err := result.readResponse(response, consumer, o.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		result := NewDeleteEndpointNotFound()
		err := result.readResponse(response, consumer, o.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	case 429:
		result := NewDeleteEndpointTooManyRequests()
		err := result.readResponse(response, consumer, o.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	case 503:
		result := NewDeleteEndpointServiceUnavailable()
		err := result.readResponse(response, consumer, o.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[DELETE /endpoint] DeleteEndpoint", response, response.Code())
	}
}
// NewDeleteEndpointOK constructs a DeleteEndpointOK with default headers values.
func NewDeleteEndpointOK() *DeleteEndpointOK {
	var r DeleteEndpointOK
	return &r
}

/*
DeleteEndpointOK describes a response with status code 200, with default header values.

Success
*/
type DeleteEndpointOK struct {
}

// IsSuccess reports whether this response has a 2xx status code; always true for 200.
func (o *DeleteEndpointOK) IsSuccess() bool { return true }

// IsRedirect reports whether this response has a 3xx status code; always false for 200.
func (o *DeleteEndpointOK) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always false for 200.
func (o *DeleteEndpointOK) IsClientError() bool { return false }

// IsServerError reports whether this response has a 5xx status code; always false for 200.
func (o *DeleteEndpointOK) IsServerError() bool { return false }

// IsCode reports whether the given status code equals this response's 200.
func (o *DeleteEndpointOK) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointOK) Code() int { return 200 }

// Error renders the response as an error string.
func (o *DeleteEndpointOK) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointOK) String() string {
	return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointOK", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteEndpointErrors constructs a DeleteEndpointErrors with default headers values.
func NewDeleteEndpointErrors() *DeleteEndpointErrors {
	var r DeleteEndpointErrors
	return &r
}

/*
DeleteEndpointErrors describes a response with status code 206, with default header values.

Deleted with a number of errors encountered
*/
type DeleteEndpointErrors struct {
	Payload int64
}

// IsSuccess reports whether this response has a 2xx status code; always true for 206.
func (o *DeleteEndpointErrors) IsSuccess() bool { return true }

// IsRedirect reports whether this response has a 3xx status code; always false for 206.
func (o *DeleteEndpointErrors) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always false for 206.
func (o *DeleteEndpointErrors) IsClientError() bool { return false }

// IsServerError reports whether this response has a 5xx status code; always false for 206.
func (o *DeleteEndpointErrors) IsServerError() bool { return false }

// IsCode reports whether the given status code equals this response's 206.
func (o *DeleteEndpointErrors) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointErrors) Code() int { return 206 }

// Error renders the response as an error string, including the JSON payload.
func (o *DeleteEndpointErrors) Error() string { return o.String() }

// String renders the operation, status code, identifier and JSON payload.
func (o *DeleteEndpointErrors) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointErrors %s", o.Code(), payload)
}

// GetPayload returns the error count decoded from the response body.
func (o *DeleteEndpointErrors) GetPayload() int64 {
	return o.Payload
}

// readResponse decodes the body into the payload; EOF means an empty body and is accepted.
func (o *DeleteEndpointErrors) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewDeleteEndpointInvalid constructs a DeleteEndpointInvalid with default headers values.
func NewDeleteEndpointInvalid() *DeleteEndpointInvalid {
	var r DeleteEndpointInvalid
	return &r
}

/*
DeleteEndpointInvalid describes a response with status code 400, with default header values.

Invalid endpoint delete request
*/
type DeleteEndpointInvalid struct {
}

// IsSuccess reports whether this response has a 2xx status code; always false for 400.
func (o *DeleteEndpointInvalid) IsSuccess() bool { return false }

// IsRedirect reports whether this response has a 3xx status code; always false for 400.
func (o *DeleteEndpointInvalid) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always true for 400.
func (o *DeleteEndpointInvalid) IsClientError() bool { return true }

// IsServerError reports whether this response has a 5xx status code; always false for 400.
func (o *DeleteEndpointInvalid) IsServerError() bool { return false }

// IsCode reports whether the given status code equals this response's 400.
func (o *DeleteEndpointInvalid) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointInvalid) Code() int { return 400 }

// Error renders the response as an error string.
func (o *DeleteEndpointInvalid) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointInvalid) String() string {
	return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointInvalid", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteEndpointNotFound constructs a DeleteEndpointNotFound with default headers values.
func NewDeleteEndpointNotFound() *DeleteEndpointNotFound {
	var r DeleteEndpointNotFound
	return &r
}

/*
DeleteEndpointNotFound describes a response with status code 404, with default header values.

No endpoints with provided parameters found
*/
type DeleteEndpointNotFound struct {
}

// IsSuccess reports whether this response has a 2xx status code; always false for 404.
func (o *DeleteEndpointNotFound) IsSuccess() bool { return false }

// IsRedirect reports whether this response has a 3xx status code; always false for 404.
func (o *DeleteEndpointNotFound) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always true for 404.
func (o *DeleteEndpointNotFound) IsClientError() bool { return true }

// IsServerError reports whether this response has a 5xx status code; always false for 404.
func (o *DeleteEndpointNotFound) IsServerError() bool { return false }

// IsCode reports whether the given status code equals this response's 404.
func (o *DeleteEndpointNotFound) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointNotFound) Code() int { return 404 }

// Error renders the response as an error string.
func (o *DeleteEndpointNotFound) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointNotFound) String() string {
	return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointNotFound", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteEndpointTooManyRequests constructs a DeleteEndpointTooManyRequests with default headers values.
func NewDeleteEndpointTooManyRequests() *DeleteEndpointTooManyRequests {
	var r DeleteEndpointTooManyRequests
	return &r
}

/*
DeleteEndpointTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type DeleteEndpointTooManyRequests struct {
}

// IsSuccess reports whether this response has a 2xx status code; always false for 429.
func (o *DeleteEndpointTooManyRequests) IsSuccess() bool { return false }

// IsRedirect reports whether this response has a 3xx status code; always false for 429.
func (o *DeleteEndpointTooManyRequests) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always true for 429.
func (o *DeleteEndpointTooManyRequests) IsClientError() bool { return true }

// IsServerError reports whether this response has a 5xx status code; always false for 429.
func (o *DeleteEndpointTooManyRequests) IsServerError() bool { return false }

// IsCode reports whether the given status code equals this response's 429.
func (o *DeleteEndpointTooManyRequests) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointTooManyRequests) Code() int { return 429 }

// Error renders the response as an error string.
func (o *DeleteEndpointTooManyRequests) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointTooManyRequests) String() string {
	return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointTooManyRequests", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteEndpointServiceUnavailable constructs a DeleteEndpointServiceUnavailable with default headers values.
func NewDeleteEndpointServiceUnavailable() *DeleteEndpointServiceUnavailable {
	var r DeleteEndpointServiceUnavailable
	return &r
}

/*
DeleteEndpointServiceUnavailable describes a response with status code 503, with default header values.

Service Unavailable
*/
type DeleteEndpointServiceUnavailable struct {
}

// IsSuccess reports whether this response has a 2xx status code; always false for 503.
func (o *DeleteEndpointServiceUnavailable) IsSuccess() bool { return false }

// IsRedirect reports whether this response has a 3xx status code; always false for 503.
func (o *DeleteEndpointServiceUnavailable) IsRedirect() bool { return false }

// IsClientError reports whether this response has a 4xx status code; always false for 503.
func (o *DeleteEndpointServiceUnavailable) IsClientError() bool { return false }

// IsServerError reports whether this response has a 5xx status code; always true for 503.
func (o *DeleteEndpointServiceUnavailable) IsServerError() bool { return true }

// IsCode reports whether the given status code equals this response's 503.
func (o *DeleteEndpointServiceUnavailable) IsCode(code int) bool { return code == o.Code() }

// Code returns the HTTP status code of this response.
func (o *DeleteEndpointServiceUnavailable) Code() int { return 503 }

// Error renders the response as an error string.
func (o *DeleteEndpointServiceUnavailable) Error() string { return o.String() }

// String renders the operation, status code and response identifier.
func (o *DeleteEndpointServiceUnavailable) String() string {
	return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointServiceUnavailable", o.Code())
}

// readResponse is a no-op: this response carries no payload to decode.
func (o *DeleteEndpointServiceUnavailable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new endpoint API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	return &Client{transport: transport, formats: formats}
}

// NewClientWithBasicAuth creates a new endpoint API client with basic auth credentials.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - user: user for basic authentication header.
// - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: transport, formats: strfmt.Default}
}

// NewClientWithBearerToken creates a new endpoint API client with a bearer token for authentication.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: transport, formats: strfmt.Default}
}
/*
Client for endpoint API
*/
type Client struct {
	// transport submits operations to the API server.
	transport runtime.ClientTransport
	// formats holds the string-format registry used when decoding responses.
	formats strfmt.Registry
}

// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods.
//
// Each operation method takes its generated params struct plus optional
// ClientOptions that may customize the underlying runtime.ClientOperation.
// DeleteEndpoint and DeleteEndpointID return two result values because the
// API distinguishes a clean success (200) from a partial success (202/206).
type ClientService interface {
	DeleteEndpoint(params *DeleteEndpointParams, opts ...ClientOption) (*DeleteEndpointOK, *DeleteEndpointErrors, error)

	DeleteEndpointID(params *DeleteEndpointIDParams, opts ...ClientOption) (*DeleteEndpointIDOK, *DeleteEndpointIDErrors, error)

	GetEndpoint(params *GetEndpointParams, opts ...ClientOption) (*GetEndpointOK, error)

	GetEndpointID(params *GetEndpointIDParams, opts ...ClientOption) (*GetEndpointIDOK, error)

	GetEndpointIDConfig(params *GetEndpointIDConfigParams, opts ...ClientOption) (*GetEndpointIDConfigOK, error)

	GetEndpointIDHealthz(params *GetEndpointIDHealthzParams, opts ...ClientOption) (*GetEndpointIDHealthzOK, error)

	GetEndpointIDLabels(params *GetEndpointIDLabelsParams, opts ...ClientOption) (*GetEndpointIDLabelsOK, error)

	GetEndpointIDLog(params *GetEndpointIDLogParams, opts ...ClientOption) (*GetEndpointIDLogOK, error)

	PatchEndpointID(params *PatchEndpointIDParams, opts ...ClientOption) (*PatchEndpointIDOK, error)

	PatchEndpointIDConfig(params *PatchEndpointIDConfigParams, opts ...ClientOption) (*PatchEndpointIDConfigOK, error)

	PatchEndpointIDLabels(params *PatchEndpointIDLabelsParams, opts ...ClientOption) (*PatchEndpointIDLabelsOK, error)

	PutEndpointID(params *PutEndpointIDParams, opts ...ClientOption) (*PutEndpointIDCreated, error)

	// SetTransport changes the transport on the client.
	SetTransport(transport runtime.ClientTransport)
}
/*
DeleteEndpoint deletes a list of endpoints

Deletes a list of endpoints that have endpoints matching the provided properties
*/
func (a *Client) DeleteEndpoint(params *DeleteEndpointParams, opts ...ClientOption) (*DeleteEndpointOK, *DeleteEndpointErrors, error) {
	// TODO: Validate the params before sending
	// A nil params falls back to the client defaults.
	if params == nil {
		params = NewDeleteEndpointParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "DeleteEndpoint",
		Method:             "DELETE",
		PathPattern:        "/endpoint",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &DeleteEndpointReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, nil, err
	}
	if okResp, is := result.(*DeleteEndpointOK); is {
		return okResp, nil, nil
	}
	if errsResp, is := result.(*DeleteEndpointErrors); is {
		return nil, errsResp, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for endpoint: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
DeleteEndpointID deletes endpoint

	Deletes the endpoint specified by the ID. Deletion is imminent and

atomic, if the deletion request is valid and the endpoint exists,
deletion will occur even if errors are encountered in the process. If
errors have been encountered, the code 202 will be returned, otherwise
200 on success.

All resources associated with the endpoint will be freed and the
workload represented by the endpoint will be disconnected.It will no
longer be able to initiate or receive communications of any sort.
*/
func (a *Client) DeleteEndpointID(params *DeleteEndpointIDParams, opts ...ClientOption) (*DeleteEndpointIDOK, *DeleteEndpointIDErrors, error) {
	// TODO: Validate the params before sending
	// A nil params falls back to the client defaults.
	if params == nil {
		params = NewDeleteEndpointIDParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "DeleteEndpointID",
		Method:             "DELETE",
		PathPattern:        "/endpoint/{id}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &DeleteEndpointIDReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, nil, err
	}
	if okResp, is := result.(*DeleteEndpointIDOK); is {
		return okResp, nil, nil
	}
	if errsResp, is := result.(*DeleteEndpointIDErrors); is {
		return nil, errsResp, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for endpoint: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
/*
GetEndpoint retrieves a list of endpoints that have metadata matching the provided parameters

Retrieves a list of endpoints that have metadata matching the provided parameters, or all endpoints if no parameters provided.
*/
func (a *Client) GetEndpoint(params *GetEndpointParams, opts ...ClientOption) (*GetEndpointOK, error) {
	// TODO: Validate the params before sending
	// A nil params falls back to the client defaults.
	if params == nil {
		params = NewGetEndpointParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetEndpoint",
		Method:             "GET",
		PathPattern:        "/endpoint",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetEndpointReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, is := result.(*GetEndpointOK); is {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetEndpoint: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// GetEndpointID gets endpoint by endpoint ID.
//
// Returns endpoint information.
func (a *Client) GetEndpointID(params *GetEndpointIDParams, opts ...ClientOption) (*GetEndpointIDOK, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewGetEndpointIDParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetEndpointID",
		Method:             "GET",
		PathPattern:        "/endpoint/{id}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetEndpointIDReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetEndpointIDOK); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// GetEndpointIDConfig retrieves endpoint configuration.
//
// Retrieves the configuration of the specified endpoint.
func (a *Client) GetEndpointIDConfig(params *GetEndpointIDConfigParams, opts ...ClientOption) (*GetEndpointIDConfigOK, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewGetEndpointIDConfigParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetEndpointIDConfig",
		Method:             "GET",
		PathPattern:        "/endpoint/{id}/config",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetEndpointIDConfigReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetEndpointIDConfigOK); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetEndpointIDConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// GetEndpointIDHealthz retrieves the status logs associated with this endpoint.
func (a *Client) GetEndpointIDHealthz(params *GetEndpointIDHealthzParams, opts ...ClientOption) (*GetEndpointIDHealthzOK, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewGetEndpointIDHealthzParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetEndpointIDHealthz",
		Method:             "GET",
		PathPattern:        "/endpoint/{id}/healthz",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetEndpointIDHealthzReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetEndpointIDHealthzOK); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetEndpointIDHealthz: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// GetEndpointIDLabels retrieves the list of labels associated with an endpoint.
func (a *Client) GetEndpointIDLabels(params *GetEndpointIDLabelsParams, opts ...ClientOption) (*GetEndpointIDLabelsOK, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewGetEndpointIDLabelsParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetEndpointIDLabels",
		Method:             "GET",
		PathPattern:        "/endpoint/{id}/labels",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetEndpointIDLabelsReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetEndpointIDLabelsOK); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetEndpointIDLabels: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// GetEndpointIDLog retrieves the status logs associated with this endpoint.
func (a *Client) GetEndpointIDLog(params *GetEndpointIDLogParams, opts ...ClientOption) (*GetEndpointIDLogOK, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewGetEndpointIDLogParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetEndpointIDLog",
		Method:             "GET",
		PathPattern:        "/endpoint/{id}/log",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetEndpointIDLogReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*GetEndpointIDLogOK); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetEndpointIDLog: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// PatchEndpointID modifies existing endpoint.
//
// Applies the endpoint change request to an existing endpoint.
func (a *Client) PatchEndpointID(params *PatchEndpointIDParams, opts ...ClientOption) (*PatchEndpointIDOK, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewPatchEndpointIDParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PatchEndpointID",
		Method:             "PATCH",
		PathPattern:        "/endpoint/{id}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PatchEndpointIDReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*PatchEndpointIDOK); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for PatchEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// PatchEndpointIDConfig modifies mutable endpoint configuration.
//
// Update the configuration of an existing endpoint and regenerates &
// recompiles the corresponding programs automatically.
func (a *Client) PatchEndpointIDConfig(params *PatchEndpointIDConfigParams, opts ...ClientOption) (*PatchEndpointIDConfigOK, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewPatchEndpointIDConfigParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PatchEndpointIDConfig",
		Method:             "PATCH",
		PathPattern:        "/endpoint/{id}/config",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PatchEndpointIDConfigReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*PatchEndpointIDConfigOK); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for PatchEndpointIDConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// PatchEndpointIDLabels sets label configuration of endpoint.
//
// Sets labels associated with an endpoint. These can be user provided or
// derived from the orchestration system.
func (a *Client) PatchEndpointIDLabels(params *PatchEndpointIDLabelsParams, opts ...ClientOption) (*PatchEndpointIDLabelsOK, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewPatchEndpointIDLabelsParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PatchEndpointIDLabels",
		Method:             "PATCH",
		PathPattern:        "/endpoint/{id}/labels",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PatchEndpointIDLabelsReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*PatchEndpointIDLabelsOK); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for PatchEndpointIDLabels: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// PutEndpointID creates endpoint.
//
// Creates a new endpoint.
func (a *Client) PutEndpointID(params *PutEndpointIDParams, opts ...ClientOption) (*PutEndpointIDCreated, error) {
	// A nil params is allowed; substitute the client defaults.
	if params == nil {
		params = NewPutEndpointIDParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PutEndpointID",
		Method:             "PUT",
		PathPattern:        "/endpoint/{id}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PutEndpointIDReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, applyOpt := range opts {
		applyOpt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, ok := result.(*PutEndpointIDCreated); ok {
		return success, nil
	}
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for PutEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result))
}
// SetTransport changes the transport on the client.
// Subsequent operations submitted by this client use the new transport.
func (a *Client) SetTransport(transport runtime.ClientTransport) {
	a.transport = transport
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetEndpointIDConfigParams creates a new GetEndpointIDConfigParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetEndpointIDConfigParams() *GetEndpointIDConfigParams {
	params := new(GetEndpointIDConfigParams)
	params.timeout = cr.DefaultTimeout
	return params
}
// NewGetEndpointIDConfigParamsWithTimeout creates a new GetEndpointIDConfigParams object
// with the ability to set a timeout on a request.
func NewGetEndpointIDConfigParamsWithTimeout(timeout time.Duration) *GetEndpointIDConfigParams {
	params := new(GetEndpointIDConfigParams)
	params.timeout = timeout
	return params
}
// NewGetEndpointIDConfigParamsWithContext creates a new GetEndpointIDConfigParams object
// with the ability to set a context for a request.
func NewGetEndpointIDConfigParamsWithContext(ctx context.Context) *GetEndpointIDConfigParams {
	params := new(GetEndpointIDConfigParams)
	params.Context = ctx
	return params
}
// NewGetEndpointIDConfigParamsWithHTTPClient creates a new GetEndpointIDConfigParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetEndpointIDConfigParamsWithHTTPClient(client *http.Client) *GetEndpointIDConfigParams {
	params := new(GetEndpointIDConfigParams)
	params.HTTPClient = client
	return params
}
/*
GetEndpointIDConfigParams contains all the parameters to send to the API endpoint

	for the get endpoint ID config operation.

	Typically these are written to a http.Request.
*/
type GetEndpointIDConfigParams struct {
	/* ID.

	   String describing an endpoint with the format ``[prefix:]id``. If no prefix
	   is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
	   will be addressable by all endpoint ID prefixes with the exception of the
	   local Cilium UUID which is assigned to all endpoints.

	   Supported endpoint id prefixes:
	     - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
	     - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
	     - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
	     - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
	     - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
	     - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
	     - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
	     - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
	*/
	ID string

	// timeout is unexported; set it via WithTimeout/SetTimeout.
	// It is applied to the request in WriteToRequest.
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get endpoint ID config params (not the query body).
//
// All values with no default are reset to their zero value.
// Returns the receiver to allow call chaining.
func (o *GetEndpointIDConfigParams) WithDefaults() *GetEndpointIDConfigParams {
	o.SetDefaults()
	return o
}
// SetDefaults hydrates default values in the get endpoint ID config params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetEndpointIDConfigParams) SetDefaults() {
	// no default values defined for this parameter; intentionally a no-op
}
// WithTimeout adds the timeout to the get endpoint ID config params,
// returning the receiver for call chaining.
func (o *GetEndpointIDConfigParams) WithTimeout(timeout time.Duration) *GetEndpointIDConfigParams {
	o.timeout = timeout // chaining form of SetTimeout
	return o
}
// SetTimeout adds the timeout to the get endpoint ID config params.
// The timeout is applied to the request in WriteToRequest.
func (o *GetEndpointIDConfigParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}
// WithContext adds the context to the get endpoint ID config params,
// returning the receiver for call chaining.
func (o *GetEndpointIDConfigParams) WithContext(ctx context.Context) *GetEndpointIDConfigParams {
	o.Context = ctx // chaining form of SetContext
	return o
}
// SetContext adds the context to the get endpoint ID config params.
func (o *GetEndpointIDConfigParams) SetContext(ctx context.Context) {
	o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get endpoint ID config params,
// returning the receiver for call chaining.
func (o *GetEndpointIDConfigParams) WithHTTPClient(client *http.Client) *GetEndpointIDConfigParams {
	o.HTTPClient = client // chaining form of SetHTTPClient
	return o
}
// SetHTTPClient adds the HTTPClient to the get endpoint ID config params.
func (o *GetEndpointIDConfigParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}
// WithID adds the id to the get endpoint ID config params,
// returning the receiver for call chaining.
func (o *GetEndpointIDConfigParams) WithID(id string) *GetEndpointIDConfigParams {
	o.ID = id // chaining form of SetID
	return o
}
// SetID adds the id to the get endpoint ID config params.
func (o *GetEndpointIDConfigParams) SetID(id string) {
	o.ID = id
}
// WriteToRequest writes these params to a swagger request.
// It applies the configured timeout and sets the {id} path parameter.
func (o *GetEndpointIDConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {

	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	// res collects validation errors; nothing appends to it for this
	// operation (ID is the only parameter), so the check below is vestigial
	// generated scaffolding that never fires.
	var res []error

	// path param id
	if err := r.SetPathParam("id", o.ID); err != nil {
		return err
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetEndpointIDConfigReader is a Reader for the GetEndpointIDConfig structure.
// Its ReadResponse maps HTTP status codes onto typed response values.
type GetEndpointIDConfigReader struct {
	// formats is the registry used to validate/parse typed fields in payloads.
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// 200 is returned as the result value; 404 and 429 are returned as errors
// (the typed response doubles as the error); any other status yields a
// generic runtime.APIError.
func (o *GetEndpointIDConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		// Success: decode the payload and hand the typed response back.
		result := NewGetEndpointIDConfigOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 404:
		// Endpoint not found: the typed response is returned as the error.
		result := NewGetEndpointIDConfigNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 429:
		// Rate-limited: the typed response is returned as the error.
		result := NewGetEndpointIDConfigTooManyRequests()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[GET /endpoint/{id}/config] GetEndpointIDConfig", response, response.Code())
	}
}
// NewGetEndpointIDConfigOK creates a GetEndpointIDConfigOK with default headers values.
func NewGetEndpointIDConfigOK() *GetEndpointIDConfigOK {
	return &GetEndpointIDConfigOK{}
}
/*
GetEndpointIDConfigOK describes a response with status code 200, with default header values.

Success
*/
type GetEndpointIDConfigOK struct {
	// Payload is the decoded JSON body; allocated in readResponse.
	Payload *models.EndpointConfigurationStatus
}
// IsSuccess reports whether this GetEndpointIDConfigOK response has a 2xx
// status code; always true for this fixed 200 response.
func (o *GetEndpointIDConfigOK) IsSuccess() bool {
	return true
}
// IsRedirect reports whether this GetEndpointIDConfigOK response has a 3xx
// status code; always false for this fixed 200 response.
func (o *GetEndpointIDConfigOK) IsRedirect() bool {
	return false
}
// IsClientError reports whether this GetEndpointIDConfigOK response has a 4xx
// status code; always false for this fixed 200 response.
func (o *GetEndpointIDConfigOK) IsClientError() bool {
	return false
}
// IsServerError reports whether this GetEndpointIDConfigOK response has a 5xx
// status code; always false for this fixed 200 response.
func (o *GetEndpointIDConfigOK) IsServerError() bool {
	return false
}
// IsCode reports whether this GetEndpointIDConfigOK response has a status
// code equal to that given.
func (o *GetEndpointIDConfigOK) IsCode(code int) bool {
	return o.Code() == code
}
// Code gets the status code for the GetEndpointIDConfigOK response; always 200.
func (o *GetEndpointIDConfigOK) Code() int {
	return 200
}
// Error implements the error interface with a diagnostic description of the response.
func (o *GetEndpointIDConfigOK) Error() string {
	// Marshal error intentionally discarded: this is a best-effort diagnostic string.
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %s", 200, payload)
}
// String returns the same diagnostic description as Error.
func (o *GetEndpointIDConfigOK) String() string {
	// Marshal error intentionally discarded: this is a best-effort diagnostic string.
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %s", 200, payload)
}
// GetPayload returns the decoded response body (nil until readResponse runs).
func (o *GetEndpointIDConfigOK) GetPayload() *models.EndpointConfigurationStatus {
	return o.Payload
}
// readResponse decodes the response body into a freshly allocated payload.
func (o *GetEndpointIDConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.EndpointConfigurationStatus)

	// response payload
	// io.EOF is tolerated: an empty body leaves the zero-valued payload in place.
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}
// NewGetEndpointIDConfigNotFound creates a GetEndpointIDConfigNotFound with default headers values.
func NewGetEndpointIDConfigNotFound() *GetEndpointIDConfigNotFound {
	return &GetEndpointIDConfigNotFound{}
}
/*
GetEndpointIDConfigNotFound describes a response with status code 404, with default header values.

Endpoint not found
*/
// The 404 response carries no body, so the type has no fields.
type GetEndpointIDConfigNotFound struct {
}
// IsSuccess reports whether this GetEndpointIDConfigNotFound response has a
// 2xx status code; always false for this fixed 404 response.
func (o *GetEndpointIDConfigNotFound) IsSuccess() bool {
	return false
}
// IsRedirect reports whether this GetEndpointIDConfigNotFound response has a
// 3xx status code; always false for this fixed 404 response.
func (o *GetEndpointIDConfigNotFound) IsRedirect() bool {
	return false
}
// IsClientError reports whether this GetEndpointIDConfigNotFound response has
// a 4xx status code; always true for this fixed 404 response.
func (o *GetEndpointIDConfigNotFound) IsClientError() bool {
	return true
}
// IsServerError reports whether this GetEndpointIDConfigNotFound response has
// a 5xx status code; always false for this fixed 404 response.
func (o *GetEndpointIDConfigNotFound) IsServerError() bool {
	return false
}
// IsCode reports whether this GetEndpointIDConfigNotFound response has a
// status code equal to that given.
func (o *GetEndpointIDConfigNotFound) IsCode(code int) bool {
	return o.Code() == code
}
// Code gets the status code for the GetEndpointIDConfigNotFound response; always 404.
func (o *GetEndpointIDConfigNotFound) Code() int {
	return 404
}
// Error implements the error interface with a diagnostic description of the response.
func (o *GetEndpointIDConfigNotFound) Error() string {
	return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound", 404)
}
// String returns the same diagnostic description as Error.
func (o *GetEndpointIDConfigNotFound) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound", 404)
}
// readResponse is a no-op: the 404 response carries no body to decode.
func (o *GetEndpointIDConfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetEndpointIDConfigTooManyRequests creates a GetEndpointIDConfigTooManyRequests with default headers values.
func NewGetEndpointIDConfigTooManyRequests() *GetEndpointIDConfigTooManyRequests {
	return &GetEndpointIDConfigTooManyRequests{}
}
/*
GetEndpointIDConfigTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
// The 429 response carries no body, so the type has no fields.
type GetEndpointIDConfigTooManyRequests struct {
}
// IsSuccess reports whether this GetEndpointIDConfigTooManyRequests response
// has a 2xx status code; always false for this fixed 429 response.
func (o *GetEndpointIDConfigTooManyRequests) IsSuccess() bool {
	return false
}
// IsRedirect reports whether this GetEndpointIDConfigTooManyRequests response
// has a 3xx status code; always false for this fixed 429 response.
func (o *GetEndpointIDConfigTooManyRequests) IsRedirect() bool {
	return false
}
// IsClientError reports whether this GetEndpointIDConfigTooManyRequests
// response has a 4xx status code; always true for this fixed 429 response.
func (o *GetEndpointIDConfigTooManyRequests) IsClientError() bool {
	return true
}
// IsServerError reports whether this GetEndpointIDConfigTooManyRequests
// response has a 5xx status code; always false for this fixed 429 response.
func (o *GetEndpointIDConfigTooManyRequests) IsServerError() bool {
	return false
}
// IsCode reports whether this GetEndpointIDConfigTooManyRequests response has
// a status code equal to that given.
func (o *GetEndpointIDConfigTooManyRequests) IsCode(code int) bool {
	return o.Code() == code
}
// Code gets the status code for the GetEndpointIDConfigTooManyRequests response; always 429.
func (o *GetEndpointIDConfigTooManyRequests) Code() int {
	return 429
}
// Error implements the error interface with a diagnostic description of the response.
func (o *GetEndpointIDConfigTooManyRequests) Error() string {
	return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests", 429)
}
// String returns the same diagnostic description as Error.
func (o *GetEndpointIDConfigTooManyRequests) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests", 429)
}
// readResponse is a no-op: the 429 response carries no body to decode.
func (o *GetEndpointIDConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetEndpointIDHealthzParams creates a new GetEndpointIDHealthzParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetEndpointIDHealthzParams() *GetEndpointIDHealthzParams {
	params := new(GetEndpointIDHealthzParams)
	params.timeout = cr.DefaultTimeout
	return params
}
// NewGetEndpointIDHealthzParamsWithTimeout creates a new GetEndpointIDHealthzParams object
// with the ability to set a timeout on a request.
func NewGetEndpointIDHealthzParamsWithTimeout(timeout time.Duration) *GetEndpointIDHealthzParams {
	params := new(GetEndpointIDHealthzParams)
	params.timeout = timeout
	return params
}
// NewGetEndpointIDHealthzParamsWithContext creates a new GetEndpointIDHealthzParams object
// with the ability to set a context for a request.
func NewGetEndpointIDHealthzParamsWithContext(ctx context.Context) *GetEndpointIDHealthzParams {
	params := new(GetEndpointIDHealthzParams)
	params.Context = ctx
	return params
}
// NewGetEndpointIDHealthzParamsWithHTTPClient creates a new GetEndpointIDHealthzParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetEndpointIDHealthzParamsWithHTTPClient(client *http.Client) *GetEndpointIDHealthzParams {
	params := new(GetEndpointIDHealthzParams)
	params.HTTPClient = client
	return params
}
/*
GetEndpointIDHealthzParams contains all the parameters to send to the API endpoint

	for the get endpoint ID healthz operation.

	Typically these are written to a http.Request.
*/
type GetEndpointIDHealthzParams struct {
	/* ID.

	   String describing an endpoint with the format ``[prefix:]id``. If no prefix
	   is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
	   will be addressable by all endpoint ID prefixes with the exception of the
	   local Cilium UUID which is assigned to all endpoints.

	   Supported endpoint id prefixes:
	     - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
	     - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
	     - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
	     - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
	     - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
	     - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
	     - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
	     - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
	*/
	ID string

	// timeout is unexported; set it via WithTimeout/SetTimeout.
	// It is applied to the request in WriteToRequest.
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get endpoint ID healthz params (not the query body).
//
// All values with no default are reset to their zero value.
// Returns the receiver to allow call chaining.
func (o *GetEndpointIDHealthzParams) WithDefaults() *GetEndpointIDHealthzParams {
	o.SetDefaults()
	return o
}
// SetDefaults hydrates default values in the get endpoint ID healthz params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetEndpointIDHealthzParams) SetDefaults() {
	// no default values defined for this parameter; intentionally a no-op
}
// WithTimeout adds the timeout to the get endpoint ID healthz params,
// returning the receiver for call chaining.
func (o *GetEndpointIDHealthzParams) WithTimeout(timeout time.Duration) *GetEndpointIDHealthzParams {
	o.timeout = timeout // chaining form of SetTimeout
	return o
}
// SetTimeout adds the timeout to the get endpoint ID healthz params.
// The timeout is applied to the request in WriteToRequest.
func (o *GetEndpointIDHealthzParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}
// WithContext adds the context to the get endpoint ID healthz params,
// returning the receiver for call chaining.
func (o *GetEndpointIDHealthzParams) WithContext(ctx context.Context) *GetEndpointIDHealthzParams {
	o.Context = ctx // chaining form of SetContext
	return o
}
// SetContext adds the context to the get endpoint ID healthz params.
func (o *GetEndpointIDHealthzParams) SetContext(ctx context.Context) {
	o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get endpoint ID healthz params,
// returning the receiver for call chaining.
func (o *GetEndpointIDHealthzParams) WithHTTPClient(client *http.Client) *GetEndpointIDHealthzParams {
	o.HTTPClient = client // chaining form of SetHTTPClient
	return o
}
// SetHTTPClient adds the HTTPClient to the get endpoint ID healthz params.
func (o *GetEndpointIDHealthzParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}
// WithID adds the id to the get endpoint ID healthz params,
// returning the receiver for call chaining.
func (o *GetEndpointIDHealthzParams) WithID(id string) *GetEndpointIDHealthzParams {
	o.ID = id // chaining form of SetID
	return o
}
// SetID adds the id to the get endpoint ID healthz params.
func (o *GetEndpointIDHealthzParams) SetID(id string) {
	o.ID = id
}
// WriteToRequest writes these params to a swagger request.
// It applies the configured timeout and sets the {id} path parameter.
func (o *GetEndpointIDHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {

	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	// res collects validation errors; nothing appends to it for this
	// operation (ID is the only parameter), so the check below is vestigial
	// generated scaffolding that never fires.
	var res []error

	// path param id
	if err := r.SetPathParam("id", o.ID); err != nil {
		return err
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetEndpointIDHealthzReader is a Reader for the GetEndpointIDHealthz structure.
// Its ReadResponse maps HTTP status codes onto typed response values.
type GetEndpointIDHealthzReader struct {
	// formats is the registry used to validate/parse typed fields in payloads.
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// 200 is returned as the result value; 400, 404 and 429 are returned as
// errors (the typed response doubles as the error); any other status yields
// a generic runtime.APIError.
func (o *GetEndpointIDHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		// Success: decode the payload and hand the typed response back.
		result := NewGetEndpointIDHealthzOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		// Invalid endpoint identity: the typed response is returned as the error.
		result := NewGetEndpointIDHealthzInvalid()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		// Endpoint not found: the typed response is returned as the error.
		result := NewGetEndpointIDHealthzNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 429:
		// Rate-limited: the typed response is returned as the error.
		result := NewGetEndpointIDHealthzTooManyRequests()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[GET /endpoint/{id}/healthz] GetEndpointIDHealthz", response, response.Code())
	}
}
// NewGetEndpointIDHealthzOK creates a GetEndpointIDHealthzOK with default headers values.
func NewGetEndpointIDHealthzOK() *GetEndpointIDHealthzOK {
	return &GetEndpointIDHealthzOK{}
}
/*
GetEndpointIDHealthzOK describes a response with status code 200, with default header values.

Success
*/
type GetEndpointIDHealthzOK struct {
	// Payload is the decoded JSON body; allocated in readResponse.
	Payload *models.EndpointHealth
}
// IsSuccess reports whether this GetEndpointIDHealthzOK response has a 2xx
// status code; always true for this fixed 200 response.
func (o *GetEndpointIDHealthzOK) IsSuccess() bool {
	return true
}
// IsRedirect reports whether this GetEndpointIDHealthzOK response has a 3xx
// status code; always false for this fixed 200 response.
func (o *GetEndpointIDHealthzOK) IsRedirect() bool {
	return false
}
// IsClientError returns true when this get endpoint Id healthz o k response has a 4xx status code
func (o *GetEndpointIDHealthzOK) IsClientError() bool {
return false
}
// IsServerError returns true when this get endpoint Id healthz o k response has a 5xx status code
func (o *GetEndpointIDHealthzOK) IsServerError() bool {
return false
}
// IsCode returns true when this get endpoint Id healthz o k response a status code equal to that given
func (o *GetEndpointIDHealthzOK) IsCode(code int) bool {
return code == 200
}
// Code gets the status code for the get endpoint Id healthz o k response
func (o *GetEndpointIDHealthzOK) Code() int {
return 200
}
func (o *GetEndpointIDHealthzOK) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %s", 200, payload)
}
func (o *GetEndpointIDHealthzOK) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %s", 200, payload)
}
func (o *GetEndpointIDHealthzOK) GetPayload() *models.EndpointHealth {
return o.Payload
}
func (o *GetEndpointIDHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.EndpointHealth)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetEndpointIDHealthzInvalid creates a GetEndpointIDHealthzInvalid with default headers values
func NewGetEndpointIDHealthzInvalid() *GetEndpointIDHealthzInvalid {
	return new(GetEndpointIDHealthzInvalid)
}

/*
GetEndpointIDHealthzInvalid describes a response with status code 400, with default header values.

Invalid identity provided
*/
type GetEndpointIDHealthzInvalid struct {
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDHealthzInvalid) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDHealthzInvalid) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDHealthzInvalid) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDHealthzInvalid) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDHealthzInvalid) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (400).
func (r *GetEndpointIDHealthzInvalid) Code() int {
	return 400
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDHealthzInvalid) Error() string {
	return r.String()
}

// String renders a human-readable summary of the response.
func (r *GetEndpointIDHealthzInvalid) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid", 400)
}

// readResponse is a no-op: a 400 reply defines no body payload.
func (r *GetEndpointIDHealthzInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetEndpointIDHealthzNotFound creates a GetEndpointIDHealthzNotFound with default headers values
func NewGetEndpointIDHealthzNotFound() *GetEndpointIDHealthzNotFound {
	return new(GetEndpointIDHealthzNotFound)
}

/*
GetEndpointIDHealthzNotFound describes a response with status code 404, with default header values.

Endpoint not found
*/
type GetEndpointIDHealthzNotFound struct {
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDHealthzNotFound) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDHealthzNotFound) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDHealthzNotFound) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDHealthzNotFound) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDHealthzNotFound) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (404).
func (r *GetEndpointIDHealthzNotFound) Code() int {
	return 404
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDHealthzNotFound) Error() string {
	return r.String()
}

// String renders a human-readable summary of the response.
func (r *GetEndpointIDHealthzNotFound) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound", 404)
}

// readResponse is a no-op: a 404 reply defines no body payload.
func (r *GetEndpointIDHealthzNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetEndpointIDHealthzTooManyRequests creates a GetEndpointIDHealthzTooManyRequests with default headers values
func NewGetEndpointIDHealthzTooManyRequests() *GetEndpointIDHealthzTooManyRequests {
	return new(GetEndpointIDHealthzTooManyRequests)
}

/*
GetEndpointIDHealthzTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type GetEndpointIDHealthzTooManyRequests struct {
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDHealthzTooManyRequests) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDHealthzTooManyRequests) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDHealthzTooManyRequests) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDHealthzTooManyRequests) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDHealthzTooManyRequests) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (429).
func (r *GetEndpointIDHealthzTooManyRequests) Code() int {
	return 429
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDHealthzTooManyRequests) Error() string {
	return r.String()
}

// String renders a human-readable summary of the response.
func (r *GetEndpointIDHealthzTooManyRequests) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests", 429)
}

// readResponse is a no-op: a 429 reply defines no body payload.
func (r *GetEndpointIDHealthzTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetEndpointIDLabelsParams creates a new GetEndpointIDLabelsParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetEndpointIDLabelsParams() *GetEndpointIDLabelsParams {
	params := GetEndpointIDLabelsParams{timeout: cr.DefaultTimeout}
	return &params
}

// NewGetEndpointIDLabelsParamsWithTimeout creates a new GetEndpointIDLabelsParams object
// with the ability to set a timeout on a request.
func NewGetEndpointIDLabelsParamsWithTimeout(timeout time.Duration) *GetEndpointIDLabelsParams {
	params := GetEndpointIDLabelsParams{timeout: timeout}
	return &params
}

// NewGetEndpointIDLabelsParamsWithContext creates a new GetEndpointIDLabelsParams object
// with the ability to set a context for a request.
func NewGetEndpointIDLabelsParamsWithContext(ctx context.Context) *GetEndpointIDLabelsParams {
	params := GetEndpointIDLabelsParams{Context: ctx}
	return &params
}

// NewGetEndpointIDLabelsParamsWithHTTPClient creates a new GetEndpointIDLabelsParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetEndpointIDLabelsParamsWithHTTPClient(client *http.Client) *GetEndpointIDLabelsParams {
	params := GetEndpointIDLabelsParams{HTTPClient: client}
	return &params
}
/*
GetEndpointIDLabelsParams contains all the parameters to send to the API endpoint

	for the get endpoint ID labels operation.

	Typically these are written to a http.Request.
*/
type GetEndpointIDLabelsParams struct {

	/* ID.

	   String describing an endpoint with the format ``[prefix:]id``. If no prefix
	   is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
	   will be addressable by all endpoint ID prefixes with the exception of the
	   local Cilium UUID which is assigned to all endpoints.

	   Supported endpoint id prefixes:
	     - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
	     - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
	     - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
	     - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
	     - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
	     - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
	     - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
	     - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
	*/
	ID string

	// timeout is applied to the request via WriteToRequest's SetTimeout call.
	timeout time.Duration
	// Context carries cancellation/deadline for the request.
	Context context.Context
	// HTTPClient, when non-nil, is presumably used in place of the transport's
	// default client — applied by the transport, not visible in this file.
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get endpoint ID labels params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetEndpointIDLabelsParams) WithDefaults() *GetEndpointIDLabelsParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the get endpoint ID labels params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetEndpointIDLabelsParams) SetDefaults() {
	// This operation declares no parameter defaults; nothing to hydrate.
}

// WithTimeout adds the timeout to the get endpoint ID labels params
func (p *GetEndpointIDLabelsParams) WithTimeout(timeout time.Duration) *GetEndpointIDLabelsParams {
	p.SetTimeout(timeout)
	return p
}

// SetTimeout adds the timeout to the get endpoint ID labels params
func (p *GetEndpointIDLabelsParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the get endpoint ID labels params
func (p *GetEndpointIDLabelsParams) WithContext(ctx context.Context) *GetEndpointIDLabelsParams {
	p.SetContext(ctx)
	return p
}

// SetContext adds the context to the get endpoint ID labels params
func (p *GetEndpointIDLabelsParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get endpoint ID labels params
func (p *GetEndpointIDLabelsParams) WithHTTPClient(client *http.Client) *GetEndpointIDLabelsParams {
	p.SetHTTPClient(client)
	return p
}

// SetHTTPClient adds the HTTPClient to the get endpoint ID labels params
func (p *GetEndpointIDLabelsParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithID adds the id to the get endpoint ID labels params
func (p *GetEndpointIDLabelsParams) WithID(id string) *GetEndpointIDLabelsParams {
	p.SetID(id)
	return p
}

// SetID adds the id to the get endpoint ID labels params
func (p *GetEndpointIDLabelsParams) SetID(id string) {
	p.ID = id
}
// WriteToRequest writes these params to a swagger request
func (p *GetEndpointIDLabelsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	var errs []error

	// path param id
	if err := r.SetPathParam("id", p.ID); err != nil {
		return err
	}

	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetEndpointIDLabelsReader is a Reader for the GetEndpointIDLabels structure.
type GetEndpointIDLabelsReader struct {
	// formats is the strfmt registry passed to each typed response's
	// readResponse for payload deserialization.
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received r.
//
// A 200 is returned as the first value; 404 and 429 are returned as error
// values; any other status becomes a generic runtime.APIError.
func (r *GetEndpointIDLabelsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		ok := NewGetEndpointIDLabelsOK()
		if err := ok.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return ok, nil
	case 404:
		notFound := NewGetEndpointIDLabelsNotFound()
		if err := notFound.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, notFound
	case 429:
		tooMany := NewGetEndpointIDLabelsTooManyRequests()
		if err := tooMany.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, tooMany
	default:
		return nil, runtime.NewAPIError("[GET /endpoint/{id}/labels] GetEndpointIDLabels", response, response.Code())
	}
}
// NewGetEndpointIDLabelsOK creates a GetEndpointIDLabelsOK with default headers values
func NewGetEndpointIDLabelsOK() *GetEndpointIDLabelsOK {
	return new(GetEndpointIDLabelsOK)
}

/*
GetEndpointIDLabelsOK describes a response with status code 200, with default header values.

Success
*/
type GetEndpointIDLabelsOK struct {
	Payload *models.LabelConfiguration
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDLabelsOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDLabelsOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDLabelsOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDLabelsOK) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDLabelsOK) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (200).
func (r *GetEndpointIDLabelsOK) Code() int {
	return 200
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDLabelsOK) Error() string {
	return r.String()
}

// String renders a human-readable summary including the JSON payload.
func (r *GetEndpointIDLabelsOK) String() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %s", 200, payload)
}

// GetPayload returns the decoded response body.
func (r *GetEndpointIDLabelsOK) GetPayload() *models.LabelConfiguration {
	return r.Payload
}

// readResponse decodes the HTTP body into the payload model.
func (r *GetEndpointIDLabelsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	r.Payload = new(models.LabelConfiguration)
	// io.EOF is tolerated: an empty body leaves the zero-value payload.
	if err := consumer.Consume(response.Body(), r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetEndpointIDLabelsNotFound creates a GetEndpointIDLabelsNotFound with default headers values
func NewGetEndpointIDLabelsNotFound() *GetEndpointIDLabelsNotFound {
	return new(GetEndpointIDLabelsNotFound)
}

/*
GetEndpointIDLabelsNotFound describes a response with status code 404, with default header values.

Endpoint not found
*/
type GetEndpointIDLabelsNotFound struct {
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDLabelsNotFound) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDLabelsNotFound) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDLabelsNotFound) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDLabelsNotFound) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDLabelsNotFound) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (404).
func (r *GetEndpointIDLabelsNotFound) Code() int {
	return 404
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDLabelsNotFound) Error() string {
	return r.String()
}

// String renders a human-readable summary of the response.
func (r *GetEndpointIDLabelsNotFound) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound", 404)
}

// readResponse is a no-op: a 404 reply defines no body payload.
func (r *GetEndpointIDLabelsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetEndpointIDLabelsTooManyRequests creates a GetEndpointIDLabelsTooManyRequests with default headers values
func NewGetEndpointIDLabelsTooManyRequests() *GetEndpointIDLabelsTooManyRequests {
	return new(GetEndpointIDLabelsTooManyRequests)
}

/*
GetEndpointIDLabelsTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type GetEndpointIDLabelsTooManyRequests struct {
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDLabelsTooManyRequests) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDLabelsTooManyRequests) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDLabelsTooManyRequests) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDLabelsTooManyRequests) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDLabelsTooManyRequests) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (429).
func (r *GetEndpointIDLabelsTooManyRequests) Code() int {
	return 429
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDLabelsTooManyRequests) Error() string {
	return r.String()
}

// String renders a human-readable summary of the response.
func (r *GetEndpointIDLabelsTooManyRequests) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests", 429)
}

// readResponse is a no-op: a 429 reply defines no body payload.
func (r *GetEndpointIDLabelsTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetEndpointIDLogParams creates a new GetEndpointIDLogParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetEndpointIDLogParams() *GetEndpointIDLogParams {
	params := GetEndpointIDLogParams{timeout: cr.DefaultTimeout}
	return &params
}

// NewGetEndpointIDLogParamsWithTimeout creates a new GetEndpointIDLogParams object
// with the ability to set a timeout on a request.
func NewGetEndpointIDLogParamsWithTimeout(timeout time.Duration) *GetEndpointIDLogParams {
	params := GetEndpointIDLogParams{timeout: timeout}
	return &params
}

// NewGetEndpointIDLogParamsWithContext creates a new GetEndpointIDLogParams object
// with the ability to set a context for a request.
func NewGetEndpointIDLogParamsWithContext(ctx context.Context) *GetEndpointIDLogParams {
	params := GetEndpointIDLogParams{Context: ctx}
	return &params
}

// NewGetEndpointIDLogParamsWithHTTPClient creates a new GetEndpointIDLogParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetEndpointIDLogParamsWithHTTPClient(client *http.Client) *GetEndpointIDLogParams {
	params := GetEndpointIDLogParams{HTTPClient: client}
	return &params
}
/*
GetEndpointIDLogParams contains all the parameters to send to the API endpoint

	for the get endpoint ID log operation.

	Typically these are written to a http.Request.
*/
type GetEndpointIDLogParams struct {

	/* ID.

	   String describing an endpoint with the format ``[prefix:]id``. If no prefix
	   is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
	   will be addressable by all endpoint ID prefixes with the exception of the
	   local Cilium UUID which is assigned to all endpoints.

	   Supported endpoint id prefixes:
	     - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
	     - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
	     - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
	     - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
	     - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
	     - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
	     - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
	     - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
	*/
	ID string

	// timeout is applied to the request via WriteToRequest's SetTimeout call.
	timeout time.Duration
	// Context carries cancellation/deadline for the request.
	Context context.Context
	// HTTPClient, when non-nil, is presumably used in place of the transport's
	// default client — applied by the transport, not visible in this file.
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get endpoint ID log params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetEndpointIDLogParams) WithDefaults() *GetEndpointIDLogParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the get endpoint ID log params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetEndpointIDLogParams) SetDefaults() {
	// This operation declares no parameter defaults; nothing to hydrate.
}

// WithTimeout adds the timeout to the get endpoint ID log params
func (p *GetEndpointIDLogParams) WithTimeout(timeout time.Duration) *GetEndpointIDLogParams {
	p.SetTimeout(timeout)
	return p
}

// SetTimeout adds the timeout to the get endpoint ID log params
func (p *GetEndpointIDLogParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the get endpoint ID log params
func (p *GetEndpointIDLogParams) WithContext(ctx context.Context) *GetEndpointIDLogParams {
	p.SetContext(ctx)
	return p
}

// SetContext adds the context to the get endpoint ID log params
func (p *GetEndpointIDLogParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get endpoint ID log params
func (p *GetEndpointIDLogParams) WithHTTPClient(client *http.Client) *GetEndpointIDLogParams {
	p.SetHTTPClient(client)
	return p
}

// SetHTTPClient adds the HTTPClient to the get endpoint ID log params
func (p *GetEndpointIDLogParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithID adds the id to the get endpoint ID log params
func (p *GetEndpointIDLogParams) WithID(id string) *GetEndpointIDLogParams {
	p.SetID(id)
	return p
}

// SetID adds the id to the get endpoint ID log params
func (p *GetEndpointIDLogParams) SetID(id string) {
	p.ID = id
}
// WriteToRequest writes these params to a swagger request
func (p *GetEndpointIDLogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	var errs []error

	// path param id
	if err := r.SetPathParam("id", p.ID); err != nil {
		return err
	}

	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetEndpointIDLogReader is a Reader for the GetEndpointIDLog structure.
type GetEndpointIDLogReader struct {
	// formats is the strfmt registry passed to each typed response's
	// readResponse for payload deserialization.
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received r.
//
// A 200 is returned as the first value; 400, 404 and 429 are returned as
// error values; any other status becomes a generic runtime.APIError.
func (r *GetEndpointIDLogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		ok := NewGetEndpointIDLogOK()
		if err := ok.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return ok, nil
	case 400:
		invalid := NewGetEndpointIDLogInvalid()
		if err := invalid.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, invalid
	case 404:
		notFound := NewGetEndpointIDLogNotFound()
		if err := notFound.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, notFound
	case 429:
		tooMany := NewGetEndpointIDLogTooManyRequests()
		if err := tooMany.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, tooMany
	default:
		return nil, runtime.NewAPIError("[GET /endpoint/{id}/log] GetEndpointIDLog", response, response.Code())
	}
}
// NewGetEndpointIDLogOK creates a GetEndpointIDLogOK with default headers values
func NewGetEndpointIDLogOK() *GetEndpointIDLogOK {
	return new(GetEndpointIDLogOK)
}

/*
GetEndpointIDLogOK describes a response with status code 200, with default header values.

Success
*/
type GetEndpointIDLogOK struct {
	Payload models.EndpointStatusLog
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDLogOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDLogOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDLogOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDLogOK) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDLogOK) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (200).
func (r *GetEndpointIDLogOK) Code() int {
	return 200
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDLogOK) Error() string {
	return r.String()
}

// String renders a human-readable summary including the JSON payload.
func (r *GetEndpointIDLogOK) String() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %s", 200, payload)
}

// GetPayload returns the decoded response body.
func (r *GetEndpointIDLogOK) GetPayload() models.EndpointStatusLog {
	return r.Payload
}

// readResponse decodes the HTTP body into the payload model. Unlike the
// pointer-payload responses, the payload here is a value type, so it is
// decoded in place via its address.
func (r *GetEndpointIDLogOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF is tolerated: an empty body leaves the zero-value payload.
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetEndpointIDLogInvalid creates a GetEndpointIDLogInvalid with default headers values
func NewGetEndpointIDLogInvalid() *GetEndpointIDLogInvalid {
	return new(GetEndpointIDLogInvalid)
}

/*
GetEndpointIDLogInvalid describes a response with status code 400, with default header values.

Invalid identity provided
*/
type GetEndpointIDLogInvalid struct {
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDLogInvalid) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDLogInvalid) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDLogInvalid) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDLogInvalid) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDLogInvalid) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (400).
func (r *GetEndpointIDLogInvalid) Code() int {
	return 400
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDLogInvalid) Error() string {
	return r.String()
}

// String renders a human-readable summary of the response.
func (r *GetEndpointIDLogInvalid) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid", 400)
}

// readResponse is a no-op: a 400 reply defines no body payload.
func (r *GetEndpointIDLogInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetEndpointIDLogNotFound creates a GetEndpointIDLogNotFound with default headers values
func NewGetEndpointIDLogNotFound() *GetEndpointIDLogNotFound {
	return new(GetEndpointIDLogNotFound)
}

/*
GetEndpointIDLogNotFound describes a response with status code 404, with default header values.

Endpoint not found
*/
type GetEndpointIDLogNotFound struct {
}

// IsSuccess reports whether this response carries a 2xx status code.
func (r *GetEndpointIDLogNotFound) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (r *GetEndpointIDLogNotFound) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (r *GetEndpointIDLogNotFound) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (r *GetEndpointIDLogNotFound) IsServerError() bool {
	return false
}

// IsCode reports whether this response carries the given status code.
func (r *GetEndpointIDLogNotFound) IsCode(code int) bool {
	return r.Code() == code
}

// Code returns the HTTP status code of this response (404).
func (r *GetEndpointIDLogNotFound) Code() int {
	return 404
}

// Error implements the error interface; its text matches String.
func (r *GetEndpointIDLogNotFound) Error() string {
	return r.String()
}

// String renders a human-readable summary of the response.
func (r *GetEndpointIDLogNotFound) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound", 404)
}

// readResponse is a no-op: a 404 reply defines no body payload.
func (r *GetEndpointIDLogNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetEndpointIDLogTooManyRequests creates a GetEndpointIDLogTooManyRequests with default headers values.
func NewGetEndpointIDLogTooManyRequests() *GetEndpointIDLogTooManyRequests {
	return new(GetEndpointIDLogTooManyRequests)
}

/*
GetEndpointIDLogTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type GetEndpointIDLogTooManyRequests struct {
}

// IsSuccess returns false: a 429 is never a 2xx status code.
func (r *GetEndpointIDLogTooManyRequests) IsSuccess() bool {
	return false
}

// IsRedirect returns false: a 429 is never a 3xx status code.
func (r *GetEndpointIDLogTooManyRequests) IsRedirect() bool {
	return false
}

// IsClientError reports that this response is a 4xx client error.
func (r *GetEndpointIDLogTooManyRequests) IsClientError() bool {
	return true
}

// IsServerError returns false: a 429 is never a 5xx status code.
func (r *GetEndpointIDLogTooManyRequests) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response's code.
func (r *GetEndpointIDLogTooManyRequests) IsCode(code int) bool {
	return code == r.Code()
}

// Code gets the status code for the get endpoint Id log too many requests response (always 429).
func (r *GetEndpointIDLogTooManyRequests) Code() int {
	return 429
}

// Error renders the same "[METHOD path][status] operationId" summary as String.
func (r *GetEndpointIDLogTooManyRequests) Error() string {
	return r.String()
}

// String renders a short "[METHOD path][status] operationId" summary of the response.
func (r *GetEndpointIDLogTooManyRequests) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests", r.Code())
}

// readResponse is a no-op: this 429 response defines no payload to decode.
func (r *GetEndpointIDLogTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetEndpointIDParams creates a new GetEndpointIDParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetEndpointIDParams() *GetEndpointIDParams {
	params := GetEndpointIDParams{timeout: cr.DefaultTimeout}
	return &params
}

// NewGetEndpointIDParamsWithTimeout creates a new GetEndpointIDParams object
// with the ability to set a timeout on a request.
func NewGetEndpointIDParamsWithTimeout(timeout time.Duration) *GetEndpointIDParams {
	params := GetEndpointIDParams{timeout: timeout}
	return &params
}

// NewGetEndpointIDParamsWithContext creates a new GetEndpointIDParams object
// with the ability to set a context for a request.
func NewGetEndpointIDParamsWithContext(ctx context.Context) *GetEndpointIDParams {
	params := GetEndpointIDParams{Context: ctx}
	return &params
}

// NewGetEndpointIDParamsWithHTTPClient creates a new GetEndpointIDParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetEndpointIDParamsWithHTTPClient(client *http.Client) *GetEndpointIDParams {
	params := GetEndpointIDParams{HTTPClient: client}
	return &params
}

/*
GetEndpointIDParams contains all the parameters to send to the API endpoint

	for the get endpoint ID operation.

	Typically these are written to a http.Request.
*/
type GetEndpointIDParams struct {
	/* ID.

	   String describing an endpoint with the format ``[prefix:]id``. If no prefix
	   is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
	   will be addressable by all endpoint ID prefixes with the exception of the
	   local Cilium UUID which is assigned to all endpoints.

	   Supported endpoint id prefixes:
	     - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
	     - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
	     - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
	     - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
	     - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
	     - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
	     - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
	     - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
	*/
	ID string

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get endpoint ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetEndpointIDParams) WithDefaults() *GetEndpointIDParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the get endpoint ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetEndpointIDParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get endpoint ID params.
func (p *GetEndpointIDParams) WithTimeout(timeout time.Duration) *GetEndpointIDParams {
	p.SetTimeout(timeout)
	return p
}

// SetTimeout adds the timeout to the get endpoint ID params.
func (p *GetEndpointIDParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the get endpoint ID params.
func (p *GetEndpointIDParams) WithContext(ctx context.Context) *GetEndpointIDParams {
	p.SetContext(ctx)
	return p
}

// SetContext adds the context to the get endpoint ID params.
func (p *GetEndpointIDParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get endpoint ID params.
func (p *GetEndpointIDParams) WithHTTPClient(client *http.Client) *GetEndpointIDParams {
	p.SetHTTPClient(client)
	return p
}

// SetHTTPClient adds the HTTPClient to the get endpoint ID params.
func (p *GetEndpointIDParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithID adds the id to the get endpoint ID params.
func (p *GetEndpointIDParams) WithID(id string) *GetEndpointIDParams {
	p.SetID(id)
	return p
}

// SetID adds the id to the get endpoint ID params.
func (p *GetEndpointIDParams) SetID(id string) {
	p.ID = id
}

// WriteToRequest writes these params to a swagger request.
func (p *GetEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	var res []error

	// path param id
	if err := r.SetPathParam("id", p.ID); err != nil {
		return err
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetEndpointIDReader is a Reader for the GetEndpointID structure.
type GetEndpointIDReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (r *GetEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		// success: decode the payload and hand the typed response back.
		result := NewGetEndpointIDOK()
		if err := result.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		// client errors below are returned as Go errors, not results.
		result := NewGetEndpointIDInvalid()
		if err := result.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		result := NewGetEndpointIDNotFound()
		if err := result.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 429:
		result := NewGetEndpointIDTooManyRequests()
		if err := result.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		// any status code not declared in the swagger spec is an API error.
		return nil, runtime.NewAPIError("[GET /endpoint/{id}] GetEndpointID", response, response.Code())
	}
}
// NewGetEndpointIDOK creates a GetEndpointIDOK with default headers values.
func NewGetEndpointIDOK() *GetEndpointIDOK {
	return new(GetEndpointIDOK)
}

/*
GetEndpointIDOK describes a response with status code 200, with default header values.

Success
*/
type GetEndpointIDOK struct {
	Payload *models.Endpoint
}

// IsSuccess reports that this response is a 2xx success.
func (r *GetEndpointIDOK) IsSuccess() bool {
	return true
}

// IsRedirect returns false: a 200 is never a 3xx status code.
func (r *GetEndpointIDOK) IsRedirect() bool {
	return false
}

// IsClientError returns false: a 200 is never a 4xx status code.
func (r *GetEndpointIDOK) IsClientError() bool {
	return false
}

// IsServerError returns false: a 200 is never a 5xx status code.
func (r *GetEndpointIDOK) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response's code.
func (r *GetEndpointIDOK) IsCode(code int) bool {
	return code == r.Code()
}

// Code gets the status code for the get endpoint Id o k response (always 200).
func (r *GetEndpointIDOK) Code() int {
	return 200
}

// Error renders the same operation/status/payload summary as String.
func (r *GetEndpointIDOK) Error() string {
	return r.String()
}

// String renders a "[METHOD path][status] operationId payload" summary of the response.
func (r *GetEndpointIDOK) String() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %s", r.Code(), payload)
}

// GetPayload returns the decoded endpoint model carried by this response.
func (r *GetEndpointIDOK) GetPayload() *models.Endpoint {
	return r.Payload
}

// readResponse decodes the response body into a fresh Endpoint payload;
// io.EOF is tolerated so that an empty body is not treated as an error.
func (r *GetEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	r.Payload = new(models.Endpoint)

	if err := consumer.Consume(response.Body(), r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetEndpointIDInvalid creates a GetEndpointIDInvalid with default headers values.
func NewGetEndpointIDInvalid() *GetEndpointIDInvalid {
	return new(GetEndpointIDInvalid)
}

/*
GetEndpointIDInvalid describes a response with status code 400, with default header values.

Invalid endpoint ID format for specified type
*/
type GetEndpointIDInvalid struct {
	Payload models.Error
}

// IsSuccess returns false: a 400 is never a 2xx status code.
func (r *GetEndpointIDInvalid) IsSuccess() bool {
	return false
}

// IsRedirect returns false: a 400 is never a 3xx status code.
func (r *GetEndpointIDInvalid) IsRedirect() bool {
	return false
}

// IsClientError reports that this response is a 4xx client error.
func (r *GetEndpointIDInvalid) IsClientError() bool {
	return true
}

// IsServerError returns false: a 400 is never a 5xx status code.
func (r *GetEndpointIDInvalid) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response's code.
func (r *GetEndpointIDInvalid) IsCode(code int) bool {
	return code == r.Code()
}

// Code gets the status code for the get endpoint Id invalid response (always 400).
func (r *GetEndpointIDInvalid) Code() int {
	return 400
}

// Error renders the same operation/status/payload summary as String.
func (r *GetEndpointIDInvalid) Error() string {
	return r.String()
}

// String renders a "[METHOD path][status] operationId payload" summary of the response.
func (r *GetEndpointIDInvalid) String() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %s", r.Code(), payload)
}

// GetPayload returns the decoded error model carried by this response.
func (r *GetEndpointIDInvalid) GetPayload() models.Error {
	return r.Payload
}

// readResponse decodes the response body into the value-typed Payload;
// io.EOF is tolerated so that an empty body is not treated as an error.
func (r *GetEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetEndpointIDNotFound creates a GetEndpointIDNotFound with default headers values.
func NewGetEndpointIDNotFound() *GetEndpointIDNotFound {
	return new(GetEndpointIDNotFound)
}

/*
GetEndpointIDNotFound describes a response with status code 404, with default header values.

Endpoint not found
*/
type GetEndpointIDNotFound struct {
}

// IsSuccess returns false: a 404 is never a 2xx status code.
func (r *GetEndpointIDNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns false: a 404 is never a 3xx status code.
func (r *GetEndpointIDNotFound) IsRedirect() bool {
	return false
}

// IsClientError reports that this response is a 4xx client error.
func (r *GetEndpointIDNotFound) IsClientError() bool {
	return true
}

// IsServerError returns false: a 404 is never a 5xx status code.
func (r *GetEndpointIDNotFound) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response's code.
func (r *GetEndpointIDNotFound) IsCode(code int) bool {
	return code == r.Code()
}

// Code gets the status code for the get endpoint Id not found response (always 404).
func (r *GetEndpointIDNotFound) Code() int {
	return 404
}

// Error renders the same "[METHOD path][status] operationId" summary as String.
func (r *GetEndpointIDNotFound) Error() string {
	return r.String()
}

// String renders a short "[METHOD path][status] operationId" summary of the response.
func (r *GetEndpointIDNotFound) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound", r.Code())
}

// readResponse is a no-op: this 404 response defines no payload to decode.
func (r *GetEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetEndpointIDTooManyRequests creates a GetEndpointIDTooManyRequests with default headers values.
func NewGetEndpointIDTooManyRequests() *GetEndpointIDTooManyRequests {
	return new(GetEndpointIDTooManyRequests)
}

/*
GetEndpointIDTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type GetEndpointIDTooManyRequests struct {
}

// IsSuccess returns false: a 429 is never a 2xx status code.
func (r *GetEndpointIDTooManyRequests) IsSuccess() bool {
	return false
}

// IsRedirect returns false: a 429 is never a 3xx status code.
func (r *GetEndpointIDTooManyRequests) IsRedirect() bool {
	return false
}

// IsClientError reports that this response is a 4xx client error.
func (r *GetEndpointIDTooManyRequests) IsClientError() bool {
	return true
}

// IsServerError returns false: a 429 is never a 5xx status code.
func (r *GetEndpointIDTooManyRequests) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response's code.
func (r *GetEndpointIDTooManyRequests) IsCode(code int) bool {
	return code == r.Code()
}

// Code gets the status code for the get endpoint Id too many requests response (always 429).
func (r *GetEndpointIDTooManyRequests) Code() int {
	return 429
}

// Error renders the same "[METHOD path][status] operationId" summary as String.
func (r *GetEndpointIDTooManyRequests) Error() string {
	return r.String()
}

// String renders a short "[METHOD path][status] operationId" summary of the response.
func (r *GetEndpointIDTooManyRequests) String() string {
	return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests", r.Code())
}

// readResponse is a no-op: this 429 response defines no payload to decode.
func (r *GetEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewGetEndpointParams creates a new GetEndpointParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetEndpointParams() *GetEndpointParams {
	params := GetEndpointParams{timeout: cr.DefaultTimeout}
	return &params
}

// NewGetEndpointParamsWithTimeout creates a new GetEndpointParams object
// with the ability to set a timeout on a request.
func NewGetEndpointParamsWithTimeout(timeout time.Duration) *GetEndpointParams {
	params := GetEndpointParams{timeout: timeout}
	return &params
}

// NewGetEndpointParamsWithContext creates a new GetEndpointParams object
// with the ability to set a context for a request.
func NewGetEndpointParamsWithContext(ctx context.Context) *GetEndpointParams {
	params := GetEndpointParams{Context: ctx}
	return &params
}

// NewGetEndpointParamsWithHTTPClient creates a new GetEndpointParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetEndpointParamsWithHTTPClient(client *http.Client) *GetEndpointParams {
	params := GetEndpointParams{HTTPClient: client}
	return &params
}

/*
GetEndpointParams contains all the parameters to send to the API endpoint

	for the get endpoint operation.

	Typically these are written to a http.Request.
*/
type GetEndpointParams struct {
	/* Labels.

	   List of labels
	*/
	Labels models.Labels

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get endpoint params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetEndpointParams) WithDefaults() *GetEndpointParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the get endpoint params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetEndpointParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get endpoint params.
func (p *GetEndpointParams) WithTimeout(timeout time.Duration) *GetEndpointParams {
	p.SetTimeout(timeout)
	return p
}

// SetTimeout adds the timeout to the get endpoint params.
func (p *GetEndpointParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the get endpoint params.
func (p *GetEndpointParams) WithContext(ctx context.Context) *GetEndpointParams {
	p.SetContext(ctx)
	return p
}

// SetContext adds the context to the get endpoint params.
func (p *GetEndpointParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get endpoint params.
func (p *GetEndpointParams) WithHTTPClient(client *http.Client) *GetEndpointParams {
	p.SetHTTPClient(client)
	return p
}

// SetHTTPClient adds the HTTPClient to the get endpoint params.
func (p *GetEndpointParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithLabels adds the labels to the get endpoint params.
func (p *GetEndpointParams) WithLabels(labels models.Labels) *GetEndpointParams {
	p.SetLabels(labels)
	return p
}

// SetLabels adds the labels to the get endpoint params.
func (p *GetEndpointParams) SetLabels(labels models.Labels) {
	p.Labels = labels
}

// WriteToRequest writes these params to a swagger request.
func (p *GetEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	var res []error

	// the labels list, when present, travels as the request body
	if p.Labels != nil {
		if err := r.SetBodyParam(p.Labels); err != nil {
			return err
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetEndpointReader is a Reader for the GetEndpoint structure.
type GetEndpointReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (r *GetEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		// success: decode the payload and hand the typed response back.
		result := NewGetEndpointOK()
		if err := result.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 404:
		// client errors below are returned as Go errors, not results.
		result := NewGetEndpointNotFound()
		if err := result.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 429:
		result := NewGetEndpointTooManyRequests()
		if err := result.readResponse(response, consumer, r.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		// any status code not declared in the swagger spec is an API error.
		return nil, runtime.NewAPIError("[GET /endpoint] GetEndpoint", response, response.Code())
	}
}
// NewGetEndpointOK creates a GetEndpointOK with default headers values.
func NewGetEndpointOK() *GetEndpointOK {
	return new(GetEndpointOK)
}

/*
GetEndpointOK describes a response with status code 200, with default header values.

Success
*/
type GetEndpointOK struct {
	Payload []*models.Endpoint
}

// IsSuccess reports that this response is a 2xx success.
func (r *GetEndpointOK) IsSuccess() bool {
	return true
}

// IsRedirect returns false: a 200 is never a 3xx status code.
func (r *GetEndpointOK) IsRedirect() bool {
	return false
}

// IsClientError returns false: a 200 is never a 4xx status code.
func (r *GetEndpointOK) IsClientError() bool {
	return false
}

// IsServerError returns false: a 200 is never a 5xx status code.
func (r *GetEndpointOK) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response's code.
func (r *GetEndpointOK) IsCode(code int) bool {
	return code == r.Code()
}

// Code gets the status code for the get endpoint o k response (always 200).
func (r *GetEndpointOK) Code() int {
	return 200
}

// Error renders the same operation/status/payload summary as String.
func (r *GetEndpointOK) Error() string {
	return r.String()
}

// String renders a "[METHOD path][status] operationId payload" summary of the response.
func (r *GetEndpointOK) String() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %s", r.Code(), payload)
}

// GetPayload returns the decoded list of endpoint models carried by this response.
func (r *GetEndpointOK) GetPayload() []*models.Endpoint {
	return r.Payload
}

// readResponse decodes the response body into the Payload slice;
// io.EOF is tolerated so that an empty body is not treated as an error.
func (r *GetEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetEndpointNotFound creates a GetEndpointNotFound with default headers values.
func NewGetEndpointNotFound() *GetEndpointNotFound {
	return new(GetEndpointNotFound)
}

/*
GetEndpointNotFound describes a response with status code 404, with default header values.

Endpoints with provided parameters not found
*/
type GetEndpointNotFound struct {
}

// IsSuccess returns false: a 404 is never a 2xx status code.
func (r *GetEndpointNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns false: a 404 is never a 3xx status code.
func (r *GetEndpointNotFound) IsRedirect() bool {
	return false
}

// IsClientError reports that this response is a 4xx client error.
func (r *GetEndpointNotFound) IsClientError() bool {
	return true
}

// IsServerError returns false: a 404 is never a 5xx status code.
func (r *GetEndpointNotFound) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response's code.
func (r *GetEndpointNotFound) IsCode(code int) bool {
	return code == r.Code()
}

// Code gets the status code for the get endpoint not found response (always 404).
func (r *GetEndpointNotFound) Code() int {
	return 404
}

// Error renders the same "[METHOD path][status] operationId" summary as String.
func (r *GetEndpointNotFound) Error() string {
	return r.String()
}

// String renders a short "[METHOD path][status] operationId" summary of the response.
func (r *GetEndpointNotFound) String() string {
	return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound", r.Code())
}

// readResponse is a no-op: this 404 response defines no payload to decode.
func (r *GetEndpointNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetEndpointTooManyRequests creates a GetEndpointTooManyRequests with default headers values.
func NewGetEndpointTooManyRequests() *GetEndpointTooManyRequests {
	return new(GetEndpointTooManyRequests)
}

/*
GetEndpointTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type GetEndpointTooManyRequests struct {
}

// IsSuccess returns false: a 429 is never a 2xx status code.
func (r *GetEndpointTooManyRequests) IsSuccess() bool {
	return false
}

// IsRedirect returns false: a 429 is never a 3xx status code.
func (r *GetEndpointTooManyRequests) IsRedirect() bool {
	return false
}

// IsClientError reports that this response is a 4xx client error.
func (r *GetEndpointTooManyRequests) IsClientError() bool {
	return true
}

// IsServerError returns false: a 429 is never a 5xx status code.
func (r *GetEndpointTooManyRequests) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response's code.
func (r *GetEndpointTooManyRequests) IsCode(code int) bool {
	return code == r.Code()
}

// Code gets the status code for the get endpoint too many requests response (always 429).
func (r *GetEndpointTooManyRequests) Code() int {
	return 429
}

// Error renders the same "[METHOD path][status] operationId" summary as String.
func (r *GetEndpointTooManyRequests) Error() string {
	return r.String()
}

// String renders a short "[METHOD path][status] operationId" summary of the response.
func (r *GetEndpointTooManyRequests) String() string {
	return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests", r.Code())
}

// readResponse is a no-op: this 429 response defines no payload to decode.
func (r *GetEndpointTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewPatchEndpointIDConfigParams creates a new PatchEndpointIDConfigParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewPatchEndpointIDConfigParams() *PatchEndpointIDConfigParams {
	params := PatchEndpointIDConfigParams{timeout: cr.DefaultTimeout}
	return &params
}

// NewPatchEndpointIDConfigParamsWithTimeout creates a new PatchEndpointIDConfigParams object
// with the ability to set a timeout on a request.
func NewPatchEndpointIDConfigParamsWithTimeout(timeout time.Duration) *PatchEndpointIDConfigParams {
	params := PatchEndpointIDConfigParams{timeout: timeout}
	return &params
}

// NewPatchEndpointIDConfigParamsWithContext creates a new PatchEndpointIDConfigParams object
// with the ability to set a context for a request.
func NewPatchEndpointIDConfigParamsWithContext(ctx context.Context) *PatchEndpointIDConfigParams {
	params := PatchEndpointIDConfigParams{Context: ctx}
	return &params
}

// NewPatchEndpointIDConfigParamsWithHTTPClient creates a new PatchEndpointIDConfigParams object
// with the ability to set a custom HTTPClient for a request.
func NewPatchEndpointIDConfigParamsWithHTTPClient(client *http.Client) *PatchEndpointIDConfigParams {
	params := PatchEndpointIDConfigParams{HTTPClient: client}
	return &params
}

/*
PatchEndpointIDConfigParams contains all the parameters to send to the API endpoint

	for the patch endpoint ID config operation.

	Typically these are written to a http.Request.
*/
type PatchEndpointIDConfigParams struct {
	// EndpointConfiguration.
	EndpointConfiguration *models.EndpointConfigurationSpec

	/* ID.

	   String describing an endpoint with the format ``[prefix:]id``. If no prefix
	   is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
	   will be addressable by all endpoint ID prefixes with the exception of the
	   local Cilium UUID which is assigned to all endpoints.

	   Supported endpoint id prefixes:
	     - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
	     - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
	     - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
	     - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
	     - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
	     - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
	     - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
	     - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
	*/
	ID string

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the patch endpoint ID config params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *PatchEndpointIDConfigParams) WithDefaults() *PatchEndpointIDConfigParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the patch endpoint ID config params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *PatchEndpointIDConfigParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) WithTimeout(timeout time.Duration) *PatchEndpointIDConfigParams {
	p.SetTimeout(timeout)
	return p
}

// SetTimeout adds the timeout to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) WithContext(ctx context.Context) *PatchEndpointIDConfigParams {
	p.SetContext(ctx)
	return p
}

// SetContext adds the context to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) WithHTTPClient(client *http.Client) *PatchEndpointIDConfigParams {
	p.SetHTTPClient(client)
	return p
}

// SetHTTPClient adds the HTTPClient to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithEndpointConfiguration adds the endpointConfiguration to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) WithEndpointConfiguration(endpointConfiguration *models.EndpointConfigurationSpec) *PatchEndpointIDConfigParams {
	p.SetEndpointConfiguration(endpointConfiguration)
	return p
}

// SetEndpointConfiguration adds the endpointConfiguration to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) SetEndpointConfiguration(endpointConfiguration *models.EndpointConfigurationSpec) {
	p.EndpointConfiguration = endpointConfiguration
}

// WithID adds the id to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) WithID(id string) *PatchEndpointIDConfigParams {
	p.SetID(id)
	return p
}

// SetID adds the id to the patch endpoint ID config params.
func (p *PatchEndpointIDConfigParams) SetID(id string) {
	p.ID = id
}

// WriteToRequest writes these params to a swagger request.
func (p *PatchEndpointIDConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	var res []error

	// the configuration spec, when present, travels as the request body
	if p.EndpointConfiguration != nil {
		if err := r.SetBodyParam(p.EndpointConfiguration); err != nil {
			return err
		}
	}

	// path param id
	if err := r.SetPathParam("id", p.ID); err != nil {
		return err
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PatchEndpointIDConfigReader is a Reader for the PatchEndpointIDConfig structure.
type PatchEndpointIDConfigReader struct {
	// formats is the registry used to validate/parse strfmt types in payloads.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// It dispatches on the HTTP status code: a 200 is returned as the first
// (result) value, while every documented error status is returned as a
// non-nil error value; any undocumented status yields a runtime.APIError.
func (o *PatchEndpointIDConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		// Success: the populated result is the return value, not an error.
		result := NewPatchEndpointIDConfigOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		result := NewPatchEndpointIDConfigInvalid()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 403:
		result := NewPatchEndpointIDConfigForbidden()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		result := NewPatchEndpointIDConfigNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 429:
		result := NewPatchEndpointIDConfigTooManyRequests()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewPatchEndpointIDConfigFailed()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 503:
		result := NewPatchEndpointIDConfigServiceUnavailable()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		// Status code not described by the API spec.
		return nil, runtime.NewAPIError("[PATCH /endpoint/{id}/config] PatchEndpointIDConfig", response, response.Code())
	}
}
// NewPatchEndpointIDConfigOK creates a PatchEndpointIDConfigOK with default headers values
func NewPatchEndpointIDConfigOK() *PatchEndpointIDConfigOK {
	return new(PatchEndpointIDConfigOK)
}

/*
PatchEndpointIDConfigOK describes a response with status code 200, with default header values.

Success
*/
type PatchEndpointIDConfigOK struct {
}

// Code gets the status code for the patch endpoint Id config o k response
func (o *PatchEndpointIDConfigOK) Code() int {
	return 200
}

// IsCode returns true when this patch endpoint Id config o k response a status code equal to that given
func (o *PatchEndpointIDConfigOK) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id config o k response has a 2xx status code
func (o *PatchEndpointIDConfigOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this patch endpoint Id config o k response has a 3xx status code
func (o *PatchEndpointIDConfigOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id config o k response has a 4xx status code
func (o *PatchEndpointIDConfigOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this patch endpoint Id config o k response has a 5xx status code
func (o *PatchEndpointIDConfigOK) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDConfigOK) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK", 200)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDConfigOK) String() string {
	return o.Error()
}

// readResponse is a no-op: a 200 carries no payload for this operation.
func (o *PatchEndpointIDConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDConfigInvalid creates a PatchEndpointIDConfigInvalid with default headers values
func NewPatchEndpointIDConfigInvalid() *PatchEndpointIDConfigInvalid {
	return new(PatchEndpointIDConfigInvalid)
}

/*
PatchEndpointIDConfigInvalid describes a response with status code 400, with default header values.

Invalid configuration request
*/
type PatchEndpointIDConfigInvalid struct {
}

// Code gets the status code for the patch endpoint Id config invalid response
func (o *PatchEndpointIDConfigInvalid) Code() int {
	return 400
}

// IsCode returns true when this patch endpoint Id config invalid response a status code equal to that given
func (o *PatchEndpointIDConfigInvalid) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id config invalid response has a 2xx status code
func (o *PatchEndpointIDConfigInvalid) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id config invalid response has a 3xx status code
func (o *PatchEndpointIDConfigInvalid) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id config invalid response has a 4xx status code
func (o *PatchEndpointIDConfigInvalid) IsClientError() bool {
	return true
}

// IsServerError returns true when this patch endpoint Id config invalid response has a 5xx status code
func (o *PatchEndpointIDConfigInvalid) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDConfigInvalid) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid", 400)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDConfigInvalid) String() string {
	return o.Error()
}

// readResponse is a no-op: a 400 carries no payload for this operation.
func (o *PatchEndpointIDConfigInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDConfigForbidden creates a PatchEndpointIDConfigForbidden with default headers values
func NewPatchEndpointIDConfigForbidden() *PatchEndpointIDConfigForbidden {
	return new(PatchEndpointIDConfigForbidden)
}

/*
PatchEndpointIDConfigForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PatchEndpointIDConfigForbidden struct {
}

// Code gets the status code for the patch endpoint Id config forbidden response
func (o *PatchEndpointIDConfigForbidden) Code() int {
	return 403
}

// IsCode returns true when this patch endpoint Id config forbidden response a status code equal to that given
func (o *PatchEndpointIDConfigForbidden) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id config forbidden response has a 2xx status code
func (o *PatchEndpointIDConfigForbidden) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id config forbidden response has a 3xx status code
func (o *PatchEndpointIDConfigForbidden) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id config forbidden response has a 4xx status code
func (o *PatchEndpointIDConfigForbidden) IsClientError() bool {
	return true
}

// IsServerError returns true when this patch endpoint Id config forbidden response has a 5xx status code
func (o *PatchEndpointIDConfigForbidden) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDConfigForbidden) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigForbidden", 403)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDConfigForbidden) String() string {
	return o.Error()
}

// readResponse is a no-op: a 403 carries no payload for this operation.
func (o *PatchEndpointIDConfigForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDConfigNotFound creates a PatchEndpointIDConfigNotFound with default headers values
func NewPatchEndpointIDConfigNotFound() *PatchEndpointIDConfigNotFound {
	return new(PatchEndpointIDConfigNotFound)
}

/*
PatchEndpointIDConfigNotFound describes a response with status code 404, with default header values.

Endpoint not found
*/
type PatchEndpointIDConfigNotFound struct {
}

// Code gets the status code for the patch endpoint Id config not found response
func (o *PatchEndpointIDConfigNotFound) Code() int {
	return 404
}

// IsCode returns true when this patch endpoint Id config not found response a status code equal to that given
func (o *PatchEndpointIDConfigNotFound) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id config not found response has a 2xx status code
func (o *PatchEndpointIDConfigNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id config not found response has a 3xx status code
func (o *PatchEndpointIDConfigNotFound) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id config not found response has a 4xx status code
func (o *PatchEndpointIDConfigNotFound) IsClientError() bool {
	return true
}

// IsServerError returns true when this patch endpoint Id config not found response has a 5xx status code
func (o *PatchEndpointIDConfigNotFound) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDConfigNotFound) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound", 404)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDConfigNotFound) String() string {
	return o.Error()
}

// readResponse is a no-op: a 404 carries no payload for this operation.
func (o *PatchEndpointIDConfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDConfigTooManyRequests creates a PatchEndpointIDConfigTooManyRequests with default headers values
func NewPatchEndpointIDConfigTooManyRequests() *PatchEndpointIDConfigTooManyRequests {
	return new(PatchEndpointIDConfigTooManyRequests)
}

/*
PatchEndpointIDConfigTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type PatchEndpointIDConfigTooManyRequests struct {
}

// Code gets the status code for the patch endpoint Id config too many requests response
func (o *PatchEndpointIDConfigTooManyRequests) Code() int {
	return 429
}

// IsCode returns true when this patch endpoint Id config too many requests response a status code equal to that given
func (o *PatchEndpointIDConfigTooManyRequests) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id config too many requests response has a 2xx status code
func (o *PatchEndpointIDConfigTooManyRequests) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id config too many requests response has a 3xx status code
func (o *PatchEndpointIDConfigTooManyRequests) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id config too many requests response has a 4xx status code
func (o *PatchEndpointIDConfigTooManyRequests) IsClientError() bool {
	return true
}

// IsServerError returns true when this patch endpoint Id config too many requests response has a 5xx status code
func (o *PatchEndpointIDConfigTooManyRequests) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDConfigTooManyRequests) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests", 429)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDConfigTooManyRequests) String() string {
	return o.Error()
}

// readResponse is a no-op: a 429 carries no payload for this operation.
func (o *PatchEndpointIDConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDConfigFailed creates a PatchEndpointIDConfigFailed with default headers values
func NewPatchEndpointIDConfigFailed() *PatchEndpointIDConfigFailed {
	return new(PatchEndpointIDConfigFailed)
}

/*
PatchEndpointIDConfigFailed describes a response with status code 500, with default header values.

Update failed. Details in message.
*/
type PatchEndpointIDConfigFailed struct {
	// Payload holds the error body decoded from the 500 response.
	Payload models.Error
}

// Code gets the status code for the patch endpoint Id config failed response
func (o *PatchEndpointIDConfigFailed) Code() int {
	return 500
}

// IsCode returns true when this patch endpoint Id config failed response a status code equal to that given
func (o *PatchEndpointIDConfigFailed) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id config failed response has a 2xx status code
func (o *PatchEndpointIDConfigFailed) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id config failed response has a 3xx status code
func (o *PatchEndpointIDConfigFailed) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id config failed response has a 4xx status code
func (o *PatchEndpointIDConfigFailed) IsClientError() bool {
	return false
}

// IsServerError returns true when this patch endpoint Id config failed response has a 5xx status code
func (o *PatchEndpointIDConfigFailed) IsServerError() bool {
	return true
}

// Error renders the operation, path, status, and JSON-encoded payload.
func (o *PatchEndpointIDConfigFailed) Error() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %s", 500, body)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDConfigFailed) String() string {
	return o.Error()
}

// GetPayload returns the decoded error payload.
func (o *PatchEndpointIDConfigFailed) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into Payload; an empty body (io.EOF) is tolerated.
func (o *PatchEndpointIDConfigFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPatchEndpointIDConfigServiceUnavailable creates a PatchEndpointIDConfigServiceUnavailable with default headers values
func NewPatchEndpointIDConfigServiceUnavailable() *PatchEndpointIDConfigServiceUnavailable {
	return new(PatchEndpointIDConfigServiceUnavailable)
}

/*
PatchEndpointIDConfigServiceUnavailable describes a response with status code 503, with default header values.

Service Unavailable
*/
type PatchEndpointIDConfigServiceUnavailable struct {
}

// Code gets the status code for the patch endpoint Id config service unavailable response
func (o *PatchEndpointIDConfigServiceUnavailable) Code() int {
	return 503
}

// IsCode returns true when this patch endpoint Id config service unavailable response a status code equal to that given
func (o *PatchEndpointIDConfigServiceUnavailable) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id config service unavailable response has a 2xx status code
func (o *PatchEndpointIDConfigServiceUnavailable) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id config service unavailable response has a 3xx status code
func (o *PatchEndpointIDConfigServiceUnavailable) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id config service unavailable response has a 4xx status code
func (o *PatchEndpointIDConfigServiceUnavailable) IsClientError() bool {
	return false
}

// IsServerError returns true when this patch endpoint Id config service unavailable response has a 5xx status code
func (o *PatchEndpointIDConfigServiceUnavailable) IsServerError() bool {
	return true
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDConfigServiceUnavailable) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigServiceUnavailable", 503)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDConfigServiceUnavailable) String() string {
	return o.Error()
}

// readResponse is a no-op: a 503 carries no payload for this operation.
func (o *PatchEndpointIDConfigServiceUnavailable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewPatchEndpointIDLabelsParams creates a new PatchEndpointIDLabelsParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewPatchEndpointIDLabelsParams() *PatchEndpointIDLabelsParams {
	p := new(PatchEndpointIDLabelsParams)
	p.timeout = cr.DefaultTimeout
	return p
}

// NewPatchEndpointIDLabelsParamsWithTimeout creates a new PatchEndpointIDLabelsParams object
// with the ability to set a timeout on a request.
func NewPatchEndpointIDLabelsParamsWithTimeout(timeout time.Duration) *PatchEndpointIDLabelsParams {
	p := new(PatchEndpointIDLabelsParams)
	p.timeout = timeout
	return p
}

// NewPatchEndpointIDLabelsParamsWithContext creates a new PatchEndpointIDLabelsParams object
// with the ability to set a context for a request.
func NewPatchEndpointIDLabelsParamsWithContext(ctx context.Context) *PatchEndpointIDLabelsParams {
	p := new(PatchEndpointIDLabelsParams)
	p.Context = ctx
	return p
}

// NewPatchEndpointIDLabelsParamsWithHTTPClient creates a new PatchEndpointIDLabelsParams object
// with the ability to set a custom HTTPClient for a request.
func NewPatchEndpointIDLabelsParamsWithHTTPClient(client *http.Client) *PatchEndpointIDLabelsParams {
	p := new(PatchEndpointIDLabelsParams)
	p.HTTPClient = client
	return p
}

/*
PatchEndpointIDLabelsParams contains all the parameters to send to the API endpoint

	for the patch endpoint ID labels operation.

	Typically these are written to a http.Request.
*/
type PatchEndpointIDLabelsParams struct {

	// Configuration.
	Configuration *models.LabelConfigurationSpec

	/* ID.

	   String describing an endpoint with the format ``[prefix:]id``. If no prefix
	   is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
	   will be addressable by all endpoint ID prefixes with the exception of the
	   local Cilium UUID which is assigned to all endpoints.

	   Supported endpoint id prefixes:
	     - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
	     - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
	     - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
	     - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
	     - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
	     - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
	     - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
	     - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
	*/
	ID string

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the patch endpoint ID labels params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PatchEndpointIDLabelsParams) WithDefaults() *PatchEndpointIDLabelsParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the patch endpoint ID labels params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PatchEndpointIDLabelsParams) SetDefaults() {
	// This operation defines no parameter defaults; nothing to hydrate.
}

// WithTimeout adds the timeout to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) WithTimeout(timeout time.Duration) *PatchEndpointIDLabelsParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) WithContext(ctx context.Context) *PatchEndpointIDLabelsParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) WithHTTPClient(client *http.Client) *PatchEndpointIDLabelsParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithConfiguration adds the configuration to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) WithConfiguration(configuration *models.LabelConfigurationSpec) *PatchEndpointIDLabelsParams {
	o.Configuration = configuration
	return o
}

// SetConfiguration adds the configuration to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) SetConfiguration(configuration *models.LabelConfigurationSpec) {
	o.Configuration = configuration
}

// WithID adds the id to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) WithID(id string) *PatchEndpointIDLabelsParams {
	o.ID = id
	return o
}

// SetID adds the id to the patch endpoint ID labels params
func (o *PatchEndpointIDLabelsParams) SetID(id string) {
	o.ID = id
}

// WriteToRequest writes these params to a swagger request
func (o *PatchEndpointIDLabelsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	// optional request body
	if body := o.Configuration; body != nil {
		if err := r.SetBodyParam(body); err != nil {
			return err
		}
	}

	// path param id
	if err := r.SetPathParam("id", o.ID); err != nil {
		return err
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PatchEndpointIDLabelsReader is a Reader for the PatchEndpointIDLabels structure.
type PatchEndpointIDLabelsReader struct {
	// formats is the registry used to validate/parse strfmt types in payloads.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// It dispatches on the HTTP status code: a 200 is returned as the first
// (result) value, while every documented error status is returned as a
// non-nil error value; any undocumented status yields a runtime.APIError.
func (o *PatchEndpointIDLabelsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		// Success: the populated result is the return value, not an error.
		result := NewPatchEndpointIDLabelsOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 403:
		result := NewPatchEndpointIDLabelsForbidden()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		result := NewPatchEndpointIDLabelsNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 429:
		result := NewPatchEndpointIDLabelsTooManyRequests()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewPatchEndpointIDLabelsUpdateFailed()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 503:
		result := NewPatchEndpointIDLabelsServiceUnavailable()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		// Status code not described by the API spec.
		return nil, runtime.NewAPIError("[PATCH /endpoint/{id}/labels] PatchEndpointIDLabels", response, response.Code())
	}
}
// NewPatchEndpointIDLabelsOK creates a PatchEndpointIDLabelsOK with default headers values
func NewPatchEndpointIDLabelsOK() *PatchEndpointIDLabelsOK {
	return new(PatchEndpointIDLabelsOK)
}

/*
PatchEndpointIDLabelsOK describes a response with status code 200, with default header values.

Success
*/
type PatchEndpointIDLabelsOK struct {
}

// Code gets the status code for the patch endpoint Id labels o k response
func (o *PatchEndpointIDLabelsOK) Code() int {
	return 200
}

// IsCode returns true when this patch endpoint Id labels o k response a status code equal to that given
func (o *PatchEndpointIDLabelsOK) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id labels o k response has a 2xx status code
func (o *PatchEndpointIDLabelsOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this patch endpoint Id labels o k response has a 3xx status code
func (o *PatchEndpointIDLabelsOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id labels o k response has a 4xx status code
func (o *PatchEndpointIDLabelsOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this patch endpoint Id labels o k response has a 5xx status code
func (o *PatchEndpointIDLabelsOK) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDLabelsOK) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK", 200)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDLabelsOK) String() string {
	return o.Error()
}

// readResponse is a no-op: a 200 carries no payload for this operation.
func (o *PatchEndpointIDLabelsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDLabelsForbidden creates a PatchEndpointIDLabelsForbidden with default headers values
func NewPatchEndpointIDLabelsForbidden() *PatchEndpointIDLabelsForbidden {
	return new(PatchEndpointIDLabelsForbidden)
}

/*
PatchEndpointIDLabelsForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PatchEndpointIDLabelsForbidden struct {
}

// Code gets the status code for the patch endpoint Id labels forbidden response
func (o *PatchEndpointIDLabelsForbidden) Code() int {
	return 403
}

// IsCode returns true when this patch endpoint Id labels forbidden response a status code equal to that given
func (o *PatchEndpointIDLabelsForbidden) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id labels forbidden response has a 2xx status code
func (o *PatchEndpointIDLabelsForbidden) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id labels forbidden response has a 3xx status code
func (o *PatchEndpointIDLabelsForbidden) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id labels forbidden response has a 4xx status code
func (o *PatchEndpointIDLabelsForbidden) IsClientError() bool {
	return true
}

// IsServerError returns true when this patch endpoint Id labels forbidden response has a 5xx status code
func (o *PatchEndpointIDLabelsForbidden) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDLabelsForbidden) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsForbidden", 403)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDLabelsForbidden) String() string {
	return o.Error()
}

// readResponse is a no-op: a 403 carries no payload for this operation.
func (o *PatchEndpointIDLabelsForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDLabelsNotFound creates a PatchEndpointIDLabelsNotFound with default headers values
func NewPatchEndpointIDLabelsNotFound() *PatchEndpointIDLabelsNotFound {
	return new(PatchEndpointIDLabelsNotFound)
}

/*
PatchEndpointIDLabelsNotFound describes a response with status code 404, with default header values.

Endpoint not found
*/
type PatchEndpointIDLabelsNotFound struct {
}

// Code gets the status code for the patch endpoint Id labels not found response
func (o *PatchEndpointIDLabelsNotFound) Code() int {
	return 404
}

// IsCode returns true when this patch endpoint Id labels not found response a status code equal to that given
func (o *PatchEndpointIDLabelsNotFound) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id labels not found response has a 2xx status code
func (o *PatchEndpointIDLabelsNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id labels not found response has a 3xx status code
func (o *PatchEndpointIDLabelsNotFound) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id labels not found response has a 4xx status code
func (o *PatchEndpointIDLabelsNotFound) IsClientError() bool {
	return true
}

// IsServerError returns true when this patch endpoint Id labels not found response has a 5xx status code
func (o *PatchEndpointIDLabelsNotFound) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDLabelsNotFound) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound", 404)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDLabelsNotFound) String() string {
	return o.Error()
}

// readResponse is a no-op: a 404 carries no payload for this operation.
func (o *PatchEndpointIDLabelsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDLabelsTooManyRequests creates a PatchEndpointIDLabelsTooManyRequests with default headers values
func NewPatchEndpointIDLabelsTooManyRequests() *PatchEndpointIDLabelsTooManyRequests {
	return new(PatchEndpointIDLabelsTooManyRequests)
}

/*
PatchEndpointIDLabelsTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type PatchEndpointIDLabelsTooManyRequests struct {
}

// Code gets the status code for the patch endpoint Id labels too many requests response
func (o *PatchEndpointIDLabelsTooManyRequests) Code() int {
	return 429
}

// IsCode returns true when this patch endpoint Id labels too many requests response a status code equal to that given
func (o *PatchEndpointIDLabelsTooManyRequests) IsCode(code int) bool {
	return code == o.Code()
}

// IsSuccess returns true when this patch endpoint Id labels too many requests response has a 2xx status code
func (o *PatchEndpointIDLabelsTooManyRequests) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this patch endpoint Id labels too many requests response has a 3xx status code
func (o *PatchEndpointIDLabelsTooManyRequests) IsRedirect() bool {
	return false
}

// IsClientError returns true when this patch endpoint Id labels too many requests response has a 4xx status code
func (o *PatchEndpointIDLabelsTooManyRequests) IsClientError() bool {
	return true
}

// IsServerError returns true when this patch endpoint Id labels too many requests response has a 5xx status code
func (o *PatchEndpointIDLabelsTooManyRequests) IsServerError() bool {
	return false
}

// Error renders the operation, path, and status for this response.
func (o *PatchEndpointIDLabelsTooManyRequests) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests", 429)
}

// String produces the same representation as Error.
func (o *PatchEndpointIDLabelsTooManyRequests) String() string {
	return o.Error()
}

// readResponse is a no-op: a 429 carries no payload for this operation.
func (o *PatchEndpointIDLabelsTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDLabelsUpdateFailed creates a PatchEndpointIDLabelsUpdateFailed with default headers values
func NewPatchEndpointIDLabelsUpdateFailed() *PatchEndpointIDLabelsUpdateFailed {
	return new(PatchEndpointIDLabelsUpdateFailed)
}

/*
PatchEndpointIDLabelsUpdateFailed describes a response with status code 500, with default header values.

Error while updating labels
*/
type PatchEndpointIDLabelsUpdateFailed struct {
	// Payload carries the error model decoded from the response body.
	Payload models.Error
}

// IsSuccess reports whether the patch endpoint ID labels update failed response is a 2xx status code.
func (o *PatchEndpointIDLabelsUpdateFailed) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID labels update failed response is a 3xx status code.
func (o *PatchEndpointIDLabelsUpdateFailed) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID labels update failed response is a 4xx status code.
func (o *PatchEndpointIDLabelsUpdateFailed) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID labels update failed response is a 5xx status code.
func (o *PatchEndpointIDLabelsUpdateFailed) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID labels update failed response carries the given status code.
func (o *PatchEndpointIDLabelsUpdateFailed) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID labels update failed response.
func (o *PatchEndpointIDLabelsUpdateFailed) Code() int {
	return 500
}

// Error implements the error interface; the marshalled payload is appended for context.
func (o *PatchEndpointIDLabelsUpdateFailed) Error() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %s", o.Code(), b)
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDLabelsUpdateFailed) String() string {
	return o.Error()
}

// GetPayload returns the decoded error payload.
func (o *PatchEndpointIDLabelsUpdateFailed) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into Payload; io.EOF (empty body) is tolerated.
func (o *PatchEndpointIDLabelsUpdateFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPatchEndpointIDLabelsServiceUnavailable creates a PatchEndpointIDLabelsServiceUnavailable with default headers values
func NewPatchEndpointIDLabelsServiceUnavailable() *PatchEndpointIDLabelsServiceUnavailable {
	return new(PatchEndpointIDLabelsServiceUnavailable)
}

/*
PatchEndpointIDLabelsServiceUnavailable describes a response with status code 503, with default header values.

Service Unavailable
*/
type PatchEndpointIDLabelsServiceUnavailable struct{}

// IsSuccess reports whether the patch endpoint ID labels service unavailable response is a 2xx status code.
func (o *PatchEndpointIDLabelsServiceUnavailable) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID labels service unavailable response is a 3xx status code.
func (o *PatchEndpointIDLabelsServiceUnavailable) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID labels service unavailable response is a 4xx status code.
func (o *PatchEndpointIDLabelsServiceUnavailable) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID labels service unavailable response is a 5xx status code.
func (o *PatchEndpointIDLabelsServiceUnavailable) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID labels service unavailable response carries the given status code.
func (o *PatchEndpointIDLabelsServiceUnavailable) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID labels service unavailable response.
func (o *PatchEndpointIDLabelsServiceUnavailable) Code() int {
	return 503
}

// Error implements the error interface.
func (o *PatchEndpointIDLabelsServiceUnavailable) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsServiceUnavailable", o.Code())
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDLabelsServiceUnavailable) String() string {
	return o.Error()
}

// readResponse is a no-op: this response defines no payload.
func (o *PatchEndpointIDLabelsServiceUnavailable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewPatchEndpointIDParams creates a new PatchEndpointIDParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewPatchEndpointIDParams() *PatchEndpointIDParams {
return &PatchEndpointIDParams{
timeout: cr.DefaultTimeout,
}
}
// NewPatchEndpointIDParamsWithTimeout creates a new PatchEndpointIDParams object
// with the ability to set a timeout on a request.
func NewPatchEndpointIDParamsWithTimeout(timeout time.Duration) *PatchEndpointIDParams {
return &PatchEndpointIDParams{
timeout: timeout,
}
}
// NewPatchEndpointIDParamsWithContext creates a new PatchEndpointIDParams object
// with the ability to set a context for a request.
func NewPatchEndpointIDParamsWithContext(ctx context.Context) *PatchEndpointIDParams {
return &PatchEndpointIDParams{
Context: ctx,
}
}
// NewPatchEndpointIDParamsWithHTTPClient creates a new PatchEndpointIDParams object
// with the ability to set a custom HTTPClient for a request.
func NewPatchEndpointIDParamsWithHTTPClient(client *http.Client) *PatchEndpointIDParams {
return &PatchEndpointIDParams{
HTTPClient: client,
}
}
/*
PatchEndpointIDParams contains all the parameters to send to the API endpoint
for the patch endpoint ID operation.
Typically these are written to a http.Request.
*/
type PatchEndpointIDParams struct {
// Endpoint.
//
// Desired endpoint change; when non-nil it is sent as the request body
// (see WriteToRequest).
Endpoint *models.EndpointChangeRequest
/* ID.
String describing an endpoint with the format ``[prefix:]id``. If no prefix
is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
will be addressable by all endpoint ID prefixes with the exception of the
local Cilium UUID which is assigned to all endpoints.
Supported endpoint id prefixes:
- cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
- cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
- cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
- container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
- container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
- pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
- cep-name: cep name for this container if K8s is enabled, e.g. cep-name:default:foobar-net1
- docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
*/
ID string
// timeout bounds the request round trip; set via WithTimeout/SetTimeout.
timeout time.Duration
// Context, when non-nil, controls cancellation of the request.
Context context.Context
// HTTPClient optionally overrides the transport's default HTTP client.
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the patch endpoint ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PatchEndpointIDParams) WithDefaults() *PatchEndpointIDParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the patch endpoint ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PatchEndpointIDParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the patch endpoint ID params
func (o *PatchEndpointIDParams) WithTimeout(timeout time.Duration) *PatchEndpointIDParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the patch endpoint ID params
func (o *PatchEndpointIDParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the patch endpoint ID params
func (o *PatchEndpointIDParams) WithContext(ctx context.Context) *PatchEndpointIDParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the patch endpoint ID params
func (o *PatchEndpointIDParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the patch endpoint ID params
func (o *PatchEndpointIDParams) WithHTTPClient(client *http.Client) *PatchEndpointIDParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the patch endpoint ID params
func (o *PatchEndpointIDParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithEndpoint adds the endpoint to the patch endpoint ID params
func (o *PatchEndpointIDParams) WithEndpoint(endpoint *models.EndpointChangeRequest) *PatchEndpointIDParams {
o.SetEndpoint(endpoint)
return o
}
// SetEndpoint adds the endpoint to the patch endpoint ID params
func (o *PatchEndpointIDParams) SetEndpoint(endpoint *models.EndpointChangeRequest) {
o.Endpoint = endpoint
}
// WithID adds the id to the patch endpoint ID params
func (o *PatchEndpointIDParams) WithID(id string) *PatchEndpointIDParams {
o.SetID(id)
return o
}
// SetID adds the id to the patch endpoint ID params
func (o *PatchEndpointIDParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *PatchEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
// res collects validation errors; nothing appends to it in this operation,
// but the generated template keeps it for symmetry with other operations.
var res []error
if o.Endpoint != nil {
if err := r.SetBodyParam(o.Endpoint); err != nil {
return err
}
}
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PatchEndpointIDReader is a Reader for the PatchEndpointID structure.
type PatchEndpointIDReader struct {
// formats carries the strfmt registry passed to each result's readResponse.
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
//
// The 200 case is returned as the success value; every other recognized
// status code is decoded and returned as the error value. Unrecognized
// status codes produce a generic runtime.APIError.
func (o *PatchEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewPatchEndpointIDOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewPatchEndpointIDInvalid()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 403:
result := NewPatchEndpointIDForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 404:
result := NewPatchEndpointIDNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 429:
result := NewPatchEndpointIDTooManyRequests()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 500:
result := NewPatchEndpointIDFailed()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 503:
result := NewPatchEndpointIDServiceUnavailable()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("[PATCH /endpoint/{id}] PatchEndpointID", response, response.Code())
}
}
// NewPatchEndpointIDOK creates a PatchEndpointIDOK with default headers values
func NewPatchEndpointIDOK() *PatchEndpointIDOK {
	return new(PatchEndpointIDOK)
}

/*
PatchEndpointIDOK describes a response with status code 200, with default header values.

Success
*/
type PatchEndpointIDOK struct{}

// IsSuccess reports whether the patch endpoint ID o k response is a 2xx status code.
func (o *PatchEndpointIDOK) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID o k response is a 3xx status code.
func (o *PatchEndpointIDOK) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID o k response is a 4xx status code.
func (o *PatchEndpointIDOK) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID o k response is a 5xx status code.
func (o *PatchEndpointIDOK) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID o k response carries the given status code.
func (o *PatchEndpointIDOK) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID o k response.
func (o *PatchEndpointIDOK) Code() int {
	return 200
}

// Error implements the error interface.
func (o *PatchEndpointIDOK) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK", o.Code())
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDOK) String() string {
	return o.Error()
}

// readResponse is a no-op: this response defines no payload.
func (o *PatchEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDInvalid creates a PatchEndpointIDInvalid with default headers values
func NewPatchEndpointIDInvalid() *PatchEndpointIDInvalid {
	return new(PatchEndpointIDInvalid)
}

/*
PatchEndpointIDInvalid describes a response with status code 400, with default header values.

Invalid modify endpoint request
*/
type PatchEndpointIDInvalid struct {
	// Payload carries the error model decoded from the response body.
	Payload models.Error
}

// IsSuccess reports whether the patch endpoint ID invalid response is a 2xx status code.
func (o *PatchEndpointIDInvalid) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID invalid response is a 3xx status code.
func (o *PatchEndpointIDInvalid) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID invalid response is a 4xx status code.
func (o *PatchEndpointIDInvalid) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID invalid response is a 5xx status code.
func (o *PatchEndpointIDInvalid) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID invalid response carries the given status code.
func (o *PatchEndpointIDInvalid) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID invalid response.
func (o *PatchEndpointIDInvalid) Code() int {
	return 400
}

// Error implements the error interface; the marshalled payload is appended for context.
func (o *PatchEndpointIDInvalid) Error() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %s", o.Code(), b)
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDInvalid) String() string {
	return o.Error()
}

// GetPayload returns the decoded error payload.
func (o *PatchEndpointIDInvalid) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into Payload; io.EOF (empty body) is tolerated.
func (o *PatchEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPatchEndpointIDForbidden creates a PatchEndpointIDForbidden with default headers values
func NewPatchEndpointIDForbidden() *PatchEndpointIDForbidden {
	return new(PatchEndpointIDForbidden)
}

/*
PatchEndpointIDForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PatchEndpointIDForbidden struct{}

// IsSuccess reports whether the patch endpoint ID forbidden response is a 2xx status code.
func (o *PatchEndpointIDForbidden) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID forbidden response is a 3xx status code.
func (o *PatchEndpointIDForbidden) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID forbidden response is a 4xx status code.
func (o *PatchEndpointIDForbidden) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID forbidden response is a 5xx status code.
func (o *PatchEndpointIDForbidden) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID forbidden response carries the given status code.
func (o *PatchEndpointIDForbidden) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID forbidden response.
func (o *PatchEndpointIDForbidden) Code() int {
	return 403
}

// Error implements the error interface.
func (o *PatchEndpointIDForbidden) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdForbidden", o.Code())
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDForbidden) String() string {
	return o.Error()
}

// readResponse is a no-op: this response defines no payload.
func (o *PatchEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDNotFound creates a PatchEndpointIDNotFound with default headers values
func NewPatchEndpointIDNotFound() *PatchEndpointIDNotFound {
	return new(PatchEndpointIDNotFound)
}

/*
PatchEndpointIDNotFound describes a response with status code 404, with default header values.

Endpoint does not exist
*/
type PatchEndpointIDNotFound struct{}

// IsSuccess reports whether the patch endpoint ID not found response is a 2xx status code.
func (o *PatchEndpointIDNotFound) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID not found response is a 3xx status code.
func (o *PatchEndpointIDNotFound) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID not found response is a 4xx status code.
func (o *PatchEndpointIDNotFound) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID not found response is a 5xx status code.
func (o *PatchEndpointIDNotFound) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID not found response carries the given status code.
func (o *PatchEndpointIDNotFound) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID not found response.
func (o *PatchEndpointIDNotFound) Code() int {
	return 404
}

// Error implements the error interface.
func (o *PatchEndpointIDNotFound) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound", o.Code())
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDNotFound) String() string {
	return o.Error()
}

// readResponse is a no-op: this response defines no payload.
func (o *PatchEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDTooManyRequests creates a PatchEndpointIDTooManyRequests with default headers values
func NewPatchEndpointIDTooManyRequests() *PatchEndpointIDTooManyRequests {
	return new(PatchEndpointIDTooManyRequests)
}

/*
PatchEndpointIDTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type PatchEndpointIDTooManyRequests struct{}

// IsSuccess reports whether the patch endpoint ID too many requests response is a 2xx status code.
func (o *PatchEndpointIDTooManyRequests) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID too many requests response is a 3xx status code.
func (o *PatchEndpointIDTooManyRequests) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID too many requests response is a 4xx status code.
func (o *PatchEndpointIDTooManyRequests) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID too many requests response is a 5xx status code.
func (o *PatchEndpointIDTooManyRequests) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID too many requests response carries the given status code.
func (o *PatchEndpointIDTooManyRequests) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID too many requests response.
func (o *PatchEndpointIDTooManyRequests) Code() int {
	return 429
}

// Error implements the error interface.
func (o *PatchEndpointIDTooManyRequests) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests", o.Code())
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDTooManyRequests) String() string {
	return o.Error()
}

// readResponse is a no-op: this response defines no payload.
func (o *PatchEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPatchEndpointIDFailed creates a PatchEndpointIDFailed with default headers values
func NewPatchEndpointIDFailed() *PatchEndpointIDFailed {
	return new(PatchEndpointIDFailed)
}

/*
PatchEndpointIDFailed describes a response with status code 500, with default header values.

Endpoint update failed
*/
type PatchEndpointIDFailed struct {
	// Payload carries the error model decoded from the response body.
	Payload models.Error
}

// IsSuccess reports whether the patch endpoint ID failed response is a 2xx status code.
func (o *PatchEndpointIDFailed) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID failed response is a 3xx status code.
func (o *PatchEndpointIDFailed) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID failed response is a 4xx status code.
func (o *PatchEndpointIDFailed) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID failed response is a 5xx status code.
func (o *PatchEndpointIDFailed) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID failed response carries the given status code.
func (o *PatchEndpointIDFailed) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID failed response.
func (o *PatchEndpointIDFailed) Code() int {
	return 500
}

// Error implements the error interface; the marshalled payload is appended for context.
func (o *PatchEndpointIDFailed) Error() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %s", o.Code(), b)
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDFailed) String() string {
	return o.Error()
}

// GetPayload returns the decoded error payload.
func (o *PatchEndpointIDFailed) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into Payload; io.EOF (empty body) is tolerated.
func (o *PatchEndpointIDFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPatchEndpointIDServiceUnavailable creates a PatchEndpointIDServiceUnavailable with default headers values
func NewPatchEndpointIDServiceUnavailable() *PatchEndpointIDServiceUnavailable {
	return new(PatchEndpointIDServiceUnavailable)
}

/*
PatchEndpointIDServiceUnavailable describes a response with status code 503, with default header values.

Service Unavailable
*/
type PatchEndpointIDServiceUnavailable struct{}

// IsSuccess reports whether the patch endpoint ID service unavailable response is a 2xx status code.
func (o *PatchEndpointIDServiceUnavailable) IsSuccess() bool {
	return o.Code()/100 == 2
}

// IsRedirect reports whether the patch endpoint ID service unavailable response is a 3xx status code.
func (o *PatchEndpointIDServiceUnavailable) IsRedirect() bool {
	return o.Code()/100 == 3
}

// IsClientError reports whether the patch endpoint ID service unavailable response is a 4xx status code.
func (o *PatchEndpointIDServiceUnavailable) IsClientError() bool {
	return o.Code()/100 == 4
}

// IsServerError reports whether the patch endpoint ID service unavailable response is a 5xx status code.
func (o *PatchEndpointIDServiceUnavailable) IsServerError() bool {
	return o.Code()/100 == 5
}

// IsCode reports whether the patch endpoint ID service unavailable response carries the given status code.
func (o *PatchEndpointIDServiceUnavailable) IsCode(code int) bool {
	return code == o.Code()
}

// Code returns the HTTP status code of the patch endpoint ID service unavailable response.
func (o *PatchEndpointIDServiceUnavailable) Code() int {
	return 503
}

// Error implements the error interface.
func (o *PatchEndpointIDServiceUnavailable) Error() string {
	return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdServiceUnavailable", o.Code())
}

// String implements fmt.Stringer and matches Error.
func (o *PatchEndpointIDServiceUnavailable) String() string {
	return o.Error()
}

// readResponse is a no-op: this response defines no payload.
func (o *PatchEndpointIDServiceUnavailable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewPutEndpointIDParams creates a new PutEndpointIDParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewPutEndpointIDParams() *PutEndpointIDParams {
return &PutEndpointIDParams{
timeout: cr.DefaultTimeout,
}
}
// NewPutEndpointIDParamsWithTimeout creates a new PutEndpointIDParams object
// with the ability to set a timeout on a request.
func NewPutEndpointIDParamsWithTimeout(timeout time.Duration) *PutEndpointIDParams {
return &PutEndpointIDParams{
timeout: timeout,
}
}
// NewPutEndpointIDParamsWithContext creates a new PutEndpointIDParams object
// with the ability to set a context for a request.
func NewPutEndpointIDParamsWithContext(ctx context.Context) *PutEndpointIDParams {
return &PutEndpointIDParams{
Context: ctx,
}
}
// NewPutEndpointIDParamsWithHTTPClient creates a new PutEndpointIDParams object
// with the ability to set a custom HTTPClient for a request.
func NewPutEndpointIDParamsWithHTTPClient(client *http.Client) *PutEndpointIDParams {
return &PutEndpointIDParams{
HTTPClient: client,
}
}
/*
PutEndpointIDParams contains all the parameters to send to the API endpoint
for the put endpoint ID operation.
Typically these are written to a http.Request.
*/
type PutEndpointIDParams struct {
// Endpoint.
Endpoint *models.EndpointChangeRequest
/* ID.
String describing an endpoint with the format ``[prefix:]id``. If no prefix
is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
will be addressable by all endpoint ID prefixes with the exception of the
local Cilium UUID which is assigned to all endpoints.
Supported endpoint id prefixes:
- cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
- cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
- cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
- container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
- container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
- pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
- cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
- docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the put endpoint ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PutEndpointIDParams) WithDefaults() *PutEndpointIDParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the put endpoint ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PutEndpointIDParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the put endpoint ID params
func (o *PutEndpointIDParams) WithTimeout(timeout time.Duration) *PutEndpointIDParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the put endpoint ID params
func (o *PutEndpointIDParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the put endpoint ID params
func (o *PutEndpointIDParams) WithContext(ctx context.Context) *PutEndpointIDParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the put endpoint ID params
func (o *PutEndpointIDParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the put endpoint ID params
func (o *PutEndpointIDParams) WithHTTPClient(client *http.Client) *PutEndpointIDParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the put endpoint ID params
func (o *PutEndpointIDParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithEndpoint adds the endpoint to the put endpoint ID params
func (o *PutEndpointIDParams) WithEndpoint(endpoint *models.EndpointChangeRequest) *PutEndpointIDParams {
o.SetEndpoint(endpoint)
return o
}
// SetEndpoint adds the endpoint to the put endpoint ID params
func (o *PutEndpointIDParams) SetEndpoint(endpoint *models.EndpointChangeRequest) {
o.Endpoint = endpoint
}
// WithID adds the id (the path parameter) to the put endpoint ID params, returning o to allow chaining.
func (o *PutEndpointIDParams) WithID(id string) *PutEndpointIDParams {
	o.SetID(id)
	return o
}
// SetID sets the id (the path parameter) on the put endpoint ID params.
func (o *PutEndpointIDParams) SetID(id string) {
	o.ID = id
}
// WriteToRequest writes these params to a swagger request: the timeout, the
// optional endpoint change body, and the id path parameter.
func (o *PutEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	// body param: only serialized when a change request was supplied
	if o.Endpoint != nil {
		if err := r.SetBodyParam(o.Endpoint); err != nil {
			return err
		}
	}
	// path param id
	if err := r.SetPathParam("id", o.ID); err != nil {
		return err
	}
	// res is never appended to above, so this branch is effectively
	// unreachable; kept for symmetry with other generated WriteToRequest bodies.
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package endpoint
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PutEndpointIDReader is a Reader for the PutEndpointID structure.
type PutEndpointIDReader struct {
	// formats holds the strfmt registry passed to each typed response decoder.
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
//
// The 201 response is returned as (result, nil); every recognized error status
// (400, 403, 409, 429, 500, 503) is returned as (nil, result), where the typed
// result doubles as the error value. Any other status yields a runtime.APIError.
func (o *PutEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 201:
		result := NewPutEndpointIDCreated()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		result := NewPutEndpointIDInvalid()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 403:
		result := NewPutEndpointIDForbidden()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 409:
		result := NewPutEndpointIDExists()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 429:
		result := NewPutEndpointIDTooManyRequests()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewPutEndpointIDFailed()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 503:
		result := NewPutEndpointIDServiceUnavailable()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[PUT /endpoint/{id}] PutEndpointID", response, response.Code())
	}
}
// NewPutEndpointIDCreated creates a PutEndpointIDCreated with default headers values.
func NewPutEndpointIDCreated() *PutEndpointIDCreated {
	return &PutEndpointIDCreated{}
}

/*
PutEndpointIDCreated describes a response with status code 201, with default header values.

Created
*/
type PutEndpointIDCreated struct {
	// Payload carries the endpoint model decoded from the response body.
	Payload *models.Endpoint
}

// IsSuccess returns true when this put endpoint Id created response has a 2xx status code.
func (o *PutEndpointIDCreated) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this put endpoint Id created response has a 3xx status code.
func (o *PutEndpointIDCreated) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put endpoint Id created response has a 4xx status code.
func (o *PutEndpointIDCreated) IsClientError() bool {
	return false
}

// IsServerError returns true when this put endpoint Id created response has a 5xx status code.
func (o *PutEndpointIDCreated) IsServerError() bool {
	return false
}

// IsCode returns true when this put endpoint Id created response has a status code equal to that given.
func (o *PutEndpointIDCreated) IsCode(code int) bool {
	return code == 201
}

// Code gets the status code for the put endpoint Id created response.
func (o *PutEndpointIDCreated) Code() int {
	return 201
}

// Error renders the method, path, status code and JSON-encoded payload.
func (o *PutEndpointIDCreated) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated %s", 201, payload)
}

// String returns the same representation as Error.
func (o *PutEndpointIDCreated) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated %s", 201, payload)
}

// GetPayload returns the decoded response payload.
func (o *PutEndpointIDCreated) GetPayload() *models.Endpoint {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF (empty body) is tolerated.
func (o *PutEndpointIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.Endpoint)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPutEndpointIDInvalid creates a PutEndpointIDInvalid with default headers values.
func NewPutEndpointIDInvalid() *PutEndpointIDInvalid {
	return &PutEndpointIDInvalid{}
}

/*
PutEndpointIDInvalid describes a response with status code 400, with default header values.

Invalid endpoint in request
*/
type PutEndpointIDInvalid struct {
	// Payload carries the error model decoded from the response body.
	Payload models.Error
}

// IsSuccess returns true when this put endpoint Id invalid response has a 2xx status code.
func (o *PutEndpointIDInvalid) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put endpoint Id invalid response has a 3xx status code.
func (o *PutEndpointIDInvalid) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put endpoint Id invalid response has a 4xx status code.
func (o *PutEndpointIDInvalid) IsClientError() bool {
	return true
}

// IsServerError returns true when this put endpoint Id invalid response has a 5xx status code.
func (o *PutEndpointIDInvalid) IsServerError() bool {
	return false
}

// IsCode returns true when this put endpoint Id invalid response has a status code equal to that given.
func (o *PutEndpointIDInvalid) IsCode(code int) bool {
	return code == 400
}

// Code gets the status code for the put endpoint Id invalid response.
func (o *PutEndpointIDInvalid) Code() int {
	return 400
}

// Error renders the method, path, status code and JSON-encoded payload.
func (o *PutEndpointIDInvalid) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %s", 400, payload)
}

// String returns the same representation as Error.
func (o *PutEndpointIDInvalid) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %s", 400, payload)
}

// GetPayload returns the decoded error payload.
func (o *PutEndpointIDInvalid) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF (empty body) is tolerated.
func (o *PutEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPutEndpointIDForbidden creates a PutEndpointIDForbidden with default headers values.
func NewPutEndpointIDForbidden() *PutEndpointIDForbidden {
	return &PutEndpointIDForbidden{}
}

/*
PutEndpointIDForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PutEndpointIDForbidden struct {
	// This response carries no body.
}

// IsSuccess returns true when this put endpoint Id forbidden response has a 2xx status code.
func (o *PutEndpointIDForbidden) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put endpoint Id forbidden response has a 3xx status code.
func (o *PutEndpointIDForbidden) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put endpoint Id forbidden response has a 4xx status code.
func (o *PutEndpointIDForbidden) IsClientError() bool {
	return true
}

// IsServerError returns true when this put endpoint Id forbidden response has a 5xx status code.
func (o *PutEndpointIDForbidden) IsServerError() bool {
	return false
}

// IsCode returns true when this put endpoint Id forbidden response has a status code equal to that given.
func (o *PutEndpointIDForbidden) IsCode(code int) bool {
	return code == 403
}

// Code gets the status code for the put endpoint Id forbidden response.
func (o *PutEndpointIDForbidden) Code() int {
	return 403
}

// Error renders the method, path and status code.
func (o *PutEndpointIDForbidden) Error() string {
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdForbidden", 403)
}

// String returns the same representation as Error.
func (o *PutEndpointIDForbidden) String() string {
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdForbidden", 403)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *PutEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPutEndpointIDExists creates a PutEndpointIDExists with default headers values.
func NewPutEndpointIDExists() *PutEndpointIDExists {
	return &PutEndpointIDExists{}
}

/*
PutEndpointIDExists describes a response with status code 409, with default header values.

Endpoint already exists
*/
type PutEndpointIDExists struct {
	// This response carries no body.
}

// IsSuccess returns true when this put endpoint Id exists response has a 2xx status code.
func (o *PutEndpointIDExists) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put endpoint Id exists response has a 3xx status code.
func (o *PutEndpointIDExists) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put endpoint Id exists response has a 4xx status code.
func (o *PutEndpointIDExists) IsClientError() bool {
	return true
}

// IsServerError returns true when this put endpoint Id exists response has a 5xx status code.
func (o *PutEndpointIDExists) IsServerError() bool {
	return false
}

// IsCode returns true when this put endpoint Id exists response has a status code equal to that given.
func (o *PutEndpointIDExists) IsCode(code int) bool {
	return code == 409
}

// Code gets the status code for the put endpoint Id exists response.
func (o *PutEndpointIDExists) Code() int {
	return 409
}

// Error renders the method, path and status code.
func (o *PutEndpointIDExists) Error() string {
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists", 409)
}

// String returns the same representation as Error.
func (o *PutEndpointIDExists) String() string {
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists", 409)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *PutEndpointIDExists) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPutEndpointIDTooManyRequests creates a PutEndpointIDTooManyRequests with default headers values.
func NewPutEndpointIDTooManyRequests() *PutEndpointIDTooManyRequests {
	return &PutEndpointIDTooManyRequests{}
}

/*
PutEndpointIDTooManyRequests describes a response with status code 429, with default header values.

Rate-limiting too many requests in the given time frame
*/
type PutEndpointIDTooManyRequests struct {
	// This response carries no body.
}

// IsSuccess returns true when this put endpoint Id too many requests response has a 2xx status code.
func (o *PutEndpointIDTooManyRequests) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put endpoint Id too many requests response has a 3xx status code.
func (o *PutEndpointIDTooManyRequests) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put endpoint Id too many requests response has a 4xx status code.
func (o *PutEndpointIDTooManyRequests) IsClientError() bool {
	return true
}

// IsServerError returns true when this put endpoint Id too many requests response has a 5xx status code.
func (o *PutEndpointIDTooManyRequests) IsServerError() bool {
	return false
}

// IsCode returns true when this put endpoint Id too many requests response has a status code equal to that given.
func (o *PutEndpointIDTooManyRequests) IsCode(code int) bool {
	return code == 429
}

// Code gets the status code for the put endpoint Id too many requests response.
func (o *PutEndpointIDTooManyRequests) Code() int {
	return 429
}

// Error renders the method, path and status code.
func (o *PutEndpointIDTooManyRequests) Error() string {
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests", 429)
}

// String returns the same representation as Error.
func (o *PutEndpointIDTooManyRequests) String() string {
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests", 429)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *PutEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPutEndpointIDFailed creates a PutEndpointIDFailed with default headers values.
func NewPutEndpointIDFailed() *PutEndpointIDFailed {
	return &PutEndpointIDFailed{}
}

/*
PutEndpointIDFailed describes a response with status code 500, with default header values.

Endpoint creation failed
*/
type PutEndpointIDFailed struct {
	// Payload carries the error model decoded from the response body.
	Payload models.Error
}

// IsSuccess returns true when this put endpoint Id failed response has a 2xx status code.
func (o *PutEndpointIDFailed) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put endpoint Id failed response has a 3xx status code.
func (o *PutEndpointIDFailed) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put endpoint Id failed response has a 4xx status code.
func (o *PutEndpointIDFailed) IsClientError() bool {
	return false
}

// IsServerError returns true when this put endpoint Id failed response has a 5xx status code.
func (o *PutEndpointIDFailed) IsServerError() bool {
	return true
}

// IsCode returns true when this put endpoint Id failed response has a status code equal to that given.
func (o *PutEndpointIDFailed) IsCode(code int) bool {
	return code == 500
}

// Code gets the status code for the put endpoint Id failed response.
func (o *PutEndpointIDFailed) Code() int {
	return 500
}

// Error renders the method, path, status code and JSON-encoded payload.
func (o *PutEndpointIDFailed) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %s", 500, payload)
}

// String returns the same representation as Error.
func (o *PutEndpointIDFailed) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %s", 500, payload)
}

// GetPayload returns the decoded error payload.
func (o *PutEndpointIDFailed) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF (empty body) is tolerated.
func (o *PutEndpointIDFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPutEndpointIDServiceUnavailable creates a PutEndpointIDServiceUnavailable with default headers values.
func NewPutEndpointIDServiceUnavailable() *PutEndpointIDServiceUnavailable {
	return &PutEndpointIDServiceUnavailable{}
}

/*
PutEndpointIDServiceUnavailable describes a response with status code 503, with default header values.

Service Unavailable
*/
type PutEndpointIDServiceUnavailable struct {
	// This response carries no body.
}

// IsSuccess returns true when this put endpoint Id service unavailable response has a 2xx status code.
func (o *PutEndpointIDServiceUnavailable) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put endpoint Id service unavailable response has a 3xx status code.
func (o *PutEndpointIDServiceUnavailable) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put endpoint Id service unavailable response has a 4xx status code.
func (o *PutEndpointIDServiceUnavailable) IsClientError() bool {
	return false
}

// IsServerError returns true when this put endpoint Id service unavailable response has a 5xx status code.
func (o *PutEndpointIDServiceUnavailable) IsServerError() bool {
	return true
}

// IsCode returns true when this put endpoint Id service unavailable response has a status code equal to that given.
func (o *PutEndpointIDServiceUnavailable) IsCode(code int) bool {
	return code == 503
}

// Code gets the status code for the put endpoint Id service unavailable response.
func (o *PutEndpointIDServiceUnavailable) Code() int {
	return 503
}

// Error renders the method, path and status code.
func (o *PutEndpointIDServiceUnavailable) Error() string {
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdServiceUnavailable", 503)
}

// String returns the same representation as Error.
func (o *PutEndpointIDServiceUnavailable) String() string {
	return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdServiceUnavailable", 503)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *PutEndpointIDServiceUnavailable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package ipam
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewDeleteIpamIPParams creates a new DeleteIpamIPParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewDeleteIpamIPParams() *DeleteIpamIPParams {
	return &DeleteIpamIPParams{
		timeout: cr.DefaultTimeout,
	}
}

// NewDeleteIpamIPParamsWithTimeout creates a new DeleteIpamIPParams object
// with the ability to set a timeout on a request.
func NewDeleteIpamIPParamsWithTimeout(timeout time.Duration) *DeleteIpamIPParams {
	return &DeleteIpamIPParams{
		timeout: timeout,
	}
}

// NewDeleteIpamIPParamsWithContext creates a new DeleteIpamIPParams object
// with the ability to set a context for a request.
func NewDeleteIpamIPParamsWithContext(ctx context.Context) *DeleteIpamIPParams {
	return &DeleteIpamIPParams{
		Context: ctx,
	}
}

// NewDeleteIpamIPParamsWithHTTPClient creates a new DeleteIpamIPParams object
// with the ability to set a custom HTTPClient for a request.
func NewDeleteIpamIPParamsWithHTTPClient(client *http.Client) *DeleteIpamIPParams {
	return &DeleteIpamIPParams{
		HTTPClient: client,
	}
}

/*
DeleteIpamIPParams contains all the parameters to send to the API endpoint
for the delete ipam IP operation.

Typically these are written to a http.Request.
*/
type DeleteIpamIPParams struct {
	/* IP.

	   IP address
	*/
	IP string

	// Pool is an optional query parameter; nil or empty means it is not sent.
	Pool *string

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the delete ipam IP params (not the query body),
// returning o to allow chaining.
//
// All values with no default are reset to their zero value.
func (o *DeleteIpamIPParams) WithDefaults() *DeleteIpamIPParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the delete ipam IP params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DeleteIpamIPParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the delete ipam IP params, returning o to allow chaining.
func (o *DeleteIpamIPParams) WithTimeout(timeout time.Duration) *DeleteIpamIPParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout sets the request timeout on the delete ipam IP params.
func (o *DeleteIpamIPParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the delete ipam IP params, returning o to allow chaining.
func (o *DeleteIpamIPParams) WithContext(ctx context.Context) *DeleteIpamIPParams {
	o.SetContext(ctx)
	return o
}

// SetContext sets the request context on the delete ipam IP params.
func (o *DeleteIpamIPParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the delete ipam IP params, returning o to allow chaining.
func (o *DeleteIpamIPParams) WithHTTPClient(client *http.Client) *DeleteIpamIPParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient sets the HTTP client to use for this request.
func (o *DeleteIpamIPParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithIP adds the ip (the path parameter) to the delete ipam IP params, returning o to allow chaining.
func (o *DeleteIpamIPParams) WithIP(ip string) *DeleteIpamIPParams {
	o.SetIP(ip)
	return o
}

// SetIP sets the ip (the path parameter) on the delete ipam IP params.
func (o *DeleteIpamIPParams) SetIP(ip string) {
	o.IP = ip
}

// WithPool adds the pool (the optional query parameter) to the delete ipam IP params,
// returning o to allow chaining.
func (o *DeleteIpamIPParams) WithPool(pool *string) *DeleteIpamIPParams {
	o.SetPool(pool)
	return o
}

// SetPool sets the pool (the optional query parameter) on the delete ipam IP params.
func (o *DeleteIpamIPParams) SetPool(pool *string) {
	o.Pool = pool
}

// WriteToRequest writes these params to a swagger request: the timeout, the ip
// path parameter, and — when set and non-empty — the pool query parameter.
func (o *DeleteIpamIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	// path param ip
	if err := r.SetPathParam("ip", o.IP); err != nil {
		return err
	}

	// query param pool: optional, and empty values are not sent.
	// (The generator emitted a second, redundant nil check here; collapsed
	// into the single guard below — behavior is unchanged.)
	if o.Pool != nil && *o.Pool != "" {
		if err := r.SetQueryParam("pool", *o.Pool); err != nil {
			return err
		}
	}

	// res is never appended to above; kept so the generated error-collection
	// shape (and the errors import) stays intact.
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package ipam
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// DeleteIpamIPReader is a Reader for the DeleteIpamIP structure.
type DeleteIpamIPReader struct {
	// formats holds the strfmt registry passed to each typed response decoder.
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
//
// The 200 response is returned as (result, nil); every recognized error status
// (400, 403, 404, 500, 501) is returned as (nil, result), where the typed
// result doubles as the error value. Any other status yields a runtime.APIError.
func (o *DeleteIpamIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewDeleteIpamIPOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		result := NewDeleteIpamIPInvalid()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 403:
		result := NewDeleteIpamIPForbidden()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 404:
		result := NewDeleteIpamIPNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewDeleteIpamIPFailure()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 501:
		result := NewDeleteIpamIPDisabled()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[DELETE /ipam/{ip}] DeleteIpamIP", response, response.Code())
	}
}
// NewDeleteIpamIPOK creates a DeleteIpamIPOK with default headers values.
func NewDeleteIpamIPOK() *DeleteIpamIPOK {
	return &DeleteIpamIPOK{}
}

/*
DeleteIpamIPOK describes a response with status code 200, with default header values.

Success
*/
type DeleteIpamIPOK struct {
	// This response carries no body.
}

// IsSuccess returns true when this delete ipam Ip o k response has a 2xx status code.
func (o *DeleteIpamIPOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this delete ipam Ip o k response has a 3xx status code.
func (o *DeleteIpamIPOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete ipam Ip o k response has a 4xx status code.
func (o *DeleteIpamIPOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this delete ipam Ip o k response has a 5xx status code.
func (o *DeleteIpamIPOK) IsServerError() bool {
	return false
}

// IsCode returns true when this delete ipam Ip o k response has a status code equal to that given.
func (o *DeleteIpamIPOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the delete ipam Ip o k response.
func (o *DeleteIpamIPOK) Code() int {
	return 200
}

// Error renders the method, path and status code.
func (o *DeleteIpamIPOK) Error() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK", 200)
}

// String returns the same representation as Error.
func (o *DeleteIpamIPOK) String() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK", 200)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *DeleteIpamIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteIpamIPInvalid creates a DeleteIpamIPInvalid with default headers values.
func NewDeleteIpamIPInvalid() *DeleteIpamIPInvalid {
	return &DeleteIpamIPInvalid{}
}

/*
DeleteIpamIPInvalid describes a response with status code 400, with default header values.

Invalid IP address
*/
type DeleteIpamIPInvalid struct {
	// This response carries no body.
}

// IsSuccess returns true when this delete ipam Ip invalid response has a 2xx status code.
func (o *DeleteIpamIPInvalid) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this delete ipam Ip invalid response has a 3xx status code.
func (o *DeleteIpamIPInvalid) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete ipam Ip invalid response has a 4xx status code.
func (o *DeleteIpamIPInvalid) IsClientError() bool {
	return true
}

// IsServerError returns true when this delete ipam Ip invalid response has a 5xx status code.
func (o *DeleteIpamIPInvalid) IsServerError() bool {
	return false
}

// IsCode returns true when this delete ipam Ip invalid response has a status code equal to that given.
func (o *DeleteIpamIPInvalid) IsCode(code int) bool {
	return code == 400
}

// Code gets the status code for the delete ipam Ip invalid response.
func (o *DeleteIpamIPInvalid) Code() int {
	return 400
}

// Error renders the method, path and status code.
func (o *DeleteIpamIPInvalid) Error() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid", 400)
}

// String returns the same representation as Error.
func (o *DeleteIpamIPInvalid) String() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid", 400)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *DeleteIpamIPInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteIpamIPForbidden creates a DeleteIpamIPForbidden with default headers values.
func NewDeleteIpamIPForbidden() *DeleteIpamIPForbidden {
	return &DeleteIpamIPForbidden{}
}

/*
DeleteIpamIPForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type DeleteIpamIPForbidden struct {
	// This response carries no body.
}

// IsSuccess returns true when this delete ipam Ip forbidden response has a 2xx status code.
func (o *DeleteIpamIPForbidden) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this delete ipam Ip forbidden response has a 3xx status code.
func (o *DeleteIpamIPForbidden) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete ipam Ip forbidden response has a 4xx status code.
func (o *DeleteIpamIPForbidden) IsClientError() bool {
	return true
}

// IsServerError returns true when this delete ipam Ip forbidden response has a 5xx status code.
func (o *DeleteIpamIPForbidden) IsServerError() bool {
	return false
}

// IsCode returns true when this delete ipam Ip forbidden response has a status code equal to that given.
func (o *DeleteIpamIPForbidden) IsCode(code int) bool {
	return code == 403
}

// Code gets the status code for the delete ipam Ip forbidden response.
func (o *DeleteIpamIPForbidden) Code() int {
	return 403
}

// Error renders the method, path and status code.
func (o *DeleteIpamIPForbidden) Error() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpForbidden", 403)
}

// String returns the same representation as Error.
func (o *DeleteIpamIPForbidden) String() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpForbidden", 403)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *DeleteIpamIPForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteIpamIPNotFound creates a DeleteIpamIPNotFound with default headers values.
func NewDeleteIpamIPNotFound() *DeleteIpamIPNotFound {
	return &DeleteIpamIPNotFound{}
}

/*
DeleteIpamIPNotFound describes a response with status code 404, with default header values.

IP address not found
*/
type DeleteIpamIPNotFound struct {
	// This response carries no body.
}

// IsSuccess returns true when this delete ipam Ip not found response has a 2xx status code.
func (o *DeleteIpamIPNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this delete ipam Ip not found response has a 3xx status code.
func (o *DeleteIpamIPNotFound) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete ipam Ip not found response has a 4xx status code.
func (o *DeleteIpamIPNotFound) IsClientError() bool {
	return true
}

// IsServerError returns true when this delete ipam Ip not found response has a 5xx status code.
func (o *DeleteIpamIPNotFound) IsServerError() bool {
	return false
}

// IsCode returns true when this delete ipam Ip not found response has a status code equal to that given.
func (o *DeleteIpamIPNotFound) IsCode(code int) bool {
	return code == 404
}

// Code gets the status code for the delete ipam Ip not found response.
func (o *DeleteIpamIPNotFound) Code() int {
	return 404
}

// Error renders the method, path and status code.
func (o *DeleteIpamIPNotFound) Error() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound", 404)
}

// String returns the same representation as Error.
func (o *DeleteIpamIPNotFound) String() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound", 404)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *DeleteIpamIPNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeleteIpamIPFailure creates a DeleteIpamIPFailure with default headers values.
func NewDeleteIpamIPFailure() *DeleteIpamIPFailure {
	return &DeleteIpamIPFailure{}
}

/*
DeleteIpamIPFailure describes a response with status code 500, with default header values.

Address release failure
*/
type DeleteIpamIPFailure struct {
	// Payload carries the error model decoded from the response body.
	Payload models.Error
}

// IsSuccess returns true when this delete ipam Ip failure response has a 2xx status code.
func (o *DeleteIpamIPFailure) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this delete ipam Ip failure response has a 3xx status code.
func (o *DeleteIpamIPFailure) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete ipam Ip failure response has a 4xx status code.
func (o *DeleteIpamIPFailure) IsClientError() bool {
	return false
}

// IsServerError returns true when this delete ipam Ip failure response has a 5xx status code.
func (o *DeleteIpamIPFailure) IsServerError() bool {
	return true
}

// IsCode returns true when this delete ipam Ip failure response has a status code equal to that given.
func (o *DeleteIpamIPFailure) IsCode(code int) bool {
	return code == 500
}

// Code gets the status code for the delete ipam Ip failure response.
func (o *DeleteIpamIPFailure) Code() int {
	return 500
}

// Error renders the method, path, status code and JSON-encoded payload.
func (o *DeleteIpamIPFailure) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %s", 500, payload)
}

// String returns the same representation as Error.
func (o *DeleteIpamIPFailure) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %s", 500, payload)
}

// GetPayload returns the decoded error payload.
func (o *DeleteIpamIPFailure) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF (empty body) is tolerated.
func (o *DeleteIpamIPFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewDeleteIpamIPDisabled creates a DeleteIpamIPDisabled with default headers values.
func NewDeleteIpamIPDisabled() *DeleteIpamIPDisabled {
	return &DeleteIpamIPDisabled{}
}

/*
DeleteIpamIPDisabled describes a response with status code 501, with default header values.

Allocation for address family disabled
*/
type DeleteIpamIPDisabled struct {
	// This response carries no body.
}

// IsSuccess returns true when this delete ipam Ip disabled response has a 2xx status code.
func (o *DeleteIpamIPDisabled) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this delete ipam Ip disabled response has a 3xx status code.
func (o *DeleteIpamIPDisabled) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete ipam Ip disabled response has a 4xx status code.
func (o *DeleteIpamIPDisabled) IsClientError() bool {
	return false
}

// IsServerError returns true when this delete ipam Ip disabled response has a 5xx status code.
func (o *DeleteIpamIPDisabled) IsServerError() bool {
	return true
}

// IsCode returns true when this delete ipam Ip disabled response has a status code equal to that given.
func (o *DeleteIpamIPDisabled) IsCode(code int) bool {
	return code == 501
}

// Code gets the status code for the delete ipam Ip disabled response.
func (o *DeleteIpamIPDisabled) Code() int {
	return 501
}

// Error renders the method, path and status code.
func (o *DeleteIpamIPDisabled) Error() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled", 501)
}

// String returns the same representation as Error.
func (o *DeleteIpamIPDisabled) String() string {
	return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled", 501)
}

// readResponse is a no-op: this response has no payload to decode.
func (o *DeleteIpamIPDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package ipam
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new ipam API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	return &Client{transport: transport, formats: formats}
}
// NewClientWithBasicAuth creates a new ipam API client with basic auth credentials.
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - user: user for basic authentication header.
//   - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: transport, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new ipam API client with a bearer token for authentication.
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: transport, formats: strfmt.Default}
}
/*
Client for ipam API
*/
type Client struct {
	transport runtime.ClientTransport
	formats strfmt.Registry
}
// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
type ClientService interface {
	// DeleteIpamIP releases an allocated IP address.
	DeleteIpamIP(params *DeleteIpamIPParams, opts ...ClientOption) (*DeleteIpamIPOK, error)
	// PostIpam allocates an IP address.
	PostIpam(params *PostIpamParams, opts ...ClientOption) (*PostIpamCreated, error)
	// PostIpamIP allocates the IP address given in params.
	PostIpamIP(params *PostIpamIPParams, opts ...ClientOption) (*PostIpamIPOK, error)
	// SetTransport changes the transport on the client.
	SetTransport(transport runtime.ClientTransport)
}
/*
DeleteIpamIP releases an allocated IP address
*/
func (a *Client) DeleteIpamIP(params *DeleteIpamIPParams, opts ...ClientOption) (*DeleteIpamIPOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewDeleteIpamIPParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "DeleteIpamIP",
		Method:             "DELETE",
		PathPattern:        "/ipam/{ip}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &DeleteIpamIPReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied operation customizations.
	for _, customize := range opts {
		customize(op)
	}
	raw, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, isOK := raw.(*DeleteIpamIPOK); isOK {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for DeleteIpamIP: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
/*
PostIpam allocates an IP address
*/
func (a *Client) PostIpam(params *PostIpamParams, opts ...ClientOption) (*PostIpamCreated, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewPostIpamParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PostIpam",
		Method:             "POST",
		PathPattern:        "/ipam",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PostIpamReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied operation customizations.
	for _, customize := range opts {
		customize(op)
	}
	raw, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, isCreated := raw.(*PostIpamCreated); isCreated {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for PostIpam: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
/*
PostIpamIP allocates an IP address
*/
func (a *Client) PostIpamIP(params *PostIpamIPParams, opts ...ClientOption) (*PostIpamIPOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewPostIpamIPParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PostIpamIP",
		Method:             "POST",
		PathPattern:        "/ipam/{ip}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PostIpamIPReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied operation customizations.
	for _, customize := range opts {
		customize(op)
	}
	raw, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, isOK := raw.(*PostIpamIPOK); isOK {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for PostIpamIP: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// SetTransport changes the transport on the client
// (e.g. to swap in an instrumented or mock transport).
func (a *Client) SetTransport(transport runtime.ClientTransport) {
	a.transport = transport
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package ipam
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewPostIpamIPParams creates a new PostIpamIPParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
// Remaining fields (IP, Owner, Pool, ...) are set via the With*/Set* accessors.
func NewPostIpamIPParams() *PostIpamIPParams {
	return &PostIpamIPParams{
		timeout: cr.DefaultTimeout,
	}
}
// NewPostIpamIPParamsWithTimeout creates a new PostIpamIPParams object
// with the ability to set a timeout on a request.
func NewPostIpamIPParamsWithTimeout(timeout time.Duration) *PostIpamIPParams {
	return &PostIpamIPParams{
		timeout: timeout,
	}
}
// NewPostIpamIPParamsWithContext creates a new PostIpamIPParams object
// with the ability to set a context for a request.
func NewPostIpamIPParamsWithContext(ctx context.Context) *PostIpamIPParams {
	return &PostIpamIPParams{
		Context: ctx,
	}
}
// NewPostIpamIPParamsWithHTTPClient creates a new PostIpamIPParams object
// with the ability to set a custom HTTPClient for a request.
func NewPostIpamIPParamsWithHTTPClient(client *http.Client) *PostIpamIPParams {
	return &PostIpamIPParams{
		HTTPClient: client,
	}
}
/*
PostIpamIPParams contains all the parameters to send to the API endpoint

	for the post ipam IP operation.

	Typically these are written to a http.Request.
*/
type PostIpamIPParams struct {
	// IP is the IP address path parameter.
	IP string
	// Owner is the optional owner query parameter.
	Owner *string
	// Pool is the optional pool query parameter.
	Pool *string

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the post ipam IP params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PostIpamIPParams) WithDefaults() *PostIpamIPParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the post ipam IP params (not the query body).
// This operation defines no parameter defaults, so it is a no-op.
func (o *PostIpamIPParams) SetDefaults() {}

// WithTimeout adds the timeout to the post ipam IP params
func (o *PostIpamIPParams) WithTimeout(timeout time.Duration) *PostIpamIPParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the post ipam IP params
func (o *PostIpamIPParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the post ipam IP params
func (o *PostIpamIPParams) WithContext(ctx context.Context) *PostIpamIPParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the post ipam IP params
func (o *PostIpamIPParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the post ipam IP params
func (o *PostIpamIPParams) WithHTTPClient(client *http.Client) *PostIpamIPParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the post ipam IP params
func (o *PostIpamIPParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithIP adds the ip to the post ipam IP params
func (o *PostIpamIPParams) WithIP(ip string) *PostIpamIPParams {
	o.IP = ip
	return o
}

// SetIP adds the ip to the post ipam IP params
func (o *PostIpamIPParams) SetIP(ip string) {
	o.IP = ip
}

// WithOwner adds the owner to the post ipam IP params
func (o *PostIpamIPParams) WithOwner(owner *string) *PostIpamIPParams {
	o.Owner = owner
	return o
}

// SetOwner adds the owner to the post ipam IP params
func (o *PostIpamIPParams) SetOwner(owner *string) {
	o.Owner = owner
}

// WithPool adds the pool to the post ipam IP params
func (o *PostIpamIPParams) WithPool(pool *string) *PostIpamIPParams {
	o.Pool = pool
	return o
}

// SetPool adds the pool to the post ipam IP params
func (o *PostIpamIPParams) SetPool(pool *string) {
	o.Pool = pool
}
// WriteToRequest writes these params to a swagger request
func (o *PostIpamIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	// path param ip
	if err := r.SetPathParam("ip", o.IP); err != nil {
		return err
	}

	// query param owner (only when set and non-empty)
	if o.Owner != nil {
		if v := *o.Owner; v != "" {
			if err := r.SetQueryParam("owner", v); err != nil {
				return err
			}
		}
	}

	// query param pool (only when set and non-empty)
	if o.Pool != nil {
		if v := *o.Pool; v != "" {
			if err := r.SetQueryParam("pool", v); err != nil {
				return err
			}
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package ipam
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PostIpamIPReader is a Reader for the PostIpamIP structure.
type PostIpamIPReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o,
// dispatching on the HTTP status code. Success (200) is returned
// as the result; every documented error code is returned as error.
func (o *PostIpamIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	code := response.Code()
	switch code {
	case 200:
		res := NewPostIpamIPOK()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return res, nil
	case 400:
		res := NewPostIpamIPInvalid()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 403:
		res := NewPostIpamIPForbidden()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 409:
		res := NewPostIpamIPExists()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 500:
		res := NewPostIpamIPFailure()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 501:
		res := NewPostIpamIPDisabled()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	default:
		return nil, runtime.NewAPIError("[POST /ipam/{ip}] PostIpamIP", response, code)
	}
}
// NewPostIpamIPOK creates a PostIpamIPOK with default headers values
func NewPostIpamIPOK() *PostIpamIPOK {
	return &PostIpamIPOK{}
}

/*
PostIpamIPOK describes a response with status code 200, with default header values.

Success
*/
type PostIpamIPOK struct{}

// Code gets the status code for the post ipam Ip o k response
func (o *PostIpamIPOK) Code() int { return 200 }

// IsCode reports whether the given status code matches this response (200).
func (o *PostIpamIPOK) IsCode(code int) bool { return code == 200 }

// IsSuccess reports whether this response is a 2xx; always true for 200.
func (o *PostIpamIPOK) IsSuccess() bool { return true }

// IsRedirect reports whether this response is a 3xx; always false for 200.
func (o *PostIpamIPOK) IsRedirect() bool { return false }

// IsClientError reports whether this response is a 4xx; always false for 200.
func (o *PostIpamIPOK) IsClientError() bool { return false }

// IsServerError reports whether this response is a 5xx; always false for 200.
func (o *PostIpamIPOK) IsServerError() bool { return false }

// Error implements the error interface; identical to String.
func (o *PostIpamIPOK) Error() string { return o.String() }

// String renders a short, log-friendly description of the response.
func (o *PostIpamIPOK) String() string {
	return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK", 200)
}

// readResponse is called by the operation reader; a 200 carries no payload.
func (o *PostIpamIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPostIpamIPInvalid creates a PostIpamIPInvalid with default headers values
func NewPostIpamIPInvalid() *PostIpamIPInvalid {
	return &PostIpamIPInvalid{}
}

/*
PostIpamIPInvalid describes a response with status code 400, with default header values.

Invalid IP address
*/
type PostIpamIPInvalid struct{}

// Code gets the status code for the post ipam Ip invalid response
func (o *PostIpamIPInvalid) Code() int { return 400 }

// IsCode reports whether the given status code matches this response (400).
func (o *PostIpamIPInvalid) IsCode(code int) bool { return code == 400 }

// IsSuccess reports whether this response is a 2xx; always false for 400.
func (o *PostIpamIPInvalid) IsSuccess() bool { return false }

// IsRedirect reports whether this response is a 3xx; always false for 400.
func (o *PostIpamIPInvalid) IsRedirect() bool { return false }

// IsClientError reports whether this response is a 4xx; always true for 400.
func (o *PostIpamIPInvalid) IsClientError() bool { return true }

// IsServerError reports whether this response is a 5xx; always false for 400.
func (o *PostIpamIPInvalid) IsServerError() bool { return false }

// Error implements the error interface; identical to String.
func (o *PostIpamIPInvalid) Error() string { return o.String() }

// String renders a short, log-friendly description of the response.
func (o *PostIpamIPInvalid) String() string {
	return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid", 400)
}

// readResponse is called by the operation reader; a 400 carries no payload.
func (o *PostIpamIPInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPostIpamIPForbidden creates a PostIpamIPForbidden with default headers values
func NewPostIpamIPForbidden() *PostIpamIPForbidden {
	return &PostIpamIPForbidden{}
}

/*
PostIpamIPForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PostIpamIPForbidden struct{}

// Code gets the status code for the post ipam Ip forbidden response
func (o *PostIpamIPForbidden) Code() int { return 403 }

// IsCode reports whether the given status code matches this response (403).
func (o *PostIpamIPForbidden) IsCode(code int) bool { return code == 403 }

// IsSuccess reports whether this response is a 2xx; always false for 403.
func (o *PostIpamIPForbidden) IsSuccess() bool { return false }

// IsRedirect reports whether this response is a 3xx; always false for 403.
func (o *PostIpamIPForbidden) IsRedirect() bool { return false }

// IsClientError reports whether this response is a 4xx; always true for 403.
func (o *PostIpamIPForbidden) IsClientError() bool { return true }

// IsServerError reports whether this response is a 5xx; always false for 403.
func (o *PostIpamIPForbidden) IsServerError() bool { return false }

// Error implements the error interface; identical to String.
func (o *PostIpamIPForbidden) Error() string { return o.String() }

// String renders a short, log-friendly description of the response.
func (o *PostIpamIPForbidden) String() string {
	return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpForbidden", 403)
}

// readResponse is called by the operation reader; a 403 carries no payload.
func (o *PostIpamIPForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPostIpamIPExists creates a PostIpamIPExists with default headers values
func NewPostIpamIPExists() *PostIpamIPExists {
	return &PostIpamIPExists{}
}

/*
PostIpamIPExists describes a response with status code 409, with default header values.

IP already allocated
*/
type PostIpamIPExists struct{}

// Code gets the status code for the post ipam Ip exists response
func (o *PostIpamIPExists) Code() int { return 409 }

// IsCode reports whether the given status code matches this response (409).
func (o *PostIpamIPExists) IsCode(code int) bool { return code == 409 }

// IsSuccess reports whether this response is a 2xx; always false for 409.
func (o *PostIpamIPExists) IsSuccess() bool { return false }

// IsRedirect reports whether this response is a 3xx; always false for 409.
func (o *PostIpamIPExists) IsRedirect() bool { return false }

// IsClientError reports whether this response is a 4xx; always true for 409.
func (o *PostIpamIPExists) IsClientError() bool { return true }

// IsServerError reports whether this response is a 5xx; always false for 409.
func (o *PostIpamIPExists) IsServerError() bool { return false }

// Error implements the error interface; identical to String.
func (o *PostIpamIPExists) Error() string { return o.String() }

// String renders a short, log-friendly description of the response.
func (o *PostIpamIPExists) String() string {
	return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists", 409)
}

// readResponse is called by the operation reader; a 409 carries no payload.
func (o *PostIpamIPExists) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPostIpamIPFailure creates a PostIpamIPFailure with default headers values
func NewPostIpamIPFailure() *PostIpamIPFailure {
	return &PostIpamIPFailure{}
}

/*
PostIpamIPFailure describes a response with status code 500, with default header values.

IP allocation failure. Details in message.
*/
type PostIpamIPFailure struct {
	// Payload holds the decoded error body of the 500 response.
	Payload models.Error
}

// Code gets the status code for the post ipam Ip failure response
func (o *PostIpamIPFailure) Code() int { return 500 }

// IsCode reports whether the given status code matches this response (500).
func (o *PostIpamIPFailure) IsCode(code int) bool { return code == 500 }

// IsSuccess reports whether this response is a 2xx; always false for 500.
func (o *PostIpamIPFailure) IsSuccess() bool { return false }

// IsRedirect reports whether this response is a 3xx; always false for 500.
func (o *PostIpamIPFailure) IsRedirect() bool { return false }

// IsClientError reports whether this response is a 4xx; always false for 500.
func (o *PostIpamIPFailure) IsClientError() bool { return false }

// IsServerError reports whether this response is a 5xx; always true for 500.
func (o *PostIpamIPFailure) IsServerError() bool { return true }

// Error implements the error interface; identical to String.
func (o *PostIpamIPFailure) Error() string { return o.String() }

// String renders the response including the JSON-encoded payload;
// the json.Marshal error is deliberately ignored (best-effort diagnostics).
func (o *PostIpamIPFailure) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %s", 500, payload)
}

// GetPayload returns the decoded error body of the 500 response.
func (o *PostIpamIPFailure) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF is
// tolerated so an empty body yields a zero-value payload.
func (o *PostIpamIPFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPostIpamIPDisabled creates a PostIpamIPDisabled with default headers values
func NewPostIpamIPDisabled() *PostIpamIPDisabled {
	return &PostIpamIPDisabled{}
}

/*
PostIpamIPDisabled describes a response with status code 501, with default header values.

Allocation for address family disabled
*/
type PostIpamIPDisabled struct{}

// Code gets the status code for the post ipam Ip disabled response
func (o *PostIpamIPDisabled) Code() int { return 501 }

// IsCode reports whether the given status code matches this response (501).
func (o *PostIpamIPDisabled) IsCode(code int) bool { return code == 501 }

// IsSuccess reports whether this response is a 2xx; always false for 501.
func (o *PostIpamIPDisabled) IsSuccess() bool { return false }

// IsRedirect reports whether this response is a 3xx; always false for 501.
func (o *PostIpamIPDisabled) IsRedirect() bool { return false }

// IsClientError reports whether this response is a 4xx; always false for 501.
func (o *PostIpamIPDisabled) IsClientError() bool { return false }

// IsServerError reports whether this response is a 5xx; always true for 501.
func (o *PostIpamIPDisabled) IsServerError() bool { return true }

// Error implements the error interface; identical to String.
func (o *PostIpamIPDisabled) Error() string { return o.String() }

// String renders a short, log-friendly description of the response.
func (o *PostIpamIPDisabled) String() string {
	return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled", 501)
}

// readResponse is called by the operation reader; a 501 carries no payload.
func (o *PostIpamIPDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package ipam
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewPostIpamParams creates a new PostIpamParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
// Remaining fields (Expiration, Family, Owner, Pool, ...) are set via the With*/Set* accessors.
func NewPostIpamParams() *PostIpamParams {
	return &PostIpamParams{
		timeout: cr.DefaultTimeout,
	}
}
// NewPostIpamParamsWithTimeout creates a new PostIpamParams object
// with the ability to set a timeout on a request.
func NewPostIpamParamsWithTimeout(timeout time.Duration) *PostIpamParams {
	return &PostIpamParams{
		timeout: timeout,
	}
}
// NewPostIpamParamsWithContext creates a new PostIpamParams object
// with the ability to set a context for a request.
func NewPostIpamParamsWithContext(ctx context.Context) *PostIpamParams {
	return &PostIpamParams{
		Context: ctx,
	}
}
// NewPostIpamParamsWithHTTPClient creates a new PostIpamParams object
// with the ability to set a custom HTTPClient for a request.
func NewPostIpamParamsWithHTTPClient(client *http.Client) *PostIpamParams {
	return &PostIpamParams{
		HTTPClient: client,
	}
}
/*
PostIpamParams contains all the parameters to send to the API endpoint

	for the post ipam operation.

	Typically these are written to a http.Request.
*/
type PostIpamParams struct {
	// Expiration is the optional expiration header parameter.
	Expiration *bool
	// Family is the optional address-family query parameter.
	Family *string
	// Owner is the optional owner query parameter.
	Owner *string
	// Pool is the optional pool query parameter.
	Pool *string

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the post ipam params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PostIpamParams) WithDefaults() *PostIpamParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the post ipam params (not the query body).
// This operation defines no parameter defaults, so it is a no-op.
func (o *PostIpamParams) SetDefaults() {}

// WithTimeout adds the timeout to the post ipam params
func (o *PostIpamParams) WithTimeout(timeout time.Duration) *PostIpamParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the post ipam params
func (o *PostIpamParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the post ipam params
func (o *PostIpamParams) WithContext(ctx context.Context) *PostIpamParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the post ipam params
func (o *PostIpamParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the post ipam params
func (o *PostIpamParams) WithHTTPClient(client *http.Client) *PostIpamParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the post ipam params
func (o *PostIpamParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithExpiration adds the expiration to the post ipam params
func (o *PostIpamParams) WithExpiration(expiration *bool) *PostIpamParams {
	o.Expiration = expiration
	return o
}

// SetExpiration adds the expiration to the post ipam params
func (o *PostIpamParams) SetExpiration(expiration *bool) {
	o.Expiration = expiration
}

// WithFamily adds the family to the post ipam params
func (o *PostIpamParams) WithFamily(family *string) *PostIpamParams {
	o.Family = family
	return o
}

// SetFamily adds the family to the post ipam params
func (o *PostIpamParams) SetFamily(family *string) {
	o.Family = family
}

// WithOwner adds the owner to the post ipam params
func (o *PostIpamParams) WithOwner(owner *string) *PostIpamParams {
	o.Owner = owner
	return o
}

// SetOwner adds the owner to the post ipam params
func (o *PostIpamParams) SetOwner(owner *string) {
	o.Owner = owner
}

// WithPool adds the pool to the post ipam params
func (o *PostIpamParams) WithPool(pool *string) *PostIpamParams {
	o.Pool = pool
	return o
}

// SetPool adds the pool to the post ipam params
func (o *PostIpamParams) SetPool(pool *string) {
	o.Pool = pool
}
// WriteToRequest writes these params to a swagger request
func (o *PostIpamParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	// header param expiration (only when set)
	if o.Expiration != nil {
		if err := r.SetHeaderParam("expiration", swag.FormatBool(*o.Expiration)); err != nil {
			return err
		}
	}

	// query param family (only when set and non-empty)
	if o.Family != nil {
		if v := *o.Family; v != "" {
			if err := r.SetQueryParam("family", v); err != nil {
				return err
			}
		}
	}

	// query param owner (only when set and non-empty)
	if o.Owner != nil {
		if v := *o.Owner; v != "" {
			if err := r.SetQueryParam("owner", v); err != nil {
				return err
			}
		}
	}

	// query param pool (only when set and non-empty)
	if o.Pool != nil {
		if v := *o.Pool; v != "" {
			if err := r.SetQueryParam("pool", v); err != nil {
				return err
			}
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package ipam
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PostIpamReader is a Reader for the PostIpam structure.
type PostIpamReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o,
// dispatching on the HTTP status code. Success (201) is returned
// as the result; every documented error code is returned as error.
func (o *PostIpamReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	code := response.Code()
	switch code {
	case 201:
		res := NewPostIpamCreated()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return res, nil
	case 403:
		res := NewPostIpamForbidden()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 502:
		res := NewPostIpamFailure()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	default:
		return nil, runtime.NewAPIError("[POST /ipam] PostIpam", response, code)
	}
}
// NewPostIpamCreated creates a PostIpamCreated with default headers values
func NewPostIpamCreated() *PostIpamCreated {
	return &PostIpamCreated{}
}

/*
PostIpamCreated describes a response with status code 201, with default header values.

Success
*/
type PostIpamCreated struct {
	// Payload holds the decoded allocation result of the 201 response.
	Payload *models.IPAMResponse
}

// Code gets the status code for the post ipam created response
func (o *PostIpamCreated) Code() int { return 201 }

// IsCode reports whether the given status code matches this response (201).
func (o *PostIpamCreated) IsCode(code int) bool { return code == 201 }

// IsSuccess reports whether this response is a 2xx; always true for 201.
func (o *PostIpamCreated) IsSuccess() bool { return true }

// IsRedirect reports whether this response is a 3xx; always false for 201.
func (o *PostIpamCreated) IsRedirect() bool { return false }

// IsClientError reports whether this response is a 4xx; always false for 201.
func (o *PostIpamCreated) IsClientError() bool { return false }

// IsServerError reports whether this response is a 5xx; always false for 201.
func (o *PostIpamCreated) IsServerError() bool { return false }

// Error implements the error interface; identical to String.
func (o *PostIpamCreated) Error() string { return o.String() }

// String renders the response including the JSON-encoded payload;
// the json.Marshal error is deliberately ignored (best-effort diagnostics).
func (o *PostIpamCreated) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %s", 201, payload)
}

// GetPayload returns the decoded body of the 201 response.
func (o *PostIpamCreated) GetPayload() *models.IPAMResponse {
	return o.Payload
}

// readResponse decodes the response body into a freshly allocated
// Payload; io.EOF is tolerated so an empty body yields a zero value.
func (o *PostIpamCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = &models.IPAMResponse{}
	err := consumer.Consume(response.Body(), o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPostIpamForbidden creates a PostIpamForbidden with default headers values
func NewPostIpamForbidden() *PostIpamForbidden {
	return &PostIpamForbidden{}
}

/*
PostIpamForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PostIpamForbidden struct{}

// Code gets the status code for the post ipam forbidden response
func (o *PostIpamForbidden) Code() int { return 403 }

// IsCode reports whether the given status code matches this response (403).
func (o *PostIpamForbidden) IsCode(code int) bool { return code == 403 }

// IsSuccess reports whether this response is a 2xx; always false for 403.
func (o *PostIpamForbidden) IsSuccess() bool { return false }

// IsRedirect reports whether this response is a 3xx; always false for 403.
func (o *PostIpamForbidden) IsRedirect() bool { return false }

// IsClientError reports whether this response is a 4xx; always true for 403.
func (o *PostIpamForbidden) IsClientError() bool { return true }

// IsServerError reports whether this response is a 5xx; always false for 403.
func (o *PostIpamForbidden) IsServerError() bool { return false }

// Error implements the error interface; identical to String.
func (o *PostIpamForbidden) Error() string { return o.String() }

// String renders a short, log-friendly description of the response.
func (o *PostIpamForbidden) String() string {
	return fmt.Sprintf("[POST /ipam][%d] postIpamForbidden", 403)
}

// readResponse is called by the operation reader; a 403 carries no payload.
func (o *PostIpamForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPostIpamFailure creates a PostIpamFailure with default headers values
func NewPostIpamFailure() *PostIpamFailure {
return &PostIpamFailure{}
}
/*
PostIpamFailure describes a response with status code 502, with default header values.
Allocation failure
*/
type PostIpamFailure struct {
// Payload carries the server-side error description.
Payload models.Error
}
// IsSuccess returns true when this post ipam failure response has a 2xx status code
func (o *PostIpamFailure) IsSuccess() bool {
return false
}
// IsRedirect returns true when this post ipam failure response has a 3xx status code
func (o *PostIpamFailure) IsRedirect() bool {
return false
}
// IsClientError returns true when this post ipam failure response has a 4xx status code
func (o *PostIpamFailure) IsClientError() bool {
return false
}
// IsServerError returns true when this post ipam failure response has a 5xx status code
func (o *PostIpamFailure) IsServerError() bool {
return true
}
// IsCode returns true when this post ipam failure response has a status code equal to that given
func (o *PostIpamFailure) IsCode(code int) bool {
return code == 502
}
// Code gets the status code for the post ipam failure response
func (o *PostIpamFailure) Code() int {
return 502
}
// Error implements the error interface; the JSON marshal error is deliberately ignored.
func (o *PostIpamFailure) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %s", 502, payload)
}
// String returns the same representation as Error.
func (o *PostIpamFailure) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %s", 502, payload)
}
// GetPayload returns the response payload.
func (o *PostIpamFailure) GetPayload() models.Error {
return o.Payload
}
// readResponse decodes the error body into o.Payload (io.EOF on an empty body is tolerated).
func (o *PostIpamFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewDeleteFqdnCacheParams creates a new DeleteFqdnCacheParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewDeleteFqdnCacheParams() *DeleteFqdnCacheParams {
return &DeleteFqdnCacheParams{
timeout: cr.DefaultTimeout,
}
}
// NewDeleteFqdnCacheParamsWithTimeout creates a new DeleteFqdnCacheParams object
// with the ability to set a timeout on a request.
// The remaining transport fields can still be set via the With*/Set* helpers.
func NewDeleteFqdnCacheParamsWithTimeout(timeout time.Duration) *DeleteFqdnCacheParams {
return &DeleteFqdnCacheParams{
timeout: timeout,
}
}
// NewDeleteFqdnCacheParamsWithContext creates a new DeleteFqdnCacheParams object
// with the ability to set a context for a request.
func NewDeleteFqdnCacheParamsWithContext(ctx context.Context) *DeleteFqdnCacheParams {
return &DeleteFqdnCacheParams{
Context: ctx,
}
}
// NewDeleteFqdnCacheParamsWithHTTPClient creates a new DeleteFqdnCacheParams object
// with the ability to set a custom HTTPClient for a request.
func NewDeleteFqdnCacheParamsWithHTTPClient(client *http.Client) *DeleteFqdnCacheParams {
return &DeleteFqdnCacheParams{
HTTPClient: client,
}
}
/*
DeleteFqdnCacheParams contains all the parameters to send to the API endpoint
for the delete fqdn cache operation.
Typically these are written to a http.Request.
*/
type DeleteFqdnCacheParams struct {
/* Matchpattern.
A toFQDNs compatible matchPattern expression
*/
Matchpattern *string
// timeout, Context and HTTPClient govern request transport behavior and
// are configured via the With*/Set* helpers below.
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the delete fqdn cache params (not the query body).
//
// All values with no default are reset to their zero value.
// It returns the receiver for call chaining.
func (o *DeleteFqdnCacheParams) WithDefaults() *DeleteFqdnCacheParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the delete fqdn cache params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DeleteFqdnCacheParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the delete fqdn cache params
func (o *DeleteFqdnCacheParams) WithTimeout(timeout time.Duration) *DeleteFqdnCacheParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the delete fqdn cache params
func (o *DeleteFqdnCacheParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the delete fqdn cache params
func (o *DeleteFqdnCacheParams) WithContext(ctx context.Context) *DeleteFqdnCacheParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the delete fqdn cache params
func (o *DeleteFqdnCacheParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the delete fqdn cache params
func (o *DeleteFqdnCacheParams) WithHTTPClient(client *http.Client) *DeleteFqdnCacheParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the delete fqdn cache params
func (o *DeleteFqdnCacheParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithMatchpattern adds the matchpattern to the delete fqdn cache params
func (o *DeleteFqdnCacheParams) WithMatchpattern(matchpattern *string) *DeleteFqdnCacheParams {
o.SetMatchpattern(matchpattern)
return o
}
// SetMatchpattern adds the matchpattern to the delete fqdn cache params
func (o *DeleteFqdnCacheParams) SetMatchpattern(matchpattern *string) {
o.Matchpattern = matchpattern
}
// WriteToRequest writes these params to a swagger request.
//
// The timeout is applied first; the optional matchpattern query parameter is
// only emitted when it is set and non-empty.
//
// NOTE(review): this file is generated by go-swagger. The simplification below
// (dropping the redundant inner nil re-check of o.Matchpattern) should also be
// made in the generator template, or it will be reverted on regeneration.
func (o *DeleteFqdnCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
// res is generated validation scaffolding; nothing appends to it for this
// operation, but it is kept so the file's errors import remains used and the
// shape matches other generated WriteToRequest implementations.
var res []error
if o.Matchpattern != nil {
// query param matchpattern. o.Matchpattern is already known to be
// non-nil here, so the generated inner nil check was redundant and
// has been removed.
qMatchpattern := *o.Matchpattern
if qMatchpattern != "" {
if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil {
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// DeleteFqdnCacheReader is a Reader for the DeleteFqdnCache structure.
type DeleteFqdnCacheReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// A 200 is returned as the result value; 400 and 403 are decoded and returned
// as the error value; any other status yields a generic *runtime.APIError.
func (o *DeleteFqdnCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDeleteFqdnCacheOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewDeleteFqdnCacheBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 403:
result := NewDeleteFqdnCacheForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("[DELETE /fqdn/cache] DeleteFqdnCache", response, response.Code())
}
}
// NewDeleteFqdnCacheOK creates a DeleteFqdnCacheOK with default headers values
func NewDeleteFqdnCacheOK() *DeleteFqdnCacheOK {
return &DeleteFqdnCacheOK{}
}
/*
DeleteFqdnCacheOK describes a response with status code 200, with default header values.
Success
*/
type DeleteFqdnCacheOK struct {
}
// IsSuccess returns true when this delete fqdn cache o k response has a 2xx status code
func (o *DeleteFqdnCacheOK) IsSuccess() bool {
return true
}
// IsRedirect returns true when this delete fqdn cache o k response has a 3xx status code
func (o *DeleteFqdnCacheOK) IsRedirect() bool {
return false
}
// IsClientError returns true when this delete fqdn cache o k response has a 4xx status code
func (o *DeleteFqdnCacheOK) IsClientError() bool {
return false
}
// IsServerError returns true when this delete fqdn cache o k response has a 5xx status code
func (o *DeleteFqdnCacheOK) IsServerError() bool {
return false
}
// IsCode returns true when this delete fqdn cache o k response has a status code equal to that given
func (o *DeleteFqdnCacheOK) IsCode(code int) bool {
return code == 200
}
// Code gets the status code for the delete fqdn cache o k response
func (o *DeleteFqdnCacheOK) Code() int {
return 200
}
// Error implements the error interface for this response.
func (o *DeleteFqdnCacheOK) Error() string {
return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK", 200)
}
// String returns the same representation as Error.
func (o *DeleteFqdnCacheOK) String() string {
return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK", 200)
}
// readResponse is a no-op: this response carries no body or headers to decode.
func (o *DeleteFqdnCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewDeleteFqdnCacheBadRequest creates a DeleteFqdnCacheBadRequest with default headers values
func NewDeleteFqdnCacheBadRequest() *DeleteFqdnCacheBadRequest {
return &DeleteFqdnCacheBadRequest{}
}
/*
DeleteFqdnCacheBadRequest describes a response with status code 400, with default header values.
Invalid request (error parsing parameters)
*/
type DeleteFqdnCacheBadRequest struct {
// Payload carries the server-side error description.
Payload models.Error
}
// IsSuccess returns true when this delete fqdn cache bad request response has a 2xx status code
func (o *DeleteFqdnCacheBadRequest) IsSuccess() bool {
return false
}
// IsRedirect returns true when this delete fqdn cache bad request response has a 3xx status code
func (o *DeleteFqdnCacheBadRequest) IsRedirect() bool {
return false
}
// IsClientError returns true when this delete fqdn cache bad request response has a 4xx status code
func (o *DeleteFqdnCacheBadRequest) IsClientError() bool {
return true
}
// IsServerError returns true when this delete fqdn cache bad request response has a 5xx status code
func (o *DeleteFqdnCacheBadRequest) IsServerError() bool {
return false
}
// IsCode returns true when this delete fqdn cache bad request response has a status code equal to that given
func (o *DeleteFqdnCacheBadRequest) IsCode(code int) bool {
return code == 400
}
// Code gets the status code for the delete fqdn cache bad request response
func (o *DeleteFqdnCacheBadRequest) Code() int {
return 400
}
// Error implements the error interface; the JSON marshal error is deliberately ignored.
func (o *DeleteFqdnCacheBadRequest) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %s", 400, payload)
}
// String returns the same representation as Error.
func (o *DeleteFqdnCacheBadRequest) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %s", 400, payload)
}
// GetPayload returns the response payload.
func (o *DeleteFqdnCacheBadRequest) GetPayload() models.Error {
return o.Payload
}
// readResponse decodes the error body into o.Payload (io.EOF on an empty body is tolerated).
func (o *DeleteFqdnCacheBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewDeleteFqdnCacheForbidden creates a DeleteFqdnCacheForbidden with default headers values
func NewDeleteFqdnCacheForbidden() *DeleteFqdnCacheForbidden {
return &DeleteFqdnCacheForbidden{}
}
/*
DeleteFqdnCacheForbidden describes a response with status code 403, with default header values.
Forbidden
*/
type DeleteFqdnCacheForbidden struct {
}
// IsSuccess returns true when this delete fqdn cache forbidden response has a 2xx status code
func (o *DeleteFqdnCacheForbidden) IsSuccess() bool {
return false
}
// IsRedirect returns true when this delete fqdn cache forbidden response has a 3xx status code
func (o *DeleteFqdnCacheForbidden) IsRedirect() bool {
return false
}
// IsClientError returns true when this delete fqdn cache forbidden response has a 4xx status code
func (o *DeleteFqdnCacheForbidden) IsClientError() bool {
return true
}
// IsServerError returns true when this delete fqdn cache forbidden response has a 5xx status code
func (o *DeleteFqdnCacheForbidden) IsServerError() bool {
return false
}
// IsCode returns true when this delete fqdn cache forbidden response has a status code equal to that given
func (o *DeleteFqdnCacheForbidden) IsCode(code int) bool {
return code == 403
}
// Code gets the status code for the delete fqdn cache forbidden response
func (o *DeleteFqdnCacheForbidden) Code() int {
return 403
}
// Error implements the error interface for this response.
func (o *DeleteFqdnCacheForbidden) Error() string {
return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheForbidden", 403)
}
// String returns the same representation as Error.
func (o *DeleteFqdnCacheForbidden) String() string {
return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheForbidden", 403)
}
// readResponse is a no-op: this response carries no body or headers to decode.
func (o *DeleteFqdnCacheForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewDeletePolicyParams creates a new DeletePolicyParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewDeletePolicyParams() *DeletePolicyParams {
return &DeletePolicyParams{
timeout: cr.DefaultTimeout,
}
}
// NewDeletePolicyParamsWithTimeout creates a new DeletePolicyParams object
// with the ability to set a timeout on a request.
// The remaining transport fields can still be set via the With*/Set* helpers.
func NewDeletePolicyParamsWithTimeout(timeout time.Duration) *DeletePolicyParams {
return &DeletePolicyParams{
timeout: timeout,
}
}
// NewDeletePolicyParamsWithContext creates a new DeletePolicyParams object
// with the ability to set a context for a request.
func NewDeletePolicyParamsWithContext(ctx context.Context) *DeletePolicyParams {
return &DeletePolicyParams{
Context: ctx,
}
}
// NewDeletePolicyParamsWithHTTPClient creates a new DeletePolicyParams object
// with the ability to set a custom HTTPClient for a request.
func NewDeletePolicyParamsWithHTTPClient(client *http.Client) *DeletePolicyParams {
return &DeletePolicyParams{
HTTPClient: client,
}
}
/*
DeletePolicyParams contains all the parameters to send to the API endpoint
for the delete policy operation.
Typically these are written to a http.Request.
*/
type DeletePolicyParams struct {
// Labels.
// Sent as the request body; when nil, no body is written.
Labels models.Labels
// timeout, Context and HTTPClient govern request transport behavior and
// are configured via the With*/Set* helpers below.
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the delete policy params (not the query body).
//
// All values with no default are reset to their zero value.
// It returns the receiver for call chaining.
func (o *DeletePolicyParams) WithDefaults() *DeletePolicyParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the delete policy params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DeletePolicyParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the delete policy params
func (o *DeletePolicyParams) WithTimeout(timeout time.Duration) *DeletePolicyParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the delete policy params
func (o *DeletePolicyParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the delete policy params
func (o *DeletePolicyParams) WithContext(ctx context.Context) *DeletePolicyParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the delete policy params
func (o *DeletePolicyParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the delete policy params
func (o *DeletePolicyParams) WithHTTPClient(client *http.Client) *DeletePolicyParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the delete policy params
func (o *DeletePolicyParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithLabels adds the labels to the delete policy params
func (o *DeletePolicyParams) WithLabels(labels models.Labels) *DeletePolicyParams {
o.SetLabels(labels)
return o
}
// SetLabels adds the labels to the delete policy params
func (o *DeletePolicyParams) SetLabels(labels models.Labels) {
o.Labels = labels
}
// WriteToRequest writes these params to a swagger request.
// The timeout is applied first; the labels are sent as the request body only
// when non-nil, so an unset Labels results in a bodyless DELETE.
func (o *DeletePolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
// res is generated validation scaffolding; nothing appends to it for this
// operation, so the CompositeValidationError branch below never fires.
var res []error
if o.Labels != nil {
if err := r.SetBodyParam(o.Labels); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// DeletePolicyReader is a Reader for the DeletePolicy structure.
type DeletePolicyReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// A 200 is returned as the result value; 400/403/404/500 are decoded and
// returned as the error value; any other status yields a generic *runtime.APIError.
func (o *DeletePolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDeletePolicyOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewDeletePolicyInvalid()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 403:
result := NewDeletePolicyForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 404:
result := NewDeletePolicyNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 500:
result := NewDeletePolicyFailure()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("[DELETE /policy] DeletePolicy", response, response.Code())
}
}
// NewDeletePolicyOK creates a DeletePolicyOK with default headers values
func NewDeletePolicyOK() *DeletePolicyOK {
return &DeletePolicyOK{}
}
/*
DeletePolicyOK describes a response with status code 200, with default header values.
Success
*/
type DeletePolicyOK struct {
// Payload carries the policy state after the delete.
Payload *models.Policy
}
// IsSuccess returns true when this delete policy o k response has a 2xx status code
func (o *DeletePolicyOK) IsSuccess() bool {
return true
}
// IsRedirect returns true when this delete policy o k response has a 3xx status code
func (o *DeletePolicyOK) IsRedirect() bool {
return false
}
// IsClientError returns true when this delete policy o k response has a 4xx status code
func (o *DeletePolicyOK) IsClientError() bool {
return false
}
// IsServerError returns true when this delete policy o k response has a 5xx status code
func (o *DeletePolicyOK) IsServerError() bool {
return false
}
// IsCode returns true when this delete policy o k response has a status code equal to that given
func (o *DeletePolicyOK) IsCode(code int) bool {
return code == 200
}
// Code gets the status code for the delete policy o k response
func (o *DeletePolicyOK) Code() int {
return 200
}
// Error implements the error interface; the JSON marshal error is deliberately ignored.
func (o *DeletePolicyOK) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %s", 200, payload)
}
// String returns the same representation as Error.
func (o *DeletePolicyOK) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %s", 200, payload)
}
// GetPayload returns the response payload.
func (o *DeletePolicyOK) GetPayload() *models.Policy {
return o.Payload
}
// readResponse decodes the HTTP response body into o.Payload
// (io.EOF on an empty body is tolerated).
func (o *DeletePolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Policy)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewDeletePolicyInvalid creates a DeletePolicyInvalid with default headers values
func NewDeletePolicyInvalid() *DeletePolicyInvalid {
return &DeletePolicyInvalid{}
}
/*
DeletePolicyInvalid describes a response with status code 400, with default header values.
Invalid request
*/
type DeletePolicyInvalid struct {
// Payload carries the server-side error description.
Payload models.Error
}
// IsSuccess returns true when this delete policy invalid response has a 2xx status code
func (o *DeletePolicyInvalid) IsSuccess() bool {
return false
}
// IsRedirect returns true when this delete policy invalid response has a 3xx status code
func (o *DeletePolicyInvalid) IsRedirect() bool {
return false
}
// IsClientError returns true when this delete policy invalid response has a 4xx status code
func (o *DeletePolicyInvalid) IsClientError() bool {
return true
}
// IsServerError returns true when this delete policy invalid response has a 5xx status code
func (o *DeletePolicyInvalid) IsServerError() bool {
return false
}
// IsCode returns true when this delete policy invalid response has a status code equal to that given
func (o *DeletePolicyInvalid) IsCode(code int) bool {
return code == 400
}
// Code gets the status code for the delete policy invalid response
func (o *DeletePolicyInvalid) Code() int {
return 400
}
// Error implements the error interface; the JSON marshal error is deliberately ignored.
func (o *DeletePolicyInvalid) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %s", 400, payload)
}
// String returns the same representation as Error.
func (o *DeletePolicyInvalid) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %s", 400, payload)
}
// GetPayload returns the response payload.
func (o *DeletePolicyInvalid) GetPayload() models.Error {
return o.Payload
}
// readResponse decodes the error body into o.Payload (io.EOF on an empty body is tolerated).
func (o *DeletePolicyInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewDeletePolicyForbidden creates a DeletePolicyForbidden with default headers values
func NewDeletePolicyForbidden() *DeletePolicyForbidden {
return &DeletePolicyForbidden{}
}
/*
DeletePolicyForbidden describes a response with status code 403, with default header values.
Forbidden
*/
type DeletePolicyForbidden struct {
}
// IsSuccess returns true when this delete policy forbidden response has a 2xx status code
func (o *DeletePolicyForbidden) IsSuccess() bool {
return false
}
// IsRedirect returns true when this delete policy forbidden response has a 3xx status code
func (o *DeletePolicyForbidden) IsRedirect() bool {
return false
}
// IsClientError returns true when this delete policy forbidden response has a 4xx status code
func (o *DeletePolicyForbidden) IsClientError() bool {
return true
}
// IsServerError returns true when this delete policy forbidden response has a 5xx status code
func (o *DeletePolicyForbidden) IsServerError() bool {
return false
}
// IsCode returns true when this delete policy forbidden response has a status code equal to that given
func (o *DeletePolicyForbidden) IsCode(code int) bool {
return code == 403
}
// Code gets the status code for the delete policy forbidden response
func (o *DeletePolicyForbidden) Code() int {
return 403
}
// Error implements the error interface for this response.
func (o *DeletePolicyForbidden) Error() string {
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyForbidden", 403)
}
// String returns the same representation as Error.
func (o *DeletePolicyForbidden) String() string {
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyForbidden", 403)
}
// readResponse is a no-op: this response carries no body or headers to decode.
func (o *DeletePolicyForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewDeletePolicyNotFound creates a DeletePolicyNotFound with default headers values
func NewDeletePolicyNotFound() *DeletePolicyNotFound {
return &DeletePolicyNotFound{}
}
/*
DeletePolicyNotFound describes a response with status code 404, with default header values.
Policy not found
*/
type DeletePolicyNotFound struct {
}
// IsSuccess returns true when this delete policy not found response has a 2xx status code
func (o *DeletePolicyNotFound) IsSuccess() bool {
return false
}
// IsRedirect returns true when this delete policy not found response has a 3xx status code
func (o *DeletePolicyNotFound) IsRedirect() bool {
return false
}
// IsClientError returns true when this delete policy not found response has a 4xx status code
func (o *DeletePolicyNotFound) IsClientError() bool {
return true
}
// IsServerError returns true when this delete policy not found response has a 5xx status code
func (o *DeletePolicyNotFound) IsServerError() bool {
return false
}
// IsCode returns true when this delete policy not found response has a status code equal to that given
func (o *DeletePolicyNotFound) IsCode(code int) bool {
return code == 404
}
// Code gets the status code for the delete policy not found response
func (o *DeletePolicyNotFound) Code() int {
return 404
}
// Error implements the error interface for this response.
func (o *DeletePolicyNotFound) Error() string {
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound", 404)
}
// String returns the same representation as Error.
func (o *DeletePolicyNotFound) String() string {
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound", 404)
}
// readResponse is a no-op: this response carries no body or headers to decode.
func (o *DeletePolicyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewDeletePolicyFailure creates a DeletePolicyFailure with default headers values
func NewDeletePolicyFailure() *DeletePolicyFailure {
return &DeletePolicyFailure{}
}
/*
DeletePolicyFailure describes a response with status code 500, with default header values.
Error while deleting policy
*/
type DeletePolicyFailure struct {
// Payload carries the server-side error description.
Payload models.Error
}
// IsSuccess returns true when this delete policy failure response has a 2xx status code
func (o *DeletePolicyFailure) IsSuccess() bool {
return false
}
// IsRedirect returns true when this delete policy failure response has a 3xx status code
func (o *DeletePolicyFailure) IsRedirect() bool {
return false
}
// IsClientError returns true when this delete policy failure response has a 4xx status code
func (o *DeletePolicyFailure) IsClientError() bool {
return false
}
// IsServerError returns true when this delete policy failure response has a 5xx status code
func (o *DeletePolicyFailure) IsServerError() bool {
return true
}
// IsCode returns true when this delete policy failure response has a status code equal to that given
func (o *DeletePolicyFailure) IsCode(code int) bool {
return code == 500
}
// Code gets the status code for the delete policy failure response
func (o *DeletePolicyFailure) Code() int {
return 500
}
// Error implements the error interface; the JSON marshal error is deliberately ignored.
func (o *DeletePolicyFailure) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %s", 500, payload)
}
// String returns the same representation as Error.
func (o *DeletePolicyFailure) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %s", 500, payload)
}
// GetPayload returns the response payload.
func (o *DeletePolicyFailure) GetPayload() models.Error {
return o.Payload
}
// readResponse decodes the error body into o.Payload (io.EOF on an empty body is tolerated).
func (o *DeletePolicyFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetFqdnCacheIDParams creates a new GetFqdnCacheIDParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetFqdnCacheIDParams() *GetFqdnCacheIDParams {
return &GetFqdnCacheIDParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetFqdnCacheIDParamsWithTimeout creates a new GetFqdnCacheIDParams object
// with the ability to set a timeout on a request.
// The remaining transport fields can still be set via the With*/Set* helpers.
func NewGetFqdnCacheIDParamsWithTimeout(timeout time.Duration) *GetFqdnCacheIDParams {
return &GetFqdnCacheIDParams{
timeout: timeout,
}
}
// NewGetFqdnCacheIDParamsWithContext creates a new GetFqdnCacheIDParams object
// with the ability to set a context for a request.
func NewGetFqdnCacheIDParamsWithContext(ctx context.Context) *GetFqdnCacheIDParams {
return &GetFqdnCacheIDParams{
Context: ctx,
}
}
// NewGetFqdnCacheIDParamsWithHTTPClient creates a new GetFqdnCacheIDParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetFqdnCacheIDParamsWithHTTPClient(client *http.Client) *GetFqdnCacheIDParams {
return &GetFqdnCacheIDParams{
HTTPClient: client,
}
}
/*
GetFqdnCacheIDParams contains all the parameters to send to the API endpoint
for the get fqdn cache ID operation.
Typically these are written to a http.Request.
*/
type GetFqdnCacheIDParams struct {
/* Cidr.
A CIDR range of IPs
*/
Cidr *string
/* ID.
String describing an endpoint with the format ``[prefix:]id``. If no prefix
is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
will be addressable by all endpoint ID prefixes with the exception of the
local Cilium UUID which is assigned to all endpoints.
Supported endpoint id prefixes:
- cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
- cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
- cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
- container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
- container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
- pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
- cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
- docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
*/
ID string
/* Matchpattern.
A toFQDNs compatible matchPattern expression
*/
Matchpattern *string
/* Source.
Source from which FQDN entries come from
*/
Source *string
// timeout, Context and HTTPClient govern request transport behavior and
// are configured via the With*/Set* helpers below.
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get fqdn cache ID params (not the query body).
//
// All values with no default are reset to their zero value.
//
// The With* methods below return the receiver so calls can be chained fluently.
func (o *GetFqdnCacheIDParams) WithDefaults() *GetFqdnCacheIDParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get fqdn cache ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetFqdnCacheIDParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) WithTimeout(timeout time.Duration) *GetFqdnCacheIDParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) WithContext(ctx context.Context) *GetFqdnCacheIDParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) WithHTTPClient(client *http.Client) *GetFqdnCacheIDParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithCidr adds the cidr to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) WithCidr(cidr *string) *GetFqdnCacheIDParams {
	o.SetCidr(cidr)
	return o
}

// SetCidr adds the cidr to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) SetCidr(cidr *string) {
	o.Cidr = cidr
}

// WithID adds the id to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) WithID(id string) *GetFqdnCacheIDParams {
	o.SetID(id)
	return o
}

// SetID adds the id to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) SetID(id string) {
	o.ID = id
}

// WithMatchpattern adds the matchpattern to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) WithMatchpattern(matchpattern *string) *GetFqdnCacheIDParams {
	o.SetMatchpattern(matchpattern)
	return o
}

// SetMatchpattern adds the matchpattern to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) SetMatchpattern(matchpattern *string) {
	o.Matchpattern = matchpattern
}

// WithSource adds the source to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) WithSource(source *string) *GetFqdnCacheIDParams {
	o.SetSource(source)
	return o
}

// SetSource adds the source to the get fqdn cache ID params
func (o *GetFqdnCacheIDParams) SetSource(source *string) {
	o.Source = source
}
// WriteToRequest writes these params to a swagger request
func (o *GetFqdnCacheIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	// query param cidr — optional; empty strings are not transmitted
	if o.Cidr != nil {
		if qCidr := *o.Cidr; qCidr != "" {
			if err := r.SetQueryParam("cidr", qCidr); err != nil {
				return err
			}
		}
	}

	// path param id — required, always written
	if err := r.SetPathParam("id", o.ID); err != nil {
		return err
	}

	// query param matchpattern — optional; empty strings are not transmitted
	if o.Matchpattern != nil {
		if qMatchpattern := *o.Matchpattern; qMatchpattern != "" {
			if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil {
				return err
			}
		}
	}

	// query param source — optional; empty strings are not transmitted
	if o.Source != nil {
		if qSource := *o.Source; qSource != "" {
			if err := r.SetQueryParam("source", qSource); err != nil {
				return err
			}
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetFqdnCacheIDReader is a Reader for the GetFqdnCacheID structure.
type GetFqdnCacheIDReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// A 200 is returned as the result value; 400/404 are decoded and returned
// as the error value; anything else becomes a generic runtime.APIError.
func (o *GetFqdnCacheIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		res := NewGetFqdnCacheIDOK()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return res, nil
	case 400:
		res := NewGetFqdnCacheIDBadRequest()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 404:
		res := NewGetFqdnCacheIDNotFound()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	default:
		return nil, runtime.NewAPIError("[GET /fqdn/cache/{id}] GetFqdnCacheID", response, response.Code())
	}
}
// NewGetFqdnCacheIDOK creates a GetFqdnCacheIDOK with default headers values
func NewGetFqdnCacheIDOK() *GetFqdnCacheIDOK {
	return &GetFqdnCacheIDOK{}
}

/*
GetFqdnCacheIDOK describes a response with status code 200, with default header values.

Success
*/
type GetFqdnCacheIDOK struct {
	// Payload holds the decoded response body: the DNS lookups cached for the endpoint.
	Payload []*models.DNSLookup
}

// IsSuccess returns true when this get fqdn cache Id o k response has a 2xx status code
func (o *GetFqdnCacheIDOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get fqdn cache Id o k response has a 3xx status code
func (o *GetFqdnCacheIDOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get fqdn cache Id o k response has a 4xx status code
func (o *GetFqdnCacheIDOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get fqdn cache Id o k response has a 5xx status code
func (o *GetFqdnCacheIDOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get fqdn cache Id o k response a status code equal to that given
func (o *GetFqdnCacheIDOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get fqdn cache Id o k response
func (o *GetFqdnCacheIDOK) Code() int {
	return 200
}

func (o *GetFqdnCacheIDOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %s", 200, payload)
}

func (o *GetFqdnCacheIDOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %s", 200, payload)
}

func (o *GetFqdnCacheIDOK) GetPayload() []*models.DNSLookup {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF is tolerated
// so that an empty body yields a nil payload rather than an error.
func (o *GetFqdnCacheIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewGetFqdnCacheIDBadRequest creates a GetFqdnCacheIDBadRequest with default headers values
func NewGetFqdnCacheIDBadRequest() *GetFqdnCacheIDBadRequest {
	return &GetFqdnCacheIDBadRequest{}
}

/*
GetFqdnCacheIDBadRequest describes a response with status code 400, with default header values.

Invalid request (error parsing parameters)
*/
type GetFqdnCacheIDBadRequest struct {
	// Payload holds the decoded error message returned by the server.
	Payload models.Error
}

// IsSuccess returns true when this get fqdn cache Id bad request response has a 2xx status code
func (o *GetFqdnCacheIDBadRequest) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get fqdn cache Id bad request response has a 3xx status code
func (o *GetFqdnCacheIDBadRequest) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get fqdn cache Id bad request response has a 4xx status code
func (o *GetFqdnCacheIDBadRequest) IsClientError() bool {
	return true
}

// IsServerError returns true when this get fqdn cache Id bad request response has a 5xx status code
func (o *GetFqdnCacheIDBadRequest) IsServerError() bool {
	return false
}

// IsCode returns true when this get fqdn cache Id bad request response a status code equal to that given
func (o *GetFqdnCacheIDBadRequest) IsCode(code int) bool {
	return code == 400
}

// Code gets the status code for the get fqdn cache Id bad request response
func (o *GetFqdnCacheIDBadRequest) Code() int {
	return 400
}

func (o *GetFqdnCacheIDBadRequest) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %s", 400, payload)
}

func (o *GetFqdnCacheIDBadRequest) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %s", 400, payload)
}

func (o *GetFqdnCacheIDBadRequest) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF is tolerated.
func (o *GetFqdnCacheIDBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewGetFqdnCacheIDNotFound creates a GetFqdnCacheIDNotFound with default headers values
func NewGetFqdnCacheIDNotFound() *GetFqdnCacheIDNotFound {
	return &GetFqdnCacheIDNotFound{}
}

/*
GetFqdnCacheIDNotFound describes a response with status code 404, with default header values.

No DNS data with provided parameters found
*/
type GetFqdnCacheIDNotFound struct {
	// 404 carries no response body, so there is no payload field.
}

// IsSuccess returns true when this get fqdn cache Id not found response has a 2xx status code
func (o *GetFqdnCacheIDNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get fqdn cache Id not found response has a 3xx status code
func (o *GetFqdnCacheIDNotFound) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get fqdn cache Id not found response has a 4xx status code
func (o *GetFqdnCacheIDNotFound) IsClientError() bool {
	return true
}

// IsServerError returns true when this get fqdn cache Id not found response has a 5xx status code
func (o *GetFqdnCacheIDNotFound) IsServerError() bool {
	return false
}

// IsCode returns true when this get fqdn cache Id not found response a status code equal to that given
func (o *GetFqdnCacheIDNotFound) IsCode(code int) bool {
	return code == 404
}

// Code gets the status code for the get fqdn cache Id not found response
func (o *GetFqdnCacheIDNotFound) Code() int {
	return 404
}

func (o *GetFqdnCacheIDNotFound) Error() string {
	return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound", 404)
}

func (o *GetFqdnCacheIDNotFound) String() string {
	return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound", 404)
}

// readResponse is a no-op: a 404 has no body to decode.
func (o *GetFqdnCacheIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetFqdnCacheParams creates a new GetFqdnCacheParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetFqdnCacheParams() *GetFqdnCacheParams {
	params := GetFqdnCacheParams{}
	params.timeout = cr.DefaultTimeout
	return &params
}

// NewGetFqdnCacheParamsWithTimeout creates a new GetFqdnCacheParams object
// with the ability to set a timeout on a request.
func NewGetFqdnCacheParamsWithTimeout(timeout time.Duration) *GetFqdnCacheParams {
	params := GetFqdnCacheParams{}
	params.timeout = timeout
	return &params
}

// NewGetFqdnCacheParamsWithContext creates a new GetFqdnCacheParams object
// with the ability to set a context for a request.
func NewGetFqdnCacheParamsWithContext(ctx context.Context) *GetFqdnCacheParams {
	params := GetFqdnCacheParams{}
	params.Context = ctx
	return &params
}

// NewGetFqdnCacheParamsWithHTTPClient creates a new GetFqdnCacheParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetFqdnCacheParamsWithHTTPClient(client *http.Client) *GetFqdnCacheParams {
	params := GetFqdnCacheParams{}
	params.HTTPClient = client
	return &params
}
/*
GetFqdnCacheParams contains all the parameters to send to the API endpoint
for the get fqdn cache operation.

Typically these are written to a http.Request.
*/
type GetFqdnCacheParams struct {

	/* Cidr.

	   A CIDR range of IPs
	*/
	Cidr *string

	/* Matchpattern.

	   A toFQDNs compatible matchPattern expression
	*/
	Matchpattern *string

	/* Source.

	   Source from which FQDN entries come from
	*/
	Source *string

	// timeout, Context and HTTPClient control how the request is executed,
	// not what is sent; populate them via the With*/Set* helpers below.
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get fqdn cache params (not the query body).
//
// All values with no default are reset to their zero value.
//
// The With* methods below return the receiver so calls can be chained fluently.
func (o *GetFqdnCacheParams) WithDefaults() *GetFqdnCacheParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get fqdn cache params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetFqdnCacheParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get fqdn cache params
func (o *GetFqdnCacheParams) WithTimeout(timeout time.Duration) *GetFqdnCacheParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get fqdn cache params
func (o *GetFqdnCacheParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get fqdn cache params
func (o *GetFqdnCacheParams) WithContext(ctx context.Context) *GetFqdnCacheParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get fqdn cache params
func (o *GetFqdnCacheParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get fqdn cache params
func (o *GetFqdnCacheParams) WithHTTPClient(client *http.Client) *GetFqdnCacheParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get fqdn cache params
func (o *GetFqdnCacheParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithCidr adds the cidr to the get fqdn cache params
func (o *GetFqdnCacheParams) WithCidr(cidr *string) *GetFqdnCacheParams {
	o.SetCidr(cidr)
	return o
}

// SetCidr adds the cidr to the get fqdn cache params
func (o *GetFqdnCacheParams) SetCidr(cidr *string) {
	o.Cidr = cidr
}

// WithMatchpattern adds the matchpattern to the get fqdn cache params
func (o *GetFqdnCacheParams) WithMatchpattern(matchpattern *string) *GetFqdnCacheParams {
	o.SetMatchpattern(matchpattern)
	return o
}

// SetMatchpattern adds the matchpattern to the get fqdn cache params
func (o *GetFqdnCacheParams) SetMatchpattern(matchpattern *string) {
	o.Matchpattern = matchpattern
}

// WithSource adds the source to the get fqdn cache params
func (o *GetFqdnCacheParams) WithSource(source *string) *GetFqdnCacheParams {
	o.SetSource(source)
	return o
}

// SetSource adds the source to the get fqdn cache params
func (o *GetFqdnCacheParams) SetSource(source *string) {
	o.Source = source
}
// WriteToRequest writes these params to a swagger request
func (o *GetFqdnCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	// query param cidr — optional; empty strings are not transmitted
	if o.Cidr != nil {
		if qCidr := *o.Cidr; qCidr != "" {
			if err := r.SetQueryParam("cidr", qCidr); err != nil {
				return err
			}
		}
	}

	// query param matchpattern — optional; empty strings are not transmitted
	if o.Matchpattern != nil {
		if qMatchpattern := *o.Matchpattern; qMatchpattern != "" {
			if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil {
				return err
			}
		}
	}

	// query param source — optional; empty strings are not transmitted
	if o.Source != nil {
		if qSource := *o.Source; qSource != "" {
			if err := r.SetQueryParam("source", qSource); err != nil {
				return err
			}
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetFqdnCacheReader is a Reader for the GetFqdnCache structure.
type GetFqdnCacheReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// A 200 is returned as the result value; 400/404 are decoded and returned
// as the error value; anything else becomes a generic runtime.APIError.
func (o *GetFqdnCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		res := NewGetFqdnCacheOK()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return res, nil
	case 400:
		res := NewGetFqdnCacheBadRequest()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	case 404:
		res := NewGetFqdnCacheNotFound()
		if err := res.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, res
	default:
		return nil, runtime.NewAPIError("[GET /fqdn/cache] GetFqdnCache", response, response.Code())
	}
}
// NewGetFqdnCacheOK creates a GetFqdnCacheOK with default headers values
func NewGetFqdnCacheOK() *GetFqdnCacheOK {
	return &GetFqdnCacheOK{}
}

/*
GetFqdnCacheOK describes a response with status code 200, with default header values.

Success
*/
type GetFqdnCacheOK struct {
	// Payload holds the decoded response body: the cached DNS lookups.
	Payload []*models.DNSLookup
}

// IsSuccess returns true when this get fqdn cache o k response has a 2xx status code
func (o *GetFqdnCacheOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get fqdn cache o k response has a 3xx status code
func (o *GetFqdnCacheOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get fqdn cache o k response has a 4xx status code
func (o *GetFqdnCacheOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get fqdn cache o k response has a 5xx status code
func (o *GetFqdnCacheOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get fqdn cache o k response a status code equal to that given
func (o *GetFqdnCacheOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get fqdn cache o k response
func (o *GetFqdnCacheOK) Code() int {
	return 200
}

func (o *GetFqdnCacheOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %s", 200, payload)
}

func (o *GetFqdnCacheOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %s", 200, payload)
}

func (o *GetFqdnCacheOK) GetPayload() []*models.DNSLookup {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF is tolerated
// so that an empty body yields a nil payload rather than an error.
func (o *GetFqdnCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewGetFqdnCacheBadRequest creates a GetFqdnCacheBadRequest with default headers values
func NewGetFqdnCacheBadRequest() *GetFqdnCacheBadRequest {
	return &GetFqdnCacheBadRequest{}
}

/*
GetFqdnCacheBadRequest describes a response with status code 400, with default header values.

Invalid request (error parsing parameters)
*/
type GetFqdnCacheBadRequest struct {
	// Payload holds the decoded error message returned by the server.
	Payload models.Error
}

// IsSuccess returns true when this get fqdn cache bad request response has a 2xx status code
func (o *GetFqdnCacheBadRequest) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get fqdn cache bad request response has a 3xx status code
func (o *GetFqdnCacheBadRequest) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get fqdn cache bad request response has a 4xx status code
func (o *GetFqdnCacheBadRequest) IsClientError() bool {
	return true
}

// IsServerError returns true when this get fqdn cache bad request response has a 5xx status code
func (o *GetFqdnCacheBadRequest) IsServerError() bool {
	return false
}

// IsCode returns true when this get fqdn cache bad request response a status code equal to that given
func (o *GetFqdnCacheBadRequest) IsCode(code int) bool {
	return code == 400
}

// Code gets the status code for the get fqdn cache bad request response
func (o *GetFqdnCacheBadRequest) Code() int {
	return 400
}

func (o *GetFqdnCacheBadRequest) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %s", 400, payload)
}

func (o *GetFqdnCacheBadRequest) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %s", 400, payload)
}

func (o *GetFqdnCacheBadRequest) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF is tolerated.
func (o *GetFqdnCacheBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewGetFqdnCacheNotFound creates a GetFqdnCacheNotFound with default headers values
func NewGetFqdnCacheNotFound() *GetFqdnCacheNotFound {
	return &GetFqdnCacheNotFound{}
}

/*
GetFqdnCacheNotFound describes a response with status code 404, with default header values.

No DNS data with provided parameters found
*/
type GetFqdnCacheNotFound struct {
	// 404 carries no response body, so there is no payload field.
}

// IsSuccess returns true when this get fqdn cache not found response has a 2xx status code
func (o *GetFqdnCacheNotFound) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get fqdn cache not found response has a 3xx status code
func (o *GetFqdnCacheNotFound) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get fqdn cache not found response has a 4xx status code
func (o *GetFqdnCacheNotFound) IsClientError() bool {
	return true
}

// IsServerError returns true when this get fqdn cache not found response has a 5xx status code
func (o *GetFqdnCacheNotFound) IsServerError() bool {
	return false
}

// IsCode returns true when this get fqdn cache not found response a status code equal to that given
func (o *GetFqdnCacheNotFound) IsCode(code int) bool {
	return code == 404
}

// Code gets the status code for the get fqdn cache not found response
func (o *GetFqdnCacheNotFound) Code() int {
	return 404
}

func (o *GetFqdnCacheNotFound) Error() string {
	return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound", 404)
}

func (o *GetFqdnCacheNotFound) String() string {
	return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound", 404)
}

// readResponse is a no-op: a 404 has no body to decode.
func (o *GetFqdnCacheNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetFqdnNamesParams creates a new GetFqdnNamesParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetFqdnNamesParams() *GetFqdnNamesParams {
	return &GetFqdnNamesParams{
		timeout: cr.DefaultTimeout,
	}
}

// NewGetFqdnNamesParamsWithTimeout creates a new GetFqdnNamesParams object
// with the ability to set a timeout on a request.
func NewGetFqdnNamesParamsWithTimeout(timeout time.Duration) *GetFqdnNamesParams {
	return &GetFqdnNamesParams{
		timeout: timeout,
	}
}

// NewGetFqdnNamesParamsWithContext creates a new GetFqdnNamesParams object
// with the ability to set a context for a request.
func NewGetFqdnNamesParamsWithContext(ctx context.Context) *GetFqdnNamesParams {
	return &GetFqdnNamesParams{
		Context: ctx,
	}
}

// NewGetFqdnNamesParamsWithHTTPClient creates a new GetFqdnNamesParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetFqdnNamesParamsWithHTTPClient(client *http.Client) *GetFqdnNamesParams {
	return &GetFqdnNamesParams{
		HTTPClient: client,
	}
}

/*
GetFqdnNamesParams contains all the parameters to send to the API endpoint
for the get fqdn names operation.

Typically these are written to a http.Request.
*/
type GetFqdnNamesParams struct {
	// This operation takes no query/path/body parameters; only the
	// request-execution settings below are configurable.
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get fqdn names params (not the query body).
//
// All values with no default are reset to their zero value.
//
// The With* methods below return the receiver so calls can be chained fluently.
func (o *GetFqdnNamesParams) WithDefaults() *GetFqdnNamesParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get fqdn names params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetFqdnNamesParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get fqdn names params
func (o *GetFqdnNamesParams) WithTimeout(timeout time.Duration) *GetFqdnNamesParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get fqdn names params
func (o *GetFqdnNamesParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get fqdn names params
func (o *GetFqdnNamesParams) WithContext(ctx context.Context) *GetFqdnNamesParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get fqdn names params
func (o *GetFqdnNamesParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get fqdn names params
func (o *GetFqdnNamesParams) WithHTTPClient(client *http.Client) *GetFqdnNamesParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get fqdn names params
func (o *GetFqdnNamesParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request
//
// Only the timeout is applied: this operation has no request parameters, so
// the validation-error slice is kept solely for symmetry with generated siblings.
func (o *GetFqdnNamesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetFqdnNamesReader is a Reader for the GetFqdnNames structure.
type GetFqdnNamesReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
//
// A 200 is returned as the result value; a 400 is decoded and returned as the
// error value; any other status becomes a generic runtime.APIError.
func (o *GetFqdnNamesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetFqdnNamesOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 400:
		result := NewGetFqdnNamesBadRequest()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("[GET /fqdn/names] GetFqdnNames", response, response.Code())
	}
}
// NewGetFqdnNamesOK creates a GetFqdnNamesOK with default headers values
func NewGetFqdnNamesOK() *GetFqdnNamesOK {
	return &GetFqdnNamesOK{}
}

/*
GetFqdnNamesOK describes a response with status code 200, with default header values.

Success
*/
type GetFqdnNamesOK struct {
	// Payload holds the decoded response body.
	Payload *models.NameManager
}

// IsSuccess returns true when this get fqdn names o k response has a 2xx status code
func (o *GetFqdnNamesOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get fqdn names o k response has a 3xx status code
func (o *GetFqdnNamesOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get fqdn names o k response has a 4xx status code
func (o *GetFqdnNamesOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get fqdn names o k response has a 5xx status code
func (o *GetFqdnNamesOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get fqdn names o k response a status code equal to that given
func (o *GetFqdnNamesOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get fqdn names o k response
func (o *GetFqdnNamesOK) Code() int {
	return 200
}

func (o *GetFqdnNamesOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %s", 200, payload)
}

func (o *GetFqdnNamesOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %s", 200, payload)
}

func (o *GetFqdnNamesOK) GetPayload() *models.NameManager {
	return o.Payload
}

// readResponse allocates the payload, then decodes the response body into it;
// io.EOF is tolerated so an empty body yields a zero-value payload.
func (o *GetFqdnNamesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.NameManager)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewGetFqdnNamesBadRequest creates a GetFqdnNamesBadRequest with default headers values
func NewGetFqdnNamesBadRequest() *GetFqdnNamesBadRequest {
	return &GetFqdnNamesBadRequest{}
}

/*
GetFqdnNamesBadRequest describes a response with status code 400, with default header values.

Invalid request (error parsing parameters)
*/
type GetFqdnNamesBadRequest struct {
	// Payload holds the decoded error message returned by the server.
	Payload models.Error
}

// IsSuccess returns true when this get fqdn names bad request response has a 2xx status code
func (o *GetFqdnNamesBadRequest) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get fqdn names bad request response has a 3xx status code
func (o *GetFqdnNamesBadRequest) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get fqdn names bad request response has a 4xx status code
func (o *GetFqdnNamesBadRequest) IsClientError() bool {
	return true
}

// IsServerError returns true when this get fqdn names bad request response has a 5xx status code
func (o *GetFqdnNamesBadRequest) IsServerError() bool {
	return false
}

// IsCode returns true when this get fqdn names bad request response a status code equal to that given
func (o *GetFqdnNamesBadRequest) IsCode(code int) bool {
	return code == 400
}

// Code gets the status code for the get fqdn names bad request response
func (o *GetFqdnNamesBadRequest) Code() int {
	return 400
}

func (o *GetFqdnNamesBadRequest) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %s", 400, payload)
}

func (o *GetFqdnNamesBadRequest) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %s", 400, payload)
}

func (o *GetFqdnNamesBadRequest) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF is tolerated.
func (o *GetFqdnNamesBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetIdentityEndpointsParams creates a new GetIdentityEndpointsParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetIdentityEndpointsParams() *GetIdentityEndpointsParams {
	p := new(GetIdentityEndpointsParams)
	p.timeout = cr.DefaultTimeout
	return p
}

// NewGetIdentityEndpointsParamsWithTimeout creates a new GetIdentityEndpointsParams object
// with the ability to set a timeout on a request.
func NewGetIdentityEndpointsParamsWithTimeout(timeout time.Duration) *GetIdentityEndpointsParams {
	p := new(GetIdentityEndpointsParams)
	p.timeout = timeout
	return p
}

// NewGetIdentityEndpointsParamsWithContext creates a new GetIdentityEndpointsParams object
// with the ability to set a context for a request.
func NewGetIdentityEndpointsParamsWithContext(ctx context.Context) *GetIdentityEndpointsParams {
	p := new(GetIdentityEndpointsParams)
	p.Context = ctx
	return p
}

// NewGetIdentityEndpointsParamsWithHTTPClient creates a new GetIdentityEndpointsParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetIdentityEndpointsParamsWithHTTPClient(client *http.Client) *GetIdentityEndpointsParams {
	p := new(GetIdentityEndpointsParams)
	p.HTTPClient = client
	return p
}
/*
GetIdentityEndpointsParams contains all the parameters to send to the API endpoint
for the get identity endpoints operation.
Typically these are written to a http.Request.
*/
type GetIdentityEndpointsParams struct {
	// timeout is the request timeout applied in WriteToRequest via r.SetTimeout.
	timeout time.Duration
	// Context carries cancellation/deadline signals for the request.
	Context context.Context
	// HTTPClient optionally overrides the HTTP client used to send the request.
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get identity endpoints params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetIdentityEndpointsParams) WithDefaults() *GetIdentityEndpointsParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get identity endpoints params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetIdentityEndpointsParams) SetDefaults() {
	// This operation defines no defaults; nothing to hydrate.
}

// WithTimeout adds the timeout to the get identity endpoints params.
func (o *GetIdentityEndpointsParams) WithTimeout(timeout time.Duration) *GetIdentityEndpointsParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get identity endpoints params.
func (o *GetIdentityEndpointsParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get identity endpoints params.
func (o *GetIdentityEndpointsParams) WithContext(ctx context.Context) *GetIdentityEndpointsParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get identity endpoints params.
func (o *GetIdentityEndpointsParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get identity endpoints params.
func (o *GetIdentityEndpointsParams) WithHTTPClient(client *http.Client) *GetIdentityEndpointsParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get identity endpoints params.
func (o *GetIdentityEndpointsParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}
// WriteToRequest writes these params to a swagger request.
func (o *GetIdentityEndpointsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}

	// This operation has no parameters, so errs stays empty; the shape is kept
	// for symmetry with the other generated WriteToRequest implementations.
	var errs []error
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetIdentityEndpointsReader is a Reader for the GetIdentityEndpoints structure.
type GetIdentityEndpointsReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// 2xx responses are returned as the result; all others are returned as the error.
func (o *GetIdentityEndpointsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		ok := NewGetIdentityEndpointsOK()
		if err := ok.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return ok, nil
	case 404:
		notFound := NewGetIdentityEndpointsNotFound()
		if err := notFound.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, notFound
	default:
		return nil, runtime.NewAPIError("[GET /identity/endpoints] GetIdentityEndpoints", response, response.Code())
	}
}
// NewGetIdentityEndpointsOK creates a GetIdentityEndpointsOK with default headers values.
func NewGetIdentityEndpointsOK() *GetIdentityEndpointsOK {
	o := new(GetIdentityEndpointsOK)
	return o
}

/*
GetIdentityEndpointsOK describes a response with status code 200, with default header values.

Success
*/
type GetIdentityEndpointsOK struct {
	// Payload holds the decoded list of identity/endpoint associations.
	Payload []*models.IdentityEndpoints
}

// IsSuccess reports whether this response carries a 2xx status code; a 200 always does.
func (o *GetIdentityEndpointsOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this response carries a 3xx status code.
func (o *GetIdentityEndpointsOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (o *GetIdentityEndpointsOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this response carries a 5xx status code.
func (o *GetIdentityEndpointsOK) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response (200).
func (o *GetIdentityEndpointsOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get identity endpoints o k response.
func (o *GetIdentityEndpointsOK) Code() int {
	return 200
}

// Error renders the response, including its JSON-encoded payload, as an error string.
func (o *GetIdentityEndpointsOK) Error() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %s", 200, b)
}

// String renders the response in the same format as Error.
func (o *GetIdentityEndpointsOK) String() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %s", 200, b)
}

// GetPayload returns the payload of this response.
func (o *GetIdentityEndpointsOK) GetPayload() []*models.IdentityEndpoints {
	return o.Payload
}

// readResponse decodes the HTTP response body into the payload slice.
func (o *GetIdentityEndpointsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF only signals an empty body here, which is acceptable.
	err := consumer.Consume(response.Body(), &o.Payload)
	if err == nil || err == io.EOF {
		return nil
	}
	return err
}
// NewGetIdentityEndpointsNotFound creates a GetIdentityEndpointsNotFound with default headers values.
func NewGetIdentityEndpointsNotFound() *GetIdentityEndpointsNotFound {
	o := new(GetIdentityEndpointsNotFound)
	return o
}

/*
GetIdentityEndpointsNotFound describes a response with status code 404, with default header values.

Set of identities which are being used by local endpoints could not be found.
*/
type GetIdentityEndpointsNotFound struct {
}

// IsSuccess reports whether this response carries a 2xx status code; a 404 never does.
func (o *GetIdentityEndpointsNotFound) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (o *GetIdentityEndpointsNotFound) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code; a 404 always does.
func (o *GetIdentityEndpointsNotFound) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (o *GetIdentityEndpointsNotFound) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response (404).
func (o *GetIdentityEndpointsNotFound) IsCode(code int) bool {
	return code == 404
}

// Code gets the status code for the get identity endpoints not found response.
func (o *GetIdentityEndpointsNotFound) Code() int {
	return 404
}

// Error renders the response as an error string; this response carries no payload.
func (o *GetIdentityEndpointsNotFound) Error() string {
	return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound", 404)
}

// String renders the response in the same format as Error.
func (o *GetIdentityEndpointsNotFound) String() string {
	return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound", 404)
}

// readResponse is a no-op: this response has no body to decode.
func (o *GetIdentityEndpointsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetIdentityIDParams creates a new GetIdentityIDParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetIdentityIDParams() *GetIdentityIDParams {
	p := new(GetIdentityIDParams)
	p.timeout = cr.DefaultTimeout
	return p
}

// NewGetIdentityIDParamsWithTimeout creates a new GetIdentityIDParams object
// with the ability to set a timeout on a request.
func NewGetIdentityIDParamsWithTimeout(timeout time.Duration) *GetIdentityIDParams {
	p := new(GetIdentityIDParams)
	p.timeout = timeout
	return p
}

// NewGetIdentityIDParamsWithContext creates a new GetIdentityIDParams object
// with the ability to set a context for a request.
func NewGetIdentityIDParamsWithContext(ctx context.Context) *GetIdentityIDParams {
	p := new(GetIdentityIDParams)
	p.Context = ctx
	return p
}

// NewGetIdentityIDParamsWithHTTPClient creates a new GetIdentityIDParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetIdentityIDParamsWithHTTPClient(client *http.Client) *GetIdentityIDParams {
	p := new(GetIdentityIDParams)
	p.HTTPClient = client
	return p
}
/*
GetIdentityIDParams contains all the parameters to send to the API endpoint
for the get identity ID operation.
Typically these are written to a http.Request.
*/
type GetIdentityIDParams struct {
	/* ID.
	Cluster wide unique identifier of a security identity.
	*/
	// ID is written as the "id" path parameter in WriteToRequest.
	ID string
	// timeout is the request timeout applied in WriteToRequest via r.SetTimeout.
	timeout time.Duration
	// Context carries cancellation/deadline signals for the request.
	Context context.Context
	// HTTPClient optionally overrides the HTTP client used to send the request.
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get identity ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetIdentityIDParams) WithDefaults() *GetIdentityIDParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get identity ID params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetIdentityIDParams) SetDefaults() {
	// This operation defines no defaults; nothing to hydrate.
}

// WithTimeout adds the timeout to the get identity ID params.
func (o *GetIdentityIDParams) WithTimeout(timeout time.Duration) *GetIdentityIDParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get identity ID params.
func (o *GetIdentityIDParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get identity ID params.
func (o *GetIdentityIDParams) WithContext(ctx context.Context) *GetIdentityIDParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get identity ID params.
func (o *GetIdentityIDParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get identity ID params.
func (o *GetIdentityIDParams) WithHTTPClient(client *http.Client) *GetIdentityIDParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get identity ID params.
func (o *GetIdentityIDParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithID adds the id to the get identity ID params.
func (o *GetIdentityIDParams) WithID(id string) *GetIdentityIDParams {
	o.ID = id
	return o
}

// SetID adds the id to the get identity ID params.
func (o *GetIdentityIDParams) SetID(id string) {
	o.ID = id
}
// WriteToRequest writes these params to a swagger request.
func (o *GetIdentityIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}

	var errs []error

	// Substitute the "id" placeholder in the request path.
	if err := r.SetPathParam("id", o.ID); err != nil {
		return err
	}

	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetIdentityIDReader is a Reader for the GetIdentityID structure.
type GetIdentityIDReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// 2xx responses are returned as the result; all others are returned as the error.
func (o *GetIdentityIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		ok := NewGetIdentityIDOK()
		if err := ok.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return ok, nil
	case 400:
		badRequest := NewGetIdentityIDBadRequest()
		if err := badRequest.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, badRequest
	case 404:
		notFound := NewGetIdentityIDNotFound()
		if err := notFound.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, notFound
	case 520:
		unreachable := NewGetIdentityIDUnreachable()
		if err := unreachable.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, unreachable
	case 521:
		invalidFormat := NewGetIdentityIDInvalidStorageFormat()
		if err := invalidFormat.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, invalidFormat
	default:
		return nil, runtime.NewAPIError("[GET /identity/{id}] GetIdentityID", response, response.Code())
	}
}
// NewGetIdentityIDOK creates a GetIdentityIDOK with default headers values.
func NewGetIdentityIDOK() *GetIdentityIDOK {
	o := new(GetIdentityIDOK)
	return o
}

/*
GetIdentityIDOK describes a response with status code 200, with default header values.

Success
*/
type GetIdentityIDOK struct {
	// Payload holds the decoded security identity.
	Payload *models.Identity
}

// IsSuccess reports whether this response carries a 2xx status code; a 200 always does.
func (o *GetIdentityIDOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this response carries a 3xx status code.
func (o *GetIdentityIDOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (o *GetIdentityIDOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this response carries a 5xx status code.
func (o *GetIdentityIDOK) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response (200).
func (o *GetIdentityIDOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get identity Id o k response.
func (o *GetIdentityIDOK) Code() int {
	return 200
}

// Error renders the response, including its JSON-encoded payload, as an error string.
func (o *GetIdentityIDOK) Error() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %s", 200, b)
}

// String renders the response in the same format as Error.
func (o *GetIdentityIDOK) String() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %s", 200, b)
}

// GetPayload returns the payload of this response.
func (o *GetIdentityIDOK) GetPayload() *models.Identity {
	return o.Payload
}

// readResponse decodes the HTTP response body into a freshly allocated payload.
func (o *GetIdentityIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = &models.Identity{}

	// io.EOF only signals an empty body here, which is acceptable.
	err := consumer.Consume(response.Body(), o.Payload)
	if err == nil || err == io.EOF {
		return nil
	}
	return err
}
// NewGetIdentityIDBadRequest creates a GetIdentityIDBadRequest with default headers values.
func NewGetIdentityIDBadRequest() *GetIdentityIDBadRequest {
	o := new(GetIdentityIDBadRequest)
	return o
}

/*
GetIdentityIDBadRequest describes a response with status code 400, with default header values.

Invalid identity provided
*/
type GetIdentityIDBadRequest struct {
}

// IsSuccess reports whether this response carries a 2xx status code; a 400 never does.
func (o *GetIdentityIDBadRequest) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (o *GetIdentityIDBadRequest) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code; a 400 always does.
func (o *GetIdentityIDBadRequest) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (o *GetIdentityIDBadRequest) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response (400).
func (o *GetIdentityIDBadRequest) IsCode(code int) bool {
	return code == 400
}

// Code gets the status code for the get identity Id bad request response.
func (o *GetIdentityIDBadRequest) Code() int {
	return 400
}

// Error renders the response as an error string; this response carries no payload.
func (o *GetIdentityIDBadRequest) Error() string {
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest", 400)
}

// String renders the response in the same format as Error.
func (o *GetIdentityIDBadRequest) String() string {
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest", 400)
}

// readResponse is a no-op: this response has no body to decode.
func (o *GetIdentityIDBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetIdentityIDNotFound creates a GetIdentityIDNotFound with default headers values.
func NewGetIdentityIDNotFound() *GetIdentityIDNotFound {
	o := new(GetIdentityIDNotFound)
	return o
}

/*
GetIdentityIDNotFound describes a response with status code 404, with default header values.

Identity not found
*/
type GetIdentityIDNotFound struct {
}

// IsSuccess reports whether this response carries a 2xx status code; a 404 never does.
func (o *GetIdentityIDNotFound) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (o *GetIdentityIDNotFound) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code; a 404 always does.
func (o *GetIdentityIDNotFound) IsClientError() bool {
	return true
}

// IsServerError reports whether this response carries a 5xx status code.
func (o *GetIdentityIDNotFound) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response (404).
func (o *GetIdentityIDNotFound) IsCode(code int) bool {
	return code == 404
}

// Code gets the status code for the get identity Id not found response.
func (o *GetIdentityIDNotFound) Code() int {
	return 404
}

// Error renders the response as an error string; this response carries no payload.
func (o *GetIdentityIDNotFound) Error() string {
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound", 404)
}

// String renders the response in the same format as Error.
func (o *GetIdentityIDNotFound) String() string {
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound", 404)
}

// readResponse is a no-op: this response has no body to decode.
func (o *GetIdentityIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewGetIdentityIDUnreachable creates a GetIdentityIDUnreachable with default headers values.
func NewGetIdentityIDUnreachable() *GetIdentityIDUnreachable {
	o := new(GetIdentityIDUnreachable)
	return o
}

/*
GetIdentityIDUnreachable describes a response with status code 520, with default header values.

Identity storage unreachable. Likely a network problem.
*/
type GetIdentityIDUnreachable struct {
	// Payload holds the error details returned by the server.
	Payload models.Error
}

// IsSuccess reports whether this response carries a 2xx status code; a 520 never does.
func (o *GetIdentityIDUnreachable) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (o *GetIdentityIDUnreachable) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (o *GetIdentityIDUnreachable) IsClientError() bool {
	return false
}

// IsServerError reports whether this response carries a 5xx status code; 520 is treated as one.
func (o *GetIdentityIDUnreachable) IsServerError() bool {
	return true
}

// IsCode reports whether the given status code matches this response (520).
func (o *GetIdentityIDUnreachable) IsCode(code int) bool {
	return code == 520
}

// Code gets the status code for the get identity Id unreachable response.
func (o *GetIdentityIDUnreachable) Code() int {
	return 520
}

// Error renders the response, including its JSON-encoded payload, as an error string.
func (o *GetIdentityIDUnreachable) Error() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %s", 520, b)
}

// String renders the response in the same format as Error.
func (o *GetIdentityIDUnreachable) String() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %s", 520, b)
}

// GetPayload returns the error payload of this response.
func (o *GetIdentityIDUnreachable) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the HTTP response body into the payload.
func (o *GetIdentityIDUnreachable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF only signals an empty body here, which is acceptable.
	err := consumer.Consume(response.Body(), &o.Payload)
	if err == nil || err == io.EOF {
		return nil
	}
	return err
}
// NewGetIdentityIDInvalidStorageFormat creates a GetIdentityIDInvalidStorageFormat with default headers values.
func NewGetIdentityIDInvalidStorageFormat() *GetIdentityIDInvalidStorageFormat {
	o := new(GetIdentityIDInvalidStorageFormat)
	return o
}

/*
GetIdentityIDInvalidStorageFormat describes a response with status code 521, with default header values.

Invalid identity format in storage
*/
type GetIdentityIDInvalidStorageFormat struct {
	// Payload holds the error details returned by the server.
	Payload models.Error
}

// IsSuccess reports whether this response carries a 2xx status code; a 521 never does.
func (o *GetIdentityIDInvalidStorageFormat) IsSuccess() bool {
	return false
}

// IsRedirect reports whether this response carries a 3xx status code.
func (o *GetIdentityIDInvalidStorageFormat) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (o *GetIdentityIDInvalidStorageFormat) IsClientError() bool {
	return false
}

// IsServerError reports whether this response carries a 5xx status code; 521 is treated as one.
func (o *GetIdentityIDInvalidStorageFormat) IsServerError() bool {
	return true
}

// IsCode reports whether the given status code matches this response (521).
func (o *GetIdentityIDInvalidStorageFormat) IsCode(code int) bool {
	return code == 521
}

// Code gets the status code for the get identity Id invalid storage format response.
func (o *GetIdentityIDInvalidStorageFormat) Code() int {
	return 521
}

// Error renders the response, including its JSON-encoded payload, as an error string.
func (o *GetIdentityIDInvalidStorageFormat) Error() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %s", 521, b)
}

// String renders the response in the same format as Error.
func (o *GetIdentityIDInvalidStorageFormat) String() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %s", 521, b)
}

// GetPayload returns the error payload of this response.
func (o *GetIdentityIDInvalidStorageFormat) GetPayload() models.Error {
	return o.Payload
}

// readResponse decodes the HTTP response body into the payload.
func (o *GetIdentityIDInvalidStorageFormat) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF only signals an empty body here, which is acceptable.
	err := consumer.Consume(response.Body(), &o.Payload)
	if err == nil || err == io.EOF {
		return nil
	}
	return err
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewGetIdentityParams creates a new GetIdentityParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetIdentityParams() *GetIdentityParams {
	p := new(GetIdentityParams)
	p.timeout = cr.DefaultTimeout
	return p
}

// NewGetIdentityParamsWithTimeout creates a new GetIdentityParams object
// with the ability to set a timeout on a request.
func NewGetIdentityParamsWithTimeout(timeout time.Duration) *GetIdentityParams {
	p := new(GetIdentityParams)
	p.timeout = timeout
	return p
}

// NewGetIdentityParamsWithContext creates a new GetIdentityParams object
// with the ability to set a context for a request.
func NewGetIdentityParamsWithContext(ctx context.Context) *GetIdentityParams {
	p := new(GetIdentityParams)
	p.Context = ctx
	return p
}

// NewGetIdentityParamsWithHTTPClient creates a new GetIdentityParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetIdentityParamsWithHTTPClient(client *http.Client) *GetIdentityParams {
	p := new(GetIdentityParams)
	p.HTTPClient = client
	return p
}
/*
GetIdentityParams contains all the parameters to send to the API endpoint
for the get identity operation.
Typically these are written to a http.Request.
*/
type GetIdentityParams struct {
	/* Labels.
	List of labels
	*/
	// Labels is sent as the request body in WriteToRequest when non-nil.
	Labels models.Labels
	// timeout is the request timeout applied in WriteToRequest via r.SetTimeout.
	timeout time.Duration
	// Context carries cancellation/deadline signals for the request.
	Context context.Context
	// HTTPClient optionally overrides the HTTP client used to send the request.
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get identity params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetIdentityParams) WithDefaults() *GetIdentityParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get identity params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetIdentityParams) SetDefaults() {
	// This operation defines no defaults; nothing to hydrate.
}

// WithTimeout adds the timeout to the get identity params.
func (o *GetIdentityParams) WithTimeout(timeout time.Duration) *GetIdentityParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get identity params.
func (o *GetIdentityParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get identity params.
func (o *GetIdentityParams) WithContext(ctx context.Context) *GetIdentityParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get identity params.
func (o *GetIdentityParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get identity params.
func (o *GetIdentityParams) WithHTTPClient(client *http.Client) *GetIdentityParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get identity params.
func (o *GetIdentityParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithLabels adds the labels to the get identity params.
func (o *GetIdentityParams) WithLabels(labels models.Labels) *GetIdentityParams {
	o.Labels = labels
	return o
}

// SetLabels adds the labels to the get identity params.
func (o *GetIdentityParams) SetLabels(labels models.Labels) {
	o.Labels = labels
}
// WriteToRequest writes these params to a swagger request.
func (o *GetIdentityParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}

	var errs []error

	// The labels list, when present, travels as the request body.
	if o.Labels != nil {
		if err := r.SetBodyParam(o.Labels); err != nil {
			return err
		}
	}

	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetIdentityReader is a Reader for the GetIdentity structure.
type GetIdentityReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// 2xx responses are returned as the result; all others are returned as the error.
func (o *GetIdentityReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		ok := NewGetIdentityOK()
		if err := ok.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return ok, nil
	case 404:
		notFound := NewGetIdentityNotFound()
		if err := notFound.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, notFound
	case 520:
		unreachable := NewGetIdentityUnreachable()
		if err := unreachable.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, unreachable
	case 521:
		invalidFormat := NewGetIdentityInvalidStorageFormat()
		if err := invalidFormat.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, invalidFormat
	default:
		return nil, runtime.NewAPIError("[GET /identity] GetIdentity", response, response.Code())
	}
}
// NewGetIdentityOK creates a GetIdentityOK with default headers values.
func NewGetIdentityOK() *GetIdentityOK {
	o := new(GetIdentityOK)
	return o
}

/*
GetIdentityOK describes a response with status code 200, with default header values.

Success
*/
type GetIdentityOK struct {
	// Payload holds the decoded list of matching security identities.
	Payload []*models.Identity
}

// IsSuccess reports whether this response carries a 2xx status code; a 200 always does.
func (o *GetIdentityOK) IsSuccess() bool {
	return true
}

// IsRedirect reports whether this response carries a 3xx status code.
func (o *GetIdentityOK) IsRedirect() bool {
	return false
}

// IsClientError reports whether this response carries a 4xx status code.
func (o *GetIdentityOK) IsClientError() bool {
	return false
}

// IsServerError reports whether this response carries a 5xx status code.
func (o *GetIdentityOK) IsServerError() bool {
	return false
}

// IsCode reports whether the given status code matches this response (200).
func (o *GetIdentityOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get identity o k response.
func (o *GetIdentityOK) Code() int {
	return 200
}

// Error renders the response, including its JSON-encoded payload, as an error string.
func (o *GetIdentityOK) Error() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity][%d] getIdentityOK %s", 200, b)
}

// String renders the response in the same format as Error.
func (o *GetIdentityOK) String() string {
	b, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /identity][%d] getIdentityOK %s", 200, b)
}

// GetPayload returns the payload of this response.
func (o *GetIdentityOK) GetPayload() []*models.Identity {
	return o.Payload
}

// readResponse decodes the HTTP response body into the payload slice.
func (o *GetIdentityOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// io.EOF only signals an empty body here, which is acceptable.
	err := consumer.Consume(response.Body(), &o.Payload)
	if err == nil || err == io.EOF {
		return nil
	}
	return err
}
// NewGetIdentityNotFound creates a GetIdentityNotFound with default headers values
func NewGetIdentityNotFound() *GetIdentityNotFound {
return &GetIdentityNotFound{}
}
/*
GetIdentityNotFound describes a response with status code 404, with default header values.
Identities with provided parameters not found
*/
// GetIdentityNotFound carries no payload; it only signals the 404 status.
type GetIdentityNotFound struct {
}
// IsSuccess returns true when this get identity not found response has a 2xx status code
func (o *GetIdentityNotFound) IsSuccess() bool {
return false
}
// IsRedirect returns true when this get identity not found response has a 3xx status code
func (o *GetIdentityNotFound) IsRedirect() bool {
return false
}
// IsClientError returns true when this get identity not found response has a 4xx status code
func (o *GetIdentityNotFound) IsClientError() bool {
return true
}
// IsServerError returns true when this get identity not found response has a 5xx status code
func (o *GetIdentityNotFound) IsServerError() bool {
return false
}
// IsCode returns true when this get identity not found response a status code equal to that given
func (o *GetIdentityNotFound) IsCode(code int) bool {
return code == 404
}
// Code gets the status code for the get identity not found response
func (o *GetIdentityNotFound) Code() int {
return 404
}
func (o *GetIdentityNotFound) Error() string {
return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound", 404)
}
func (o *GetIdentityNotFound) String() string {
return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound", 404)
}
// readResponse is a no-op: this response defines no body to decode.
func (o *GetIdentityNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewGetIdentityUnreachable creates a GetIdentityUnreachable with default headers values
func NewGetIdentityUnreachable() *GetIdentityUnreachable {
return &GetIdentityUnreachable{}
}
/*
GetIdentityUnreachable describes a response with status code 520, with default header values.
Identity storage unreachable. Likely a network problem.
*/
type GetIdentityUnreachable struct {
// Payload holds the server-provided error description.
Payload models.Error
}
// IsSuccess returns true when this get identity unreachable response has a 2xx status code
func (o *GetIdentityUnreachable) IsSuccess() bool {
return false
}
// IsRedirect returns true when this get identity unreachable response has a 3xx status code
func (o *GetIdentityUnreachable) IsRedirect() bool {
return false
}
// IsClientError returns true when this get identity unreachable response has a 4xx status code
func (o *GetIdentityUnreachable) IsClientError() bool {
return false
}
// IsServerError returns true when this get identity unreachable response has a 5xx status code
// (520 is treated as a server-side error even though it is outside 5xx).
func (o *GetIdentityUnreachable) IsServerError() bool {
return true
}
// IsCode returns true when this get identity unreachable response a status code equal to that given
func (o *GetIdentityUnreachable) IsCode(code int) bool {
return code == 520
}
// Code gets the status code for the get identity unreachable response
func (o *GetIdentityUnreachable) Code() int {
return 520
}
func (o *GetIdentityUnreachable) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %s", 520, payload)
}
func (o *GetIdentityUnreachable) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %s", 520, payload)
}
func (o *GetIdentityUnreachable) GetPayload() models.Error {
return o.Payload
}
// readResponse decodes the error payload; io.EOF (empty body) is tolerated.
func (o *GetIdentityUnreachable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetIdentityInvalidStorageFormat creates a GetIdentityInvalidStorageFormat with default headers values
func NewGetIdentityInvalidStorageFormat() *GetIdentityInvalidStorageFormat {
return &GetIdentityInvalidStorageFormat{}
}
/*
GetIdentityInvalidStorageFormat describes a response with status code 521, with default header values.
Invalid identity format in storage
*/
type GetIdentityInvalidStorageFormat struct {
// Payload holds the server-provided error description.
Payload models.Error
}
// IsSuccess returns true when this get identity invalid storage format response has a 2xx status code
func (o *GetIdentityInvalidStorageFormat) IsSuccess() bool {
return false
}
// IsRedirect returns true when this get identity invalid storage format response has a 3xx status code
func (o *GetIdentityInvalidStorageFormat) IsRedirect() bool {
return false
}
// IsClientError returns true when this get identity invalid storage format response has a 4xx status code
func (o *GetIdentityInvalidStorageFormat) IsClientError() bool {
return false
}
// IsServerError returns true when this get identity invalid storage format response has a 5xx status code
// (521 is treated as a server-side error even though it is outside 5xx).
func (o *GetIdentityInvalidStorageFormat) IsServerError() bool {
return true
}
// IsCode returns true when this get identity invalid storage format response a status code equal to that given
func (o *GetIdentityInvalidStorageFormat) IsCode(code int) bool {
return code == 521
}
// Code gets the status code for the get identity invalid storage format response
func (o *GetIdentityInvalidStorageFormat) Code() int {
return 521
}
func (o *GetIdentityInvalidStorageFormat) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %s", 521, payload)
}
func (o *GetIdentityInvalidStorageFormat) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %s", 521, payload)
}
func (o *GetIdentityInvalidStorageFormat) GetPayload() models.Error {
return o.Payload
}
// readResponse decodes the error payload; io.EOF (empty body) is tolerated.
func (o *GetIdentityInvalidStorageFormat) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewGetIPParams creates a new GetIPParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetIPParams() *GetIPParams {
return &GetIPParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetIPParamsWithTimeout creates a new GetIPParams object
// with the ability to set a timeout on a request.
func NewGetIPParamsWithTimeout(timeout time.Duration) *GetIPParams {
return &GetIPParams{
timeout: timeout,
}
}
// NewGetIPParamsWithContext creates a new GetIPParams object
// with the ability to set a context for a request.
func NewGetIPParamsWithContext(ctx context.Context) *GetIPParams {
return &GetIPParams{
Context: ctx,
}
}
// NewGetIPParamsWithHTTPClient creates a new GetIPParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetIPParamsWithHTTPClient(client *http.Client) *GetIPParams {
return &GetIPParams{
HTTPClient: client,
}
}
/*
GetIPParams contains all the parameters to send to the API endpoint
for the get IP operation.
Typically these are written to a http.Request.
*/
type GetIPParams struct {
/* Cidr.
A CIDR range of IPs
*/
// Optional: nil (or empty string) means the cidr query parameter is omitted.
Cidr *string
/* Labels.
List of labels
*/
// Optional: when non-nil, sent as the request body.
Labels models.Labels
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get IP params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetIPParams) WithDefaults() *GetIPParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the get IP params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetIPParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the get IP params
// (chainable wrapper around SetTimeout).
func (o *GetIPParams) WithTimeout(timeout time.Duration) *GetIPParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get IP params
func (o *GetIPParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get IP params
// (chainable wrapper around SetContext).
func (o *GetIPParams) WithContext(ctx context.Context) *GetIPParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get IP params
func (o *GetIPParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get IP params
// (chainable wrapper around SetHTTPClient).
func (o *GetIPParams) WithHTTPClient(client *http.Client) *GetIPParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get IP params
func (o *GetIPParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithCidr adds the cidr to the get IP params
// (chainable wrapper around SetCidr).
func (o *GetIPParams) WithCidr(cidr *string) *GetIPParams {
o.SetCidr(cidr)
return o
}
// SetCidr adds the cidr to the get IP params
func (o *GetIPParams) SetCidr(cidr *string) {
o.Cidr = cidr
}
// WithLabels adds the labels to the get IP params
// (chainable wrapper around SetLabels).
func (o *GetIPParams) WithLabels(labels models.Labels) *GetIPParams {
o.SetLabels(labels)
return o
}
// SetLabels adds the labels to the get IP params
func (o *GetIPParams) SetLabels(labels models.Labels) {
o.Labels = labels
}
// WriteToRequest writes these params to a swagger request
// - Cidr (when set and non-empty) becomes the "cidr" query parameter.
// - Labels (when non-nil) becomes the request body.
func (o *GetIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
// res is part of the generator template; nothing appends to it here.
var res []error
if o.Cidr != nil {
// query param cidr
var qrCidr string
if o.Cidr != nil {
qrCidr = *o.Cidr
}
qCidr := qrCidr
// An empty string is treated as "not set" and the query param is omitted.
if qCidr != "" {
if err := r.SetQueryParam("cidr", qCidr); err != nil {
return err
}
}
}
if o.Labels != nil {
if err := r.SetBodyParam(o.Labels); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetIPReader is a Reader for the GetIP structure.
type GetIPReader struct {
// formats carries the strfmt registry used when decoding response payloads.
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// The 200 payload is returned as the first value; 400/404 are returned
// as the error value so callers can type-assert the concrete type.
func (o *GetIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetIPOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewGetIPBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 404:
result := NewGetIPNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
// Any status not listed in the swagger spec surfaces as a generic APIError.
return nil, runtime.NewAPIError("[GET /ip] GetIP", response, response.Code())
}
}
// NewGetIPOK creates a GetIPOK with default headers values
func NewGetIPOK() *GetIPOK {
return &GetIPOK{}
}
/*
GetIPOK describes a response with status code 200, with default header values.
Success
*/
type GetIPOK struct {
// Payload is the decoded response body: the matching IP cache entries.
Payload []*models.IPListEntry
}
// IsSuccess returns true when this get Ip o k response has a 2xx status code
func (o *GetIPOK) IsSuccess() bool {
return true
}
// IsRedirect returns true when this get Ip o k response has a 3xx status code
func (o *GetIPOK) IsRedirect() bool {
return false
}
// IsClientError returns true when this get Ip o k response has a 4xx status code
func (o *GetIPOK) IsClientError() bool {
return false
}
// IsServerError returns true when this get Ip o k response has a 5xx status code
func (o *GetIPOK) IsServerError() bool {
return false
}
// IsCode returns true when this get Ip o k response a status code equal to that given
func (o *GetIPOK) IsCode(code int) bool {
return code == 200
}
// Code gets the status code for the get Ip o k response
func (o *GetIPOK) Code() int {
return 200
}
func (o *GetIPOK) Error() string {
// Marshal errors are deliberately ignored: this string is diagnostic only.
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /ip][%d] getIpOK %s", 200, payload)
}
func (o *GetIPOK) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /ip][%d] getIpOK %s", 200, payload)
}
func (o *GetIPOK) GetPayload() []*models.IPListEntry {
return o.Payload
}
// readResponse decodes the response body into Payload; io.EOF (empty body) is tolerated.
func (o *GetIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetIPBadRequest creates a GetIPBadRequest with default headers values
func NewGetIPBadRequest() *GetIPBadRequest {
return &GetIPBadRequest{}
}
/*
GetIPBadRequest describes a response with status code 400, with default header values.
Invalid request (error parsing parameters)
*/
type GetIPBadRequest struct {
// Payload holds the server-provided error description.
Payload models.Error
}
// IsSuccess returns true when this get Ip bad request response has a 2xx status code
func (o *GetIPBadRequest) IsSuccess() bool {
return false
}
// IsRedirect returns true when this get Ip bad request response has a 3xx status code
func (o *GetIPBadRequest) IsRedirect() bool {
return false
}
// IsClientError returns true when this get Ip bad request response has a 4xx status code
func (o *GetIPBadRequest) IsClientError() bool {
return true
}
// IsServerError returns true when this get Ip bad request response has a 5xx status code
func (o *GetIPBadRequest) IsServerError() bool {
return false
}
// IsCode returns true when this get Ip bad request response a status code equal to that given
func (o *GetIPBadRequest) IsCode(code int) bool {
return code == 400
}
// Code gets the status code for the get Ip bad request response
func (o *GetIPBadRequest) Code() int {
return 400
}
func (o *GetIPBadRequest) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %s", 400, payload)
}
func (o *GetIPBadRequest) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %s", 400, payload)
}
func (o *GetIPBadRequest) GetPayload() models.Error {
return o.Payload
}
// readResponse decodes the error payload; io.EOF (empty body) is tolerated.
func (o *GetIPBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetIPNotFound creates a GetIPNotFound with default headers values
func NewGetIPNotFound() *GetIPNotFound {
return &GetIPNotFound{}
}
/*
GetIPNotFound describes a response with status code 404, with default header values.
No IP cache entries with provided parameters found
*/
// GetIPNotFound carries no payload; it only signals the 404 status.
type GetIPNotFound struct {
}
// IsSuccess returns true when this get Ip not found response has a 2xx status code
func (o *GetIPNotFound) IsSuccess() bool {
return false
}
// IsRedirect returns true when this get Ip not found response has a 3xx status code
func (o *GetIPNotFound) IsRedirect() bool {
return false
}
// IsClientError returns true when this get Ip not found response has a 4xx status code
func (o *GetIPNotFound) IsClientError() bool {
return true
}
// IsServerError returns true when this get Ip not found response has a 5xx status code
func (o *GetIPNotFound) IsServerError() bool {
return false
}
// IsCode returns true when this get Ip not found response a status code equal to that given
func (o *GetIPNotFound) IsCode(code int) bool {
return code == 404
}
// Code gets the status code for the get Ip not found response
func (o *GetIPNotFound) Code() int {
return 404
}
func (o *GetIPNotFound) Error() string {
return fmt.Sprintf("[GET /ip][%d] getIpNotFound", 404)
}
func (o *GetIPNotFound) String() string {
return fmt.Sprintf("[GET /ip][%d] getIpNotFound", 404)
}
// readResponse is a no-op: this response defines no body to decode.
func (o *GetIPNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewGetPolicyParams creates a new GetPolicyParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetPolicyParams() *GetPolicyParams {
return &GetPolicyParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetPolicyParamsWithTimeout creates a new GetPolicyParams object
// with the ability to set a timeout on a request.
func NewGetPolicyParamsWithTimeout(timeout time.Duration) *GetPolicyParams {
return &GetPolicyParams{
timeout: timeout,
}
}
// NewGetPolicyParamsWithContext creates a new GetPolicyParams object
// with the ability to set a context for a request.
func NewGetPolicyParamsWithContext(ctx context.Context) *GetPolicyParams {
return &GetPolicyParams{
Context: ctx,
}
}
// NewGetPolicyParamsWithHTTPClient creates a new GetPolicyParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetPolicyParamsWithHTTPClient(client *http.Client) *GetPolicyParams {
return &GetPolicyParams{
HTTPClient: client,
}
}
/*
GetPolicyParams contains all the parameters to send to the API endpoint
for the get policy operation.
Typically these are written to a http.Request.
*/
type GetPolicyParams struct {
// Labels.
// Optional: when non-nil, sent as the request body.
Labels models.Labels
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get policy params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetPolicyParams) WithDefaults() *GetPolicyParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the get policy params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetPolicyParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the get policy params
// (chainable wrapper around SetTimeout).
func (o *GetPolicyParams) WithTimeout(timeout time.Duration) *GetPolicyParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get policy params
func (o *GetPolicyParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get policy params
// (chainable wrapper around SetContext).
func (o *GetPolicyParams) WithContext(ctx context.Context) *GetPolicyParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get policy params
func (o *GetPolicyParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get policy params
// (chainable wrapper around SetHTTPClient).
func (o *GetPolicyParams) WithHTTPClient(client *http.Client) *GetPolicyParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get policy params
func (o *GetPolicyParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithLabels adds the labels to the get policy params
// (chainable wrapper around SetLabels).
func (o *GetPolicyParams) WithLabels(labels models.Labels) *GetPolicyParams {
o.SetLabels(labels)
return o
}
// SetLabels adds the labels to the get policy params
func (o *GetPolicyParams) SetLabels(labels models.Labels) {
o.Labels = labels
}
// WriteToRequest writes these params to a swagger request
// Labels (when non-nil) is sent as the request body.
func (o *GetPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
// res is part of the generator template; nothing appends to it here.
var res []error
if o.Labels != nil {
if err := r.SetBodyParam(o.Labels); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetPolicyReader is a Reader for the GetPolicy structure.
type GetPolicyReader struct {
// formats carries the strfmt registry used when decoding response payloads.
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// The 200 payload is returned as the first value; 404 is returned as the
// error value so callers can type-assert the concrete type.
func (o *GetPolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetPolicyOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 404:
result := NewGetPolicyNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
// Any status not listed in the swagger spec surfaces as a generic APIError.
return nil, runtime.NewAPIError("[GET /policy] GetPolicy", response, response.Code())
}
}
// NewGetPolicyOK creates a GetPolicyOK with default headers values
func NewGetPolicyOK() *GetPolicyOK {
return &GetPolicyOK{}
}
/*
GetPolicyOK describes a response with status code 200, with default header values.
Success
*/
type GetPolicyOK struct {
// Payload is the decoded policy; allocated by readResponse before decoding.
Payload *models.Policy
}
// IsSuccess returns true when this get policy o k response has a 2xx status code
func (o *GetPolicyOK) IsSuccess() bool {
return true
}
// IsRedirect returns true when this get policy o k response has a 3xx status code
func (o *GetPolicyOK) IsRedirect() bool {
return false
}
// IsClientError returns true when this get policy o k response has a 4xx status code
func (o *GetPolicyOK) IsClientError() bool {
return false
}
// IsServerError returns true when this get policy o k response has a 5xx status code
func (o *GetPolicyOK) IsServerError() bool {
return false
}
// IsCode returns true when this get policy o k response a status code equal to that given
func (o *GetPolicyOK) IsCode(code int) bool {
return code == 200
}
// Code gets the status code for the get policy o k response
func (o *GetPolicyOK) Code() int {
return 200
}
func (o *GetPolicyOK) Error() string {
// Marshal errors are deliberately ignored: this string is diagnostic only.
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /policy][%d] getPolicyOK %s", 200, payload)
}
func (o *GetPolicyOK) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /policy][%d] getPolicyOK %s", 200, payload)
}
func (o *GetPolicyOK) GetPayload() *models.Policy {
return o.Payload
}
// readResponse allocates the payload and decodes the body into it;
// io.EOF (empty body) is tolerated.
func (o *GetPolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Policy)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetPolicyNotFound creates a GetPolicyNotFound with default headers values
func NewGetPolicyNotFound() *GetPolicyNotFound {
return &GetPolicyNotFound{}
}
/*
GetPolicyNotFound describes a response with status code 404, with default header values.
No policy rules found
*/
// GetPolicyNotFound carries no payload; it only signals the 404 status.
type GetPolicyNotFound struct {
}
// IsSuccess returns true when this get policy not found response has a 2xx status code
func (o *GetPolicyNotFound) IsSuccess() bool {
return false
}
// IsRedirect returns true when this get policy not found response has a 3xx status code
func (o *GetPolicyNotFound) IsRedirect() bool {
return false
}
// IsClientError returns true when this get policy not found response has a 4xx status code
func (o *GetPolicyNotFound) IsClientError() bool {
return true
}
// IsServerError returns true when this get policy not found response has a 5xx status code
func (o *GetPolicyNotFound) IsServerError() bool {
return false
}
// IsCode returns true when this get policy not found response a status code equal to that given
func (o *GetPolicyNotFound) IsCode(code int) bool {
return code == 404
}
// Code gets the status code for the get policy not found response
func (o *GetPolicyNotFound) Code() int {
return 404
}
func (o *GetPolicyNotFound) Error() string {
return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound", 404)
}
func (o *GetPolicyNotFound) String() string {
return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound", 404)
}
// readResponse is a no-op: this response defines no body to decode.
func (o *GetPolicyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetPolicySelectorsParams creates a new GetPolicySelectorsParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetPolicySelectorsParams() *GetPolicySelectorsParams {
return &GetPolicySelectorsParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetPolicySelectorsParamsWithTimeout creates a new GetPolicySelectorsParams object
// with the ability to set a timeout on a request.
func NewGetPolicySelectorsParamsWithTimeout(timeout time.Duration) *GetPolicySelectorsParams {
return &GetPolicySelectorsParams{
timeout: timeout,
}
}
// NewGetPolicySelectorsParamsWithContext creates a new GetPolicySelectorsParams object
// with the ability to set a context for a request.
func NewGetPolicySelectorsParamsWithContext(ctx context.Context) *GetPolicySelectorsParams {
return &GetPolicySelectorsParams{
Context: ctx,
}
}
// NewGetPolicySelectorsParamsWithHTTPClient creates a new GetPolicySelectorsParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetPolicySelectorsParamsWithHTTPClient(client *http.Client) *GetPolicySelectorsParams {
return &GetPolicySelectorsParams{
HTTPClient: client,
}
}
/*
GetPolicySelectorsParams contains all the parameters to send to the API endpoint
for the get policy selectors operation.
Typically these are written to a http.Request.
*/
// This operation takes no operation-specific parameters.
type GetPolicySelectorsParams struct {
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the get policy selectors params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetPolicySelectorsParams) WithDefaults() *GetPolicySelectorsParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the get policy selectors params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetPolicySelectorsParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the get policy selectors params
// (chainable wrapper around SetTimeout).
func (o *GetPolicySelectorsParams) WithTimeout(timeout time.Duration) *GetPolicySelectorsParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get policy selectors params
func (o *GetPolicySelectorsParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get policy selectors params
// (chainable wrapper around SetContext).
func (o *GetPolicySelectorsParams) WithContext(ctx context.Context) *GetPolicySelectorsParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get policy selectors params
func (o *GetPolicySelectorsParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get policy selectors params
// (chainable wrapper around SetHTTPClient).
func (o *GetPolicySelectorsParams) WithHTTPClient(client *http.Client) *GetPolicySelectorsParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get policy selectors params
func (o *GetPolicySelectorsParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WriteToRequest writes these params to a swagger request
// Only the timeout is applied; the operation has no other parameters.
func (o *GetPolicySelectorsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
// res is part of the generator template; nothing appends to it here.
var res []error
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetPolicySelectorsReader is a Reader for the GetPolicySelectors structure.
type GetPolicySelectorsReader struct {
// formats carries the strfmt registry used when decoding response payloads.
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// Only 200 is documented for this operation; anything else becomes a
// generic runtime.APIError.
func (o *GetPolicySelectorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetPolicySelectorsOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
return nil, runtime.NewAPIError("[GET /policy/selectors] GetPolicySelectors", response, response.Code())
}
}
// NewGetPolicySelectorsOK creates a GetPolicySelectorsOK with default headers values
func NewGetPolicySelectorsOK() *GetPolicySelectorsOK {
return &GetPolicySelectorsOK{}
}
/*
GetPolicySelectorsOK describes a response with status code 200, with default header values.
Success
*/
type GetPolicySelectorsOK struct {
// Payload is the decoded selector cache contents.
Payload models.SelectorCache
}
// IsSuccess returns true when this get policy selectors o k response has a 2xx status code
func (o *GetPolicySelectorsOK) IsSuccess() bool {
return true
}
// IsRedirect returns true when this get policy selectors o k response has a 3xx status code
func (o *GetPolicySelectorsOK) IsRedirect() bool {
return false
}
// IsClientError returns true when this get policy selectors o k response has a 4xx status code
func (o *GetPolicySelectorsOK) IsClientError() bool {
return false
}
// IsServerError returns true when this get policy selectors o k response has a 5xx status code
func (o *GetPolicySelectorsOK) IsServerError() bool {
return false
}
// IsCode returns true when this get policy selectors o k response a status code equal to that given
func (o *GetPolicySelectorsOK) IsCode(code int) bool {
return code == 200
}
// Code gets the status code for the get policy selectors o k response
func (o *GetPolicySelectorsOK) Code() int {
return 200
}
func (o *GetPolicySelectorsOK) Error() string {
// Marshal errors are deliberately ignored: this string is diagnostic only.
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %s", 200, payload)
}
func (o *GetPolicySelectorsOK) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %s", 200, payload)
}
func (o *GetPolicySelectorsOK) GetPayload() models.SelectorCache {
return o.Payload
}
// readResponse decodes the response body into Payload; io.EOF (empty body) is tolerated.
func (o *GetPolicySelectorsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new policy API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	c := &Client{transport: transport, formats: formats}
	return c
}
// NewClientWithBasicAuth creates a new policy API client with basic auth credentials.
// (The generated comment started with "New", which misattributed the doc to the wrong function.)
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - user: user for basic authentication header.
// - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: transport, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new policy API client with a bearer token for authentication.
// (The generated comment started with "New", which misattributed the doc to the wrong function.)
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: transport, formats: strfmt.Default}
}
/*
Client for policy API
*/
type Client struct {
	// transport submits built operations over the wire.
	transport runtime.ClientTransport
	// formats is the registry used for string-format (de)serialization.
	formats strfmt.Registry
}
// ClientOption may be used to customize the behavior of Client methods.
// Each option receives the operation just before submission and may
// mutate it (for example to override schemes or media types).
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
//
// It exposes one method per API operation of the policy group; each
// returns that operation's success envelope or an error. SetTransport
// swaps the underlying transport (useful for tests).
type ClientService interface {
	DeleteFqdnCache(params *DeleteFqdnCacheParams, opts ...ClientOption) (*DeleteFqdnCacheOK, error)
	DeletePolicy(params *DeletePolicyParams, opts ...ClientOption) (*DeletePolicyOK, error)
	GetFqdnCache(params *GetFqdnCacheParams, opts ...ClientOption) (*GetFqdnCacheOK, error)
	GetFqdnCacheID(params *GetFqdnCacheIDParams, opts ...ClientOption) (*GetFqdnCacheIDOK, error)
	GetFqdnNames(params *GetFqdnNamesParams, opts ...ClientOption) (*GetFqdnNamesOK, error)
	GetIP(params *GetIPParams, opts ...ClientOption) (*GetIPOK, error)
	GetIdentity(params *GetIdentityParams, opts ...ClientOption) (*GetIdentityOK, error)
	GetIdentityEndpoints(params *GetIdentityEndpointsParams, opts ...ClientOption) (*GetIdentityEndpointsOK, error)
	GetIdentityID(params *GetIdentityIDParams, opts ...ClientOption) (*GetIdentityIDOK, error)
	GetPolicy(params *GetPolicyParams, opts ...ClientOption) (*GetPolicyOK, error)
	GetPolicySelectors(params *GetPolicySelectorsParams, opts ...ClientOption) (*GetPolicySelectorsOK, error)
	PutPolicy(params *PutPolicyParams, opts ...ClientOption) (*PutPolicyOK, error)
	SetTransport(transport runtime.ClientTransport)
}
// DeleteFqdnCache deletes matching DNS lookups from the policy generation cache.
//
// Deletes matching DNS lookups from the cache, optionally restricted by
// DNS name. The removed IP data will no longer be used in generated
// policies.
func (a *Client) DeleteFqdnCache(params *DeleteFqdnCacheParams, opts ...ClientOption) (*DeleteFqdnCacheOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewDeleteFqdnCacheParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "DeleteFqdnCache",
		Method:             "DELETE",
		PathPattern:        "/fqdn/cache",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &DeleteFqdnCacheReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*DeleteFqdnCacheOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for DeleteFqdnCache: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// DeletePolicy deletes a policy sub tree.
//
// Deprecated: will be removed in v1.19
func (a *Client) DeletePolicy(params *DeletePolicyParams, opts ...ClientOption) (*DeletePolicyOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewDeletePolicyParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "DeletePolicy",
		Method:             "DELETE",
		PathPattern:        "/policy",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &DeletePolicyReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*DeletePolicyOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for DeletePolicy: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetFqdnCache retrieves the list of DNS lookups intercepted from all endpoints.
//
// Retrieves the list of DNS lookups intercepted from endpoints,
// optionally filtered by DNS name, CIDR IP range or source.
func (a *Client) GetFqdnCache(params *GetFqdnCacheParams, opts ...ClientOption) (*GetFqdnCacheOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetFqdnCacheParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetFqdnCache",
		Method:             "GET",
		PathPattern:        "/fqdn/cache",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetFqdnCacheReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetFqdnCacheOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetFqdnCache: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetFqdnCacheID retrieves the list of DNS lookups intercepted from an endpoint.
//
// Retrieves the list of DNS lookups intercepted from the specific endpoint,
// optionally filtered by endpoint id, DNS name, CIDR IP range or source.
func (a *Client) GetFqdnCacheID(params *GetFqdnCacheIDParams, opts ...ClientOption) (*GetFqdnCacheIDOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetFqdnCacheIDParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetFqdnCacheID",
		Method:             "GET",
		PathPattern:        "/fqdn/cache/{id}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetFqdnCacheIDReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetFqdnCacheIDOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetFqdnCacheID: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetFqdnNames lists internal DNS selector representations.
//
// Retrieves the list of DNS-related fields (names to poll, selectors and
// their corresponding regexes).
func (a *Client) GetFqdnNames(params *GetFqdnNamesParams, opts ...ClientOption) (*GetFqdnNamesOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetFqdnNamesParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetFqdnNames",
		Method:             "GET",
		PathPattern:        "/fqdn/names",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetFqdnNamesReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetFqdnNamesOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetFqdnNames: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetIP lists information about known IP addresses.
//
// Retrieves a list of IPs with known associated information such as
// their identities, host addresses, Kubernetes pod names, etc.
// The list can optionally filtered by a CIDR IP range.
func (a *Client) GetIP(params *GetIPParams, opts ...ClientOption) (*GetIPOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetIPParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetIP",
		Method:             "GET",
		PathPattern:        "/ip",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetIPReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetIPOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetIP: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetIdentity retrieves a list of identities that have metadata matching the provided parameters.
//
// Retrieves a list of identities that have metadata matching the provided parameters, or all identities if no parameters are provided.
func (a *Client) GetIdentity(params *GetIdentityParams, opts ...ClientOption) (*GetIdentityOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetIdentityParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetIdentity",
		Method:             "GET",
		PathPattern:        "/identity",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetIdentityReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetIdentityOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetIdentity: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetIdentityEndpoints retrieves identities which are being used by local endpoints.
func (a *Client) GetIdentityEndpoints(params *GetIdentityEndpointsParams, opts ...ClientOption) (*GetIdentityEndpointsOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetIdentityEndpointsParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetIdentityEndpoints",
		Method:             "GET",
		PathPattern:        "/identity/endpoints",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetIdentityEndpointsReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetIdentityEndpointsOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetIdentityEndpoints: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetIdentityID retrieves identity.
func (a *Client) GetIdentityID(params *GetIdentityIDParams, opts ...ClientOption) (*GetIdentityIDOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetIdentityIDParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetIdentityID",
		Method:             "GET",
		PathPattern:        "/identity/{id}",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetIdentityIDReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetIdentityIDOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetIdentityID: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetPolicy retrieves entire policy tree.
//
// Returns the entire policy tree with all children.
//
// Deprecated: will be removed in v1.19
func (a *Client) GetPolicy(params *GetPolicyParams, opts ...ClientOption) (*GetPolicyOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetPolicyParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetPolicy",
		Method:             "GET",
		PathPattern:        "/policy",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetPolicyReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetPolicyOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetPolicy: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// GetPolicySelectors sees what selectors match which identities.
func (a *Client) GetPolicySelectors(params *GetPolicySelectorsParams, opts ...ClientOption) (*GetPolicySelectorsOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewGetPolicySelectorsParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "GetPolicySelectors",
		Method:             "GET",
		PathPattern:        "/policy/selectors",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetPolicySelectorsReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*GetPolicySelectorsOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for GetPolicySelectors: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// PutPolicy creates or update a policy sub tree.
//
// Deprecated: will be removed in v1.19
func (a *Client) PutPolicy(params *PutPolicyParams, opts ...ClientOption) (*PutPolicyOK, error) {
	// A nil params is replaced with defaults so callers may pass nil.
	if params == nil {
		params = NewPutPolicyParams()
	}
	operation := &runtime.ClientOperation{
		ID:                 "PutPolicy",
		Method:             "PUT",
		PathPattern:        "/policy",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PutPolicyReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(operation)
	}
	raw, err := a.transport.Submit(operation)
	if err != nil {
		return nil, err
	}
	if ok, isOK := raw.(*PutPolicyOK); isOK {
		return ok, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	panic(fmt.Sprintf("unexpected success response for PutPolicy: API contract not enforced by server. Client expected to get an error, but got: %T", raw))
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
	// Replace the submit path wholesale; formats are kept as-is.
	a.transport = transport
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewPutPolicyParams creates a new PutPolicyParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewPutPolicyParams() *PutPolicyParams {
	params := PutPolicyParams{timeout: cr.DefaultTimeout}
	return &params
}
// NewPutPolicyParamsWithTimeout creates a new PutPolicyParams object
// with the ability to set a timeout on a request.
func NewPutPolicyParamsWithTimeout(timeout time.Duration) *PutPolicyParams {
	params := PutPolicyParams{timeout: timeout}
	return &params
}
// NewPutPolicyParamsWithContext creates a new PutPolicyParams object
// with the ability to set a context for a request.
func NewPutPolicyParamsWithContext(ctx context.Context) *PutPolicyParams {
	params := PutPolicyParams{Context: ctx}
	return &params
}
// NewPutPolicyParamsWithHTTPClient creates a new PutPolicyParams object
// with the ability to set a custom HTTPClient for a request.
func NewPutPolicyParamsWithHTTPClient(client *http.Client) *PutPolicyParams {
	params := PutPolicyParams{HTTPClient: client}
	return &params
}
/*
PutPolicyParams contains all the parameters to send to the API endpoint

	for the put policy operation.

	Typically these are written to a http.Request.
*/
type PutPolicyParams struct {

	/* Policy.

	   Policy rules
	*/
	Policy string

	/* Replace.

	   If true, indicates that existing rules with identical labels should be replaced.
	*/
	Replace *bool

	/* ReplaceWithLabels.

	   If present, indicates that existing rules with the given labels should be deleted.
	*/
	ReplaceWithLabels []string

	// timeout bounds the whole request; set via WithTimeout/SetTimeout.
	timeout time.Duration
	// Context, when non-nil, cancels/deadlines the request.
	Context context.Context
	// HTTPClient overrides the transport's default client when non-nil.
	HTTPClient *http.Client
}
// WithDefaults hydrates default values in the put policy params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PutPolicyParams) WithDefaults() *PutPolicyParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the put policy params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PutPolicyParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the put policy params
// (builder-style wrapper around SetTimeout).
func (o *PutPolicyParams) WithTimeout(timeout time.Duration) *PutPolicyParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the put policy params
func (o *PutPolicyParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the put policy params
// (builder-style wrapper around SetContext).
func (o *PutPolicyParams) WithContext(ctx context.Context) *PutPolicyParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the put policy params
func (o *PutPolicyParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the put policy params
// (builder-style wrapper around SetHTTPClient).
func (o *PutPolicyParams) WithHTTPClient(client *http.Client) *PutPolicyParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the put policy params
func (o *PutPolicyParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WithPolicy adds the policy to the put policy params
// (builder-style wrapper around SetPolicy).
func (o *PutPolicyParams) WithPolicy(policy string) *PutPolicyParams {
	o.SetPolicy(policy)
	return o
}

// SetPolicy adds the policy to the put policy params
func (o *PutPolicyParams) SetPolicy(policy string) {
	o.Policy = policy
}

// WithReplace adds the replace to the put policy params
// (builder-style wrapper around SetReplace).
func (o *PutPolicyParams) WithReplace(replace *bool) *PutPolicyParams {
	o.SetReplace(replace)
	return o
}

// SetReplace adds the replace to the put policy params
func (o *PutPolicyParams) SetReplace(replace *bool) {
	o.Replace = replace
}

// WithReplaceWithLabels adds the replaceWithLabels to the put policy params
// (builder-style wrapper around SetReplaceWithLabels).
func (o *PutPolicyParams) WithReplaceWithLabels(replaceWithLabels []string) *PutPolicyParams {
	o.SetReplaceWithLabels(replaceWithLabels)
	return o
}

// SetReplaceWithLabels adds the replaceWithLabels to the put policy params
func (o *PutPolicyParams) SetReplaceWithLabels(replaceWithLabels []string) {
	o.ReplaceWithLabels = replaceWithLabels
}
// WriteToRequest writes these params to a swagger request
func (o *PutPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {

	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	// NOTE(review): res is never appended to, so the CompositeValidationError
	// branch below is unreachable generated scaffolding; it also keeps the
	// go-openapi/errors import in use, so it is left in place.
	var res []error

	// The policy document itself travels as the request body.
	if err := r.SetBodyParam(o.Policy); err != nil {
		return err
	}

	if o.Replace != nil {

		// query param replace
		var qrReplace bool

		if o.Replace != nil {
			qrReplace = *o.Replace
		}
		qReplace := swag.FormatBool(qrReplace)
		if qReplace != "" {

			if err := r.SetQueryParam("replace", qReplace); err != nil {
				return err
			}
		}
	}

	if o.ReplaceWithLabels != nil {

		// binding items for replace-with-labels
		joinedReplaceWithLabels := o.bindParamReplaceWithLabels(reg)

		// query array param replace-with-labels
		if err := r.SetQueryParam("replace-with-labels", joinedReplaceWithLabels...); err != nil {
			return err
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// bindParamReplaceWithLabels binds the parameter replace-with-labels.
// (The generated comment misnamed this function bindParamPutPolicy.)
func (o *PutPolicyParams) bindParamReplaceWithLabels(formats strfmt.Registry) []string {
	replaceWithLabelsIR := o.ReplaceWithLabels

	var replaceWithLabelsIC []string
	for _, replaceWithLabelsIIR := range replaceWithLabelsIR { // explode []string

		replaceWithLabelsIIV := replaceWithLabelsIIR // string as string
		replaceWithLabelsIC = append(replaceWithLabelsIC, replaceWithLabelsIIV)
	}

	// items.CollectionFormat: "" (i.e. one query value per item, not joined)
	replaceWithLabelsIS := swag.JoinByFormat(replaceWithLabelsIC, "")

	return replaceWithLabelsIS
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package policy
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PutPolicyReader is a Reader for the PutPolicy structure.
type PutPolicyReader struct {
	// formats holds the strfmt registry used when decoding payloads.
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *PutPolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		ok := NewPutPolicyOK()
		if err := ok.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return ok, nil
	case 400:
		invalid := NewPutPolicyInvalidPolicy()
		if err := invalid.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, invalid
	case 403:
		forbidden := NewPutPolicyForbidden()
		if err := forbidden.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, forbidden
	case 460:
		badPath := NewPutPolicyInvalidPath()
		if err := badPath.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, badPath
	case 500:
		failure := NewPutPolicyFailure()
		if err := failure.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, failure
	default:
		// Undocumented status codes surface as a generic API error.
		return nil, runtime.NewAPIError("[PUT /policy] PutPolicy", response, response.Code())
	}
}
// NewPutPolicyOK creates a PutPolicyOK with default headers values
func NewPutPolicyOK() *PutPolicyOK {
	return new(PutPolicyOK)
}

/*
PutPolicyOK describes a response with status code 200, with default header values.

Success
*/
type PutPolicyOK struct {
	Payload *models.Policy
}

// IsSuccess returns true when this put policy o k response has a 2xx status code
func (r *PutPolicyOK) IsSuccess() bool { return true }

// IsRedirect returns true when this put policy o k response has a 3xx status code
func (r *PutPolicyOK) IsRedirect() bool { return false }

// IsClientError returns true when this put policy o k response has a 4xx status code
func (r *PutPolicyOK) IsClientError() bool { return false }

// IsServerError returns true when this put policy o k response has a 5xx status code
func (r *PutPolicyOK) IsServerError() bool { return false }

// IsCode returns true when this put policy o k response a status code equal to that given
func (r *PutPolicyOK) IsCode(code int) bool { return code == 200 }

// Code gets the status code for the put policy o k response
func (r *PutPolicyOK) Code() int { return 200 }

// Error renders the response for use as an error value.
func (r *PutPolicyOK) Error() string {
	body, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %s", 200, body)
}

// String renders the response for logging.
func (r *PutPolicyOK) String() string {
	body, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %s", 200, body)
}

// GetPayload returns the decoded policy document.
func (r *PutPolicyOK) GetPayload() *models.Policy {
	return r.Payload
}

// readResponse decodes the response body into a fresh Policy;
// io.EOF means an empty body and is not an error.
func (r *PutPolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	r.Payload = new(models.Policy)

	if err := consumer.Consume(response.Body(), r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPutPolicyInvalidPolicy creates a PutPolicyInvalidPolicy with default headers values
func NewPutPolicyInvalidPolicy() *PutPolicyInvalidPolicy {
	return new(PutPolicyInvalidPolicy)
}

/*
PutPolicyInvalidPolicy describes a response with status code 400, with default header values.

Invalid policy
*/
type PutPolicyInvalidPolicy struct {
	Payload models.Error
}

// IsSuccess returns true when this put policy invalid policy response has a 2xx status code
func (r *PutPolicyInvalidPolicy) IsSuccess() bool { return false }

// IsRedirect returns true when this put policy invalid policy response has a 3xx status code
func (r *PutPolicyInvalidPolicy) IsRedirect() bool { return false }

// IsClientError returns true when this put policy invalid policy response has a 4xx status code
func (r *PutPolicyInvalidPolicy) IsClientError() bool { return true }

// IsServerError returns true when this put policy invalid policy response has a 5xx status code
func (r *PutPolicyInvalidPolicy) IsServerError() bool { return false }

// IsCode returns true when this put policy invalid policy response a status code equal to that given
func (r *PutPolicyInvalidPolicy) IsCode(code int) bool { return code == 400 }

// Code gets the status code for the put policy invalid policy response
func (r *PutPolicyInvalidPolicy) Code() int { return 400 }

// Error renders the response for use as an error value.
func (r *PutPolicyInvalidPolicy) Error() string {
	body, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %s", 400, body)
}

// String renders the response for logging.
func (r *PutPolicyInvalidPolicy) String() string {
	body, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %s", 400, body)
}

// GetPayload returns the decoded error payload.
func (r *PutPolicyInvalidPolicy) GetPayload() models.Error {
	return r.Payload
}

// readResponse decodes the response body; io.EOF means an empty body and is not an error.
func (r *PutPolicyInvalidPolicy) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPutPolicyForbidden creates a PutPolicyForbidden with default headers values
func NewPutPolicyForbidden() *PutPolicyForbidden {
	return new(PutPolicyForbidden)
}

/*
PutPolicyForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PutPolicyForbidden struct {
}

// IsSuccess returns true when this put policy forbidden response has a 2xx status code
func (r *PutPolicyForbidden) IsSuccess() bool { return false }

// IsRedirect returns true when this put policy forbidden response has a 3xx status code
func (r *PutPolicyForbidden) IsRedirect() bool { return false }

// IsClientError returns true when this put policy forbidden response has a 4xx status code
func (r *PutPolicyForbidden) IsClientError() bool { return true }

// IsServerError returns true when this put policy forbidden response has a 5xx status code
func (r *PutPolicyForbidden) IsServerError() bool { return false }

// IsCode returns true when this put policy forbidden response a status code equal to that given
func (r *PutPolicyForbidden) IsCode(code int) bool { return code == 403 }

// Code gets the status code for the put policy forbidden response
func (r *PutPolicyForbidden) Code() int { return 403 }

// Error renders the response for use as an error value.
func (r *PutPolicyForbidden) Error() string {
	return fmt.Sprintf("[PUT /policy][%d] putPolicyForbidden", 403)
}

// String renders the response for logging.
func (r *PutPolicyForbidden) String() string {
	return fmt.Sprintf("[PUT /policy][%d] putPolicyForbidden", 403)
}

// readResponse is a no-op: a 403 carries no payload.
func (r *PutPolicyForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewPutPolicyInvalidPath creates a PutPolicyInvalidPath with default headers values
func NewPutPolicyInvalidPath() *PutPolicyInvalidPath {
	return new(PutPolicyInvalidPath)
}

/*
PutPolicyInvalidPath describes a response with status code 460, with default header values.

Invalid path
*/
type PutPolicyInvalidPath struct {
	Payload models.Error
}

// IsSuccess returns true when this put policy invalid path response has a 2xx status code
func (r *PutPolicyInvalidPath) IsSuccess() bool { return false }

// IsRedirect returns true when this put policy invalid path response has a 3xx status code
func (r *PutPolicyInvalidPath) IsRedirect() bool { return false }

// IsClientError returns true when this put policy invalid path response has a 4xx status code
func (r *PutPolicyInvalidPath) IsClientError() bool { return true }

// IsServerError returns true when this put policy invalid path response has a 5xx status code
func (r *PutPolicyInvalidPath) IsServerError() bool { return false }

// IsCode returns true when this put policy invalid path response a status code equal to that given
func (r *PutPolicyInvalidPath) IsCode(code int) bool { return code == 460 }

// Code gets the status code for the put policy invalid path response
func (r *PutPolicyInvalidPath) Code() int { return 460 }

// Error renders the response for use as an error value.
func (r *PutPolicyInvalidPath) Error() string {
	body, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %s", 460, body)
}

// String renders the response for logging.
func (r *PutPolicyInvalidPath) String() string {
	body, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %s", 460, body)
}

// GetPayload returns the decoded error payload.
func (r *PutPolicyInvalidPath) GetPayload() models.Error {
	return r.Payload
}

// readResponse decodes the response body; io.EOF means an empty body and is not an error.
func (r *PutPolicyInvalidPath) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPutPolicyFailure creates a PutPolicyFailure with default headers values
func NewPutPolicyFailure() *PutPolicyFailure {
	return &PutPolicyFailure{}
}

/*
PutPolicyFailure describes a response with status code 500, with default header values.

Policy import failed
*/
type PutPolicyFailure struct {
	// Payload is the error model decoded from the response body.
	Payload models.Error
}

// Code gets the status code for the put policy failure response
func (r *PutPolicyFailure) Code() int {
	return 500
}

// IsCode returns true when this put policy failure response has a status code equal to that given
func (r *PutPolicyFailure) IsCode(code int) bool {
	return code == 500
}

// IsSuccess returns true when this put policy failure response has a 2xx status code
func (r *PutPolicyFailure) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put policy failure response has a 3xx status code
func (r *PutPolicyFailure) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put policy failure response has a 4xx status code
func (r *PutPolicyFailure) IsClientError() bool {
	return false
}

// IsServerError returns true when this put policy failure response has a 5xx status code
func (r *PutPolicyFailure) IsServerError() bool {
	return true
}

// GetPayload returns the decoded error payload.
func (r *PutPolicyFailure) GetPayload() models.Error {
	return r.Payload
}

// Error implements the error interface, embedding the JSON-encoded payload.
func (r *PutPolicyFailure) Error() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %s", 500, payload)
}

// String renders the same message as Error.
func (r *PutPolicyFailure) String() string {
	return r.Error()
}

// readResponse decodes the response body into Payload; io.EOF means an empty body, which is not an error.
func (r *PutPolicyFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package prefilter
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewDeletePrefilterParams creates a new DeletePrefilterParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewDeletePrefilterParams() *DeletePrefilterParams {
	return &DeletePrefilterParams{timeout: cr.DefaultTimeout}
}

// NewDeletePrefilterParamsWithTimeout creates a new DeletePrefilterParams object
// preconfigured with a request timeout.
func NewDeletePrefilterParamsWithTimeout(timeout time.Duration) *DeletePrefilterParams {
	return &DeletePrefilterParams{timeout: timeout}
}

// NewDeletePrefilterParamsWithContext creates a new DeletePrefilterParams object
// preconfigured with a request context.
func NewDeletePrefilterParamsWithContext(ctx context.Context) *DeletePrefilterParams {
	return &DeletePrefilterParams{Context: ctx}
}

// NewDeletePrefilterParamsWithHTTPClient creates a new DeletePrefilterParams object
// preconfigured with a custom HTTP client.
func NewDeletePrefilterParamsWithHTTPClient(client *http.Client) *DeletePrefilterParams {
	return &DeletePrefilterParams{HTTPClient: client}
}

/*
DeletePrefilterParams contains all the parameters to send to the API endpoint
for the delete prefilter operation.

Typically these are written to a http.Request.
*/
type DeletePrefilterParams struct {
	/* PrefilterSpec.

	   List of CIDR ranges for filter table
	*/
	PrefilterSpec *models.PrefilterSpec

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the delete prefilter params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *DeletePrefilterParams) WithDefaults() *DeletePrefilterParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the delete prefilter params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *DeletePrefilterParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the delete prefilter params
func (p *DeletePrefilterParams) WithTimeout(timeout time.Duration) *DeletePrefilterParams {
	p.timeout = timeout
	return p
}

// SetTimeout adds the timeout to the delete prefilter params
func (p *DeletePrefilterParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the delete prefilter params
func (p *DeletePrefilterParams) WithContext(ctx context.Context) *DeletePrefilterParams {
	p.Context = ctx
	return p
}

// SetContext adds the context to the delete prefilter params
func (p *DeletePrefilterParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the delete prefilter params
func (p *DeletePrefilterParams) WithHTTPClient(client *http.Client) *DeletePrefilterParams {
	p.HTTPClient = client
	return p
}

// SetHTTPClient adds the HTTPClient to the delete prefilter params
func (p *DeletePrefilterParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithPrefilterSpec adds the prefilterSpec to the delete prefilter params
func (p *DeletePrefilterParams) WithPrefilterSpec(prefilterSpec *models.PrefilterSpec) *DeletePrefilterParams {
	p.PrefilterSpec = prefilterSpec
	return p
}

// SetPrefilterSpec adds the prefilterSpec to the delete prefilter params
func (p *DeletePrefilterParams) SetPrefilterSpec(prefilterSpec *models.PrefilterSpec) {
	p.PrefilterSpec = prefilterSpec
}

// WriteToRequest writes these params to a swagger request
func (p *DeletePrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	// res accumulates validation errors (none are produced for this operation;
	// kept for parity with the generated parameter-writing pattern).
	var res []error

	// The prefilter spec, when present, travels as the request body.
	if p.PrefilterSpec != nil {
		if err := r.SetBodyParam(p.PrefilterSpec); err != nil {
			return err
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package prefilter
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// DeletePrefilterReader is a Reader for the DeletePrefilter structure.
type DeletePrefilterReader struct {
	// formats is the registry used to validate/deserialize response payloads.
	formats strfmt.Registry
}

// ReadResponse deserializes the server response into the typed result that
// matches its status code. Success (200) is returned as the value; all other
// known codes are returned through the error slot.
func (d *DeletePrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewDeletePrefilterOK()
		err := result.readResponse(response, consumer, d.formats)
		if err != nil {
			return nil, err
		}
		return result, nil
	case 403:
		result := NewDeletePrefilterForbidden()
		err := result.readResponse(response, consumer, d.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	case 461:
		result := NewDeletePrefilterInvalidCIDR()
		err := result.readResponse(response, consumer, d.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewDeletePrefilterFailure()
		err := result.readResponse(response, consumer, d.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	default:
		// Unknown status codes become a generic API error.
		return nil, runtime.NewAPIError("[DELETE /prefilter] DeletePrefilter", response, response.Code())
	}
}
// NewDeletePrefilterOK creates a DeletePrefilterOK with default headers values
func NewDeletePrefilterOK() *DeletePrefilterOK {
	return &DeletePrefilterOK{}
}

/*
DeletePrefilterOK describes a response with status code 200, with default header values.

Deleted
*/
type DeletePrefilterOK struct {
	// Payload is the prefilter model decoded from the response body.
	Payload *models.Prefilter
}

// Code gets the status code for the delete prefilter o k response
func (r *DeletePrefilterOK) Code() int {
	return 200
}

// IsCode returns true when this delete prefilter o k response has a status code equal to that given
func (r *DeletePrefilterOK) IsCode(code int) bool {
	return code == 200
}

// IsSuccess returns true when this delete prefilter o k response has a 2xx status code
func (r *DeletePrefilterOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this delete prefilter o k response has a 3xx status code
func (r *DeletePrefilterOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete prefilter o k response has a 4xx status code
func (r *DeletePrefilterOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this delete prefilter o k response has a 5xx status code
func (r *DeletePrefilterOK) IsServerError() bool {
	return false
}

// GetPayload returns the decoded prefilter payload.
func (r *DeletePrefilterOK) GetPayload() *models.Prefilter {
	return r.Payload
}

// Error implements the error interface, embedding the JSON-encoded payload.
func (r *DeletePrefilterOK) Error() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %s", 200, payload)
}

// String renders the same message as Error.
func (r *DeletePrefilterOK) String() string {
	return r.Error()
}

// readResponse allocates the payload and decodes the response body into it;
// io.EOF means an empty body, which is not an error.
func (r *DeletePrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	r.Payload = new(models.Prefilter)
	if err := consumer.Consume(response.Body(), r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewDeletePrefilterForbidden creates a DeletePrefilterForbidden with default headers values
func NewDeletePrefilterForbidden() *DeletePrefilterForbidden {
	return &DeletePrefilterForbidden{}
}

/*
DeletePrefilterForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type DeletePrefilterForbidden struct {
}

// Code gets the status code for the delete prefilter forbidden response
func (r *DeletePrefilterForbidden) Code() int {
	return 403
}

// IsCode returns true when this delete prefilter forbidden response has a status code equal to that given
func (r *DeletePrefilterForbidden) IsCode(code int) bool {
	return code == 403
}

// IsSuccess returns true when this delete prefilter forbidden response has a 2xx status code
func (r *DeletePrefilterForbidden) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this delete prefilter forbidden response has a 3xx status code
func (r *DeletePrefilterForbidden) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete prefilter forbidden response has a 4xx status code
func (r *DeletePrefilterForbidden) IsClientError() bool {
	return true
}

// IsServerError returns true when this delete prefilter forbidden response has a 5xx status code
func (r *DeletePrefilterForbidden) IsServerError() bool {
	return false
}

// Error implements the error interface for the 403 response.
func (r *DeletePrefilterForbidden) Error() string {
	return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterForbidden", 403)
}

// String renders the same message as Error.
func (r *DeletePrefilterForbidden) String() string {
	return r.Error()
}

// readResponse is a no-op: a 403 response carries no body to decode.
func (r *DeletePrefilterForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewDeletePrefilterInvalidCIDR creates a DeletePrefilterInvalidCIDR with default headers values
func NewDeletePrefilterInvalidCIDR() *DeletePrefilterInvalidCIDR {
	return &DeletePrefilterInvalidCIDR{}
}

/*
DeletePrefilterInvalidCIDR describes a response with status code 461, with default header values.

Invalid CIDR prefix
*/
type DeletePrefilterInvalidCIDR struct {
	// Payload is the error model decoded from the response body.
	Payload models.Error
}

// Code gets the status code for the delete prefilter invalid c Id r response
func (r *DeletePrefilterInvalidCIDR) Code() int {
	return 461
}

// IsCode returns true when this delete prefilter invalid c Id r response has a status code equal to that given
func (r *DeletePrefilterInvalidCIDR) IsCode(code int) bool {
	return code == 461
}

// IsSuccess returns true when this delete prefilter invalid c Id r response has a 2xx status code
func (r *DeletePrefilterInvalidCIDR) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this delete prefilter invalid c Id r response has a 3xx status code
func (r *DeletePrefilterInvalidCIDR) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete prefilter invalid c Id r response has a 4xx status code
func (r *DeletePrefilterInvalidCIDR) IsClientError() bool {
	return true
}

// IsServerError returns true when this delete prefilter invalid c Id r response has a 5xx status code
func (r *DeletePrefilterInvalidCIDR) IsServerError() bool {
	return false
}

// GetPayload returns the decoded error payload.
func (r *DeletePrefilterInvalidCIDR) GetPayload() models.Error {
	return r.Payload
}

// Error implements the error interface, embedding the JSON-encoded payload.
// (The "CIdR" casing in the message matches the generator's output.)
func (r *DeletePrefilterInvalidCIDR) Error() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %s", 461, payload)
}

// String renders the same message as Error.
func (r *DeletePrefilterInvalidCIDR) String() string {
	return r.Error()
}

// readResponse decodes the response body into Payload; io.EOF means an empty body, which is not an error.
func (r *DeletePrefilterInvalidCIDR) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewDeletePrefilterFailure creates a DeletePrefilterFailure with default headers values
func NewDeletePrefilterFailure() *DeletePrefilterFailure {
	return &DeletePrefilterFailure{}
}

/*
DeletePrefilterFailure describes a response with status code 500, with default header values.

Prefilter delete failed
*/
type DeletePrefilterFailure struct {
	// Payload is the error model decoded from the response body.
	Payload models.Error
}

// Code gets the status code for the delete prefilter failure response
func (r *DeletePrefilterFailure) Code() int {
	return 500
}

// IsCode returns true when this delete prefilter failure response has a status code equal to that given
func (r *DeletePrefilterFailure) IsCode(code int) bool {
	return code == 500
}

// IsSuccess returns true when this delete prefilter failure response has a 2xx status code
func (r *DeletePrefilterFailure) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this delete prefilter failure response has a 3xx status code
func (r *DeletePrefilterFailure) IsRedirect() bool {
	return false
}

// IsClientError returns true when this delete prefilter failure response has a 4xx status code
func (r *DeletePrefilterFailure) IsClientError() bool {
	return false
}

// IsServerError returns true when this delete prefilter failure response has a 5xx status code
func (r *DeletePrefilterFailure) IsServerError() bool {
	return true
}

// GetPayload returns the decoded error payload.
func (r *DeletePrefilterFailure) GetPayload() models.Error {
	return r.Payload
}

// Error implements the error interface, embedding the JSON-encoded payload.
func (r *DeletePrefilterFailure) Error() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %s", 500, payload)
}

// String renders the same message as Error.
func (r *DeletePrefilterFailure) String() string {
	return r.Error()
}

// readResponse decodes the response body into Payload; io.EOF means an empty body, which is not an error.
func (r *DeletePrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package prefilter
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetPrefilterParams creates a new GetPrefilterParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetPrefilterParams() *GetPrefilterParams {
	return &GetPrefilterParams{timeout: cr.DefaultTimeout}
}

// NewGetPrefilterParamsWithTimeout creates a new GetPrefilterParams object
// preconfigured with a request timeout.
func NewGetPrefilterParamsWithTimeout(timeout time.Duration) *GetPrefilterParams {
	return &GetPrefilterParams{timeout: timeout}
}

// NewGetPrefilterParamsWithContext creates a new GetPrefilterParams object
// preconfigured with a request context.
func NewGetPrefilterParamsWithContext(ctx context.Context) *GetPrefilterParams {
	return &GetPrefilterParams{Context: ctx}
}

// NewGetPrefilterParamsWithHTTPClient creates a new GetPrefilterParams object
// preconfigured with a custom HTTP client.
func NewGetPrefilterParamsWithHTTPClient(client *http.Client) *GetPrefilterParams {
	return &GetPrefilterParams{HTTPClient: client}
}

/*
GetPrefilterParams contains all the parameters to send to the API endpoint
for the get prefilter operation.

Typically these are written to a http.Request.
*/
type GetPrefilterParams struct {
	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get prefilter params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetPrefilterParams) WithDefaults() *GetPrefilterParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the get prefilter params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *GetPrefilterParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get prefilter params
func (p *GetPrefilterParams) WithTimeout(timeout time.Duration) *GetPrefilterParams {
	p.timeout = timeout
	return p
}

// SetTimeout adds the timeout to the get prefilter params
func (p *GetPrefilterParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the get prefilter params
func (p *GetPrefilterParams) WithContext(ctx context.Context) *GetPrefilterParams {
	p.Context = ctx
	return p
}

// SetContext adds the context to the get prefilter params
func (p *GetPrefilterParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get prefilter params
func (p *GetPrefilterParams) WithHTTPClient(client *http.Client) *GetPrefilterParams {
	p.HTTPClient = client
	return p
}

// SetHTTPClient adds the HTTPClient to the get prefilter params
func (p *GetPrefilterParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request
func (p *GetPrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	// res accumulates validation errors (none are produced for this operation;
	// kept for parity with the generated parameter-writing pattern).
	var res []error
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package prefilter
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetPrefilterReader is a Reader for the GetPrefilter structure.
type GetPrefilterReader struct {
	// formats is the registry used to validate/deserialize response payloads.
	formats strfmt.Registry
}

// ReadResponse deserializes the server response into the typed result that
// matches its status code. Success (200) is returned as the value; the known
// failure code is returned through the error slot.
func (g *GetPrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetPrefilterOK()
		err := result.readResponse(response, consumer, g.formats)
		if err != nil {
			return nil, err
		}
		return result, nil
	case 500:
		result := NewGetPrefilterFailure()
		err := result.readResponse(response, consumer, g.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	default:
		// Unknown status codes become a generic API error.
		return nil, runtime.NewAPIError("[GET /prefilter] GetPrefilter", response, response.Code())
	}
}
// NewGetPrefilterOK creates a GetPrefilterOK with default headers values
func NewGetPrefilterOK() *GetPrefilterOK {
	return &GetPrefilterOK{}
}

/*
GetPrefilterOK describes a response with status code 200, with default header values.

Success
*/
type GetPrefilterOK struct {
	// Payload is the prefilter model decoded from the response body.
	Payload *models.Prefilter
}

// Code gets the status code for the get prefilter o k response
func (r *GetPrefilterOK) Code() int {
	return 200
}

// IsCode returns true when this get prefilter o k response has a status code equal to that given
func (r *GetPrefilterOK) IsCode(code int) bool {
	return code == 200
}

// IsSuccess returns true when this get prefilter o k response has a 2xx status code
func (r *GetPrefilterOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get prefilter o k response has a 3xx status code
func (r *GetPrefilterOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get prefilter o k response has a 4xx status code
func (r *GetPrefilterOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get prefilter o k response has a 5xx status code
func (r *GetPrefilterOK) IsServerError() bool {
	return false
}

// GetPayload returns the decoded prefilter payload.
func (r *GetPrefilterOK) GetPayload() *models.Prefilter {
	return r.Payload
}

// Error implements the error interface, embedding the JSON-encoded payload.
func (r *GetPrefilterOK) Error() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %s", 200, payload)
}

// String renders the same message as Error.
func (r *GetPrefilterOK) String() string {
	return r.Error()
}

// readResponse allocates the payload and decodes the response body into it;
// io.EOF means an empty body, which is not an error.
func (r *GetPrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	r.Payload = new(models.Prefilter)
	if err := consumer.Consume(response.Body(), r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetPrefilterFailure creates a GetPrefilterFailure with default headers values
func NewGetPrefilterFailure() *GetPrefilterFailure {
	return &GetPrefilterFailure{}
}

/*
GetPrefilterFailure describes a response with status code 500, with default header values.

Prefilter get failed
*/
type GetPrefilterFailure struct {
	// Payload is the error model decoded from the response body.
	Payload models.Error
}

// Code gets the status code for the get prefilter failure response
func (r *GetPrefilterFailure) Code() int {
	return 500
}

// IsCode returns true when this get prefilter failure response has a status code equal to that given
func (r *GetPrefilterFailure) IsCode(code int) bool {
	return code == 500
}

// IsSuccess returns true when this get prefilter failure response has a 2xx status code
func (r *GetPrefilterFailure) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get prefilter failure response has a 3xx status code
func (r *GetPrefilterFailure) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get prefilter failure response has a 4xx status code
func (r *GetPrefilterFailure) IsClientError() bool {
	return false
}

// IsServerError returns true when this get prefilter failure response has a 5xx status code
func (r *GetPrefilterFailure) IsServerError() bool {
	return true
}

// GetPayload returns the decoded error payload.
func (r *GetPrefilterFailure) GetPayload() models.Error {
	return r.Payload
}

// Error implements the error interface, embedding the JSON-encoded payload.
func (r *GetPrefilterFailure) Error() string {
	payload, _ := json.Marshal(r.Payload)
	return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %s", 500, payload)
}

// String renders the same message as Error.
func (r *GetPrefilterFailure) String() string {
	return r.Error()
}

// readResponse decodes the response body into Payload; io.EOF means an empty body, which is not an error.
func (r *GetPrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	if err := consumer.Consume(response.Body(), &r.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package prefilter
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// NewPatchPrefilterParams creates a new PatchPrefilterParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewPatchPrefilterParams() *PatchPrefilterParams {
	return &PatchPrefilterParams{timeout: cr.DefaultTimeout}
}

// NewPatchPrefilterParamsWithTimeout creates a new PatchPrefilterParams object
// preconfigured with a request timeout.
func NewPatchPrefilterParamsWithTimeout(timeout time.Duration) *PatchPrefilterParams {
	return &PatchPrefilterParams{timeout: timeout}
}

// NewPatchPrefilterParamsWithContext creates a new PatchPrefilterParams object
// preconfigured with a request context.
func NewPatchPrefilterParamsWithContext(ctx context.Context) *PatchPrefilterParams {
	return &PatchPrefilterParams{Context: ctx}
}

// NewPatchPrefilterParamsWithHTTPClient creates a new PatchPrefilterParams object
// preconfigured with a custom HTTP client.
func NewPatchPrefilterParamsWithHTTPClient(client *http.Client) *PatchPrefilterParams {
	return &PatchPrefilterParams{HTTPClient: client}
}

/*
PatchPrefilterParams contains all the parameters to send to the API endpoint
for the patch prefilter operation.

Typically these are written to a http.Request.
*/
type PatchPrefilterParams struct {
	/* PrefilterSpec.

	   List of CIDR ranges for filter table
	*/
	PrefilterSpec *models.PrefilterSpec

	timeout    time.Duration
	Context    context.Context
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the patch prefilter params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *PatchPrefilterParams) WithDefaults() *PatchPrefilterParams {
	p.SetDefaults()
	return p
}

// SetDefaults hydrates default values in the patch prefilter params (not the query body).
//
// All values with no default are reset to their zero value.
func (p *PatchPrefilterParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the patch prefilter params
func (p *PatchPrefilterParams) WithTimeout(timeout time.Duration) *PatchPrefilterParams {
	p.timeout = timeout
	return p
}

// SetTimeout adds the timeout to the patch prefilter params
func (p *PatchPrefilterParams) SetTimeout(timeout time.Duration) {
	p.timeout = timeout
}

// WithContext adds the context to the patch prefilter params
func (p *PatchPrefilterParams) WithContext(ctx context.Context) *PatchPrefilterParams {
	p.Context = ctx
	return p
}

// SetContext adds the context to the patch prefilter params
func (p *PatchPrefilterParams) SetContext(ctx context.Context) {
	p.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the patch prefilter params
func (p *PatchPrefilterParams) WithHTTPClient(client *http.Client) *PatchPrefilterParams {
	p.HTTPClient = client
	return p
}

// SetHTTPClient adds the HTTPClient to the patch prefilter params
func (p *PatchPrefilterParams) SetHTTPClient(client *http.Client) {
	p.HTTPClient = client
}

// WithPrefilterSpec adds the prefilterSpec to the patch prefilter params
func (p *PatchPrefilterParams) WithPrefilterSpec(prefilterSpec *models.PrefilterSpec) *PatchPrefilterParams {
	p.PrefilterSpec = prefilterSpec
	return p
}

// SetPrefilterSpec adds the prefilterSpec to the patch prefilter params
func (p *PatchPrefilterParams) SetPrefilterSpec(prefilterSpec *models.PrefilterSpec) {
	p.PrefilterSpec = prefilterSpec
}

// WriteToRequest writes these params to a swagger request
func (p *PatchPrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(p.timeout); err != nil {
		return err
	}
	// res accumulates validation errors (none are produced for this operation;
	// kept for parity with the generated parameter-writing pattern).
	var res []error

	// The prefilter spec, when present, travels as the request body.
	if p.PrefilterSpec != nil {
		if err := r.SetBodyParam(p.PrefilterSpec); err != nil {
			return err
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package prefilter
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// PatchPrefilterReader is a Reader for the PatchPrefilter structure.
type PatchPrefilterReader struct {
	// formats is the registry used to validate/deserialize response payloads.
	formats strfmt.Registry
}

// ReadResponse deserializes the server response into the typed result that
// matches its status code. Success (200) is returned as the value; all other
// known codes are returned through the error slot.
func (p *PatchPrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewPatchPrefilterOK()
		err := result.readResponse(response, consumer, p.formats)
		if err != nil {
			return nil, err
		}
		return result, nil
	case 403:
		result := NewPatchPrefilterForbidden()
		err := result.readResponse(response, consumer, p.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	case 461:
		result := NewPatchPrefilterInvalidCIDR()
		err := result.readResponse(response, consumer, p.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewPatchPrefilterFailure()
		err := result.readResponse(response, consumer, p.formats)
		if err != nil {
			return nil, err
		}
		return nil, result
	default:
		// Unknown status codes become a generic API error.
		return nil, runtime.NewAPIError("[PATCH /prefilter] PatchPrefilter", response, response.Code())
	}
}
// NewPatchPrefilterOK creates a PatchPrefilterOK with default headers values
func NewPatchPrefilterOK() *PatchPrefilterOK {
return &PatchPrefilterOK{}
}
/*
PatchPrefilterOK describes a response with status code 200, with default header values.
Updated
*/
type PatchPrefilterOK struct {
Payload *models.Prefilter
}
// IsSuccess returns true when this patch prefilter o k response has a 2xx status code
func (o *PatchPrefilterOK) IsSuccess() bool {
return true
}
// IsRedirect returns true when this patch prefilter o k response has a 3xx status code
func (o *PatchPrefilterOK) IsRedirect() bool {
return false
}
// IsClientError returns true when this patch prefilter o k response has a 4xx status code
func (o *PatchPrefilterOK) IsClientError() bool {
return false
}
// IsServerError returns true when this patch prefilter o k response has a 5xx status code
func (o *PatchPrefilterOK) IsServerError() bool {
return false
}
// IsCode returns true when this patch prefilter o k response a status code equal to that given
func (o *PatchPrefilterOK) IsCode(code int) bool {
return code == 200
}
// Code gets the status code for the patch prefilter o k response
func (o *PatchPrefilterOK) Code() int {
return 200
}
func (o *PatchPrefilterOK) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %s", 200, payload)
}
func (o *PatchPrefilterOK) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %s", 200, payload)
}
func (o *PatchPrefilterOK) GetPayload() *models.Prefilter {
return o.Payload
}
func (o *PatchPrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Prefilter)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPatchPrefilterForbidden creates a PatchPrefilterForbidden with default headers values
func NewPatchPrefilterForbidden() *PatchPrefilterForbidden {
return &PatchPrefilterForbidden{}
}
/*
PatchPrefilterForbidden describes a response with status code 403, with default header values.
Forbidden
*/
type PatchPrefilterForbidden struct {
}
// IsSuccess returns true when this patch prefilter forbidden response has a 2xx status code
func (o *PatchPrefilterForbidden) IsSuccess() bool {
return false
}
// IsRedirect returns true when this patch prefilter forbidden response has a 3xx status code
func (o *PatchPrefilterForbidden) IsRedirect() bool {
return false
}
// IsClientError returns true when this patch prefilter forbidden response has a 4xx status code
func (o *PatchPrefilterForbidden) IsClientError() bool {
return true
}
// IsServerError returns true when this patch prefilter forbidden response has a 5xx status code
func (o *PatchPrefilterForbidden) IsServerError() bool {
return false
}
// IsCode returns true when this patch prefilter forbidden response a status code equal to that given
func (o *PatchPrefilterForbidden) IsCode(code int) bool {
return code == 403
}
// Code gets the status code for the patch prefilter forbidden response
func (o *PatchPrefilterForbidden) Code() int {
return 403
}
func (o *PatchPrefilterForbidden) Error() string {
return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterForbidden", 403)
}
func (o *PatchPrefilterForbidden) String() string {
return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterForbidden", 403)
}
func (o *PatchPrefilterForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewPatchPrefilterInvalidCIDR creates a PatchPrefilterInvalidCIDR with default headers values
func NewPatchPrefilterInvalidCIDR() *PatchPrefilterInvalidCIDR {
return &PatchPrefilterInvalidCIDR{}
}
/*
PatchPrefilterInvalidCIDR describes a response with status code 461, with default header values.
Invalid CIDR prefix
*/
type PatchPrefilterInvalidCIDR struct {
Payload models.Error
}
// IsSuccess returns true when this patch prefilter invalid c Id r response has a 2xx status code
func (o *PatchPrefilterInvalidCIDR) IsSuccess() bool {
return false
}
// IsRedirect returns true when this patch prefilter invalid c Id r response has a 3xx status code
func (o *PatchPrefilterInvalidCIDR) IsRedirect() bool {
return false
}
// IsClientError returns true when this patch prefilter invalid c Id r response has a 4xx status code
func (o *PatchPrefilterInvalidCIDR) IsClientError() bool {
return true
}
// IsServerError returns true when this patch prefilter invalid c Id r response has a 5xx status code
func (o *PatchPrefilterInvalidCIDR) IsServerError() bool {
return false
}
// IsCode returns true when this patch prefilter invalid c Id r response a status code equal to that given
func (o *PatchPrefilterInvalidCIDR) IsCode(code int) bool {
return code == 461
}
// Code gets the status code for the patch prefilter invalid c Id r response
func (o *PatchPrefilterInvalidCIDR) Code() int {
return 461
}
func (o *PatchPrefilterInvalidCIDR) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %s", 461, payload)
}
func (o *PatchPrefilterInvalidCIDR) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %s", 461, payload)
}
func (o *PatchPrefilterInvalidCIDR) GetPayload() models.Error {
return o.Payload
}
func (o *PatchPrefilterInvalidCIDR) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPatchPrefilterFailure creates a PatchPrefilterFailure with default headers values
func NewPatchPrefilterFailure() *PatchPrefilterFailure {
return &PatchPrefilterFailure{}
}
/*
PatchPrefilterFailure describes a response with status code 500, with default header values.
Prefilter update failed
*/
type PatchPrefilterFailure struct {
Payload models.Error
}
// IsSuccess returns true when this patch prefilter failure response has a 2xx status code
func (o *PatchPrefilterFailure) IsSuccess() bool {
return false
}
// IsRedirect returns true when this patch prefilter failure response has a 3xx status code
func (o *PatchPrefilterFailure) IsRedirect() bool {
return false
}
// IsClientError returns true when this patch prefilter failure response has a 4xx status code
func (o *PatchPrefilterFailure) IsClientError() bool {
return false
}
// IsServerError returns true when this patch prefilter failure response has a 5xx status code
func (o *PatchPrefilterFailure) IsServerError() bool {
return true
}
// IsCode returns true when this patch prefilter failure response a status code equal to that given
func (o *PatchPrefilterFailure) IsCode(code int) bool {
return code == 500
}
// Code gets the status code for the patch prefilter failure response
func (o *PatchPrefilterFailure) Code() int {
return 500
}
func (o *PatchPrefilterFailure) Error() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %s", 500, payload)
}
func (o *PatchPrefilterFailure) String() string {
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %s", 500, payload)
}
func (o *PatchPrefilterFailure) GetPayload() models.Error {
return o.Payload
}
func (o *PatchPrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package prefilter
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new prefilter API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	return &Client{transport: transport, formats: formats}
}

// NewClientWithBasicAuth creates a new prefilter API client with basic auth credentials.
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - user: user for basic authentication header.
//   - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: transport, formats: strfmt.Default}
}

// NewClientWithBearerToken creates a new prefilter API client with a bearer token for authentication.
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: transport, formats: strfmt.Default}
}

/*
Client for prefilter API
*/
type Client struct {
	transport runtime.ClientTransport
	formats   strfmt.Registry
}

// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)

// ClientService is the interface for Client methods.
type ClientService interface {
	DeletePrefilter(params *DeletePrefilterParams, opts ...ClientOption) (*DeletePrefilterOK, error)

	GetPrefilter(params *GetPrefilterParams, opts ...ClientOption) (*GetPrefilterOK, error)

	PatchPrefilter(params *PatchPrefilterParams, opts ...ClientOption) (*PatchPrefilterOK, error)

	SetTransport(transport runtime.ClientTransport)
}
/*
DeletePrefilter deletes a list of CIDRs.
*/
func (a *Client) DeletePrefilter(params *DeletePrefilterParams, opts ...ClientOption) (*DeletePrefilterOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewDeletePrefilterParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "DeletePrefilter",
		Method:             "DELETE",
		PathPattern:        "/prefilter",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &DeletePrefilterReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied per-operation customizations.
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	success, ok := result.(*DeletePrefilterOK)
	if ok {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	msg := fmt.Sprintf("unexpected success response for DeletePrefilter: API contract not enforced by server. Client expected to get an error, but got: %T", result)
	panic(msg)
}

/*
GetPrefilter retrieves a list of CIDRs.
*/
func (a *Client) GetPrefilter(params *GetPrefilterParams, opts ...ClientOption) (*GetPrefilterOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewGetPrefilterParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetPrefilter",
		Method:             "GET",
		PathPattern:        "/prefilter",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetPrefilterReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied per-operation customizations.
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	success, ok := result.(*GetPrefilterOK)
	if ok {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	msg := fmt.Sprintf("unexpected success response for GetPrefilter: API contract not enforced by server. Client expected to get an error, but got: %T", result)
	panic(msg)
}

/*
PatchPrefilter updates a list of CIDRs.
*/
func (a *Client) PatchPrefilter(params *PatchPrefilterParams, opts ...ClientOption) (*PatchPrefilterOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewPatchPrefilterParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PatchPrefilter",
		Method:             "PATCH",
		PathPattern:        "/prefilter",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PatchPrefilterReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied per-operation customizations.
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	success, ok := result.(*PatchPrefilterOK)
	if ok {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	msg := fmt.Sprintf("unexpected success response for PatchPrefilter: API contract not enforced by server. Client expected to get an error, but got: %T", result)
	panic(msg)
}

// SetTransport changes the transport on the client.
func (a *Client) SetTransport(transport runtime.ClientTransport) {
	a.transport = transport
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetLrpParams creates a new GetLrpParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetLrpParams() *GetLrpParams {
	return &GetLrpParams{
		timeout: cr.DefaultTimeout,
	}
}

// NewGetLrpParamsWithTimeout creates a new GetLrpParams object
// with the ability to set a timeout on a request.
func NewGetLrpParamsWithTimeout(timeout time.Duration) *GetLrpParams {
	return &GetLrpParams{
		timeout: timeout,
	}
}

// NewGetLrpParamsWithContext creates a new GetLrpParams object
// with the ability to set a context for a request.
func NewGetLrpParamsWithContext(ctx context.Context) *GetLrpParams {
	return &GetLrpParams{
		Context: ctx,
	}
}

// NewGetLrpParamsWithHTTPClient creates a new GetLrpParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetLrpParamsWithHTTPClient(client *http.Client) *GetLrpParams {
	return &GetLrpParams{
		HTTPClient: client,
	}
}

/*
GetLrpParams contains all the parameters to send to the API endpoint

	for the get lrp operation.

	Typically these are written to a http.Request.
*/
type GetLrpParams struct {
	// timeout bounds the request; set via WithTimeout/SetTimeout.
	timeout time.Duration
	// Context allows cancellation of the request.
	Context context.Context
	// HTTPClient optionally overrides the transport's default client.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get lrp params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetLrpParams) WithDefaults() *GetLrpParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get lrp params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetLrpParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get lrp params.
func (o *GetLrpParams) WithTimeout(timeout time.Duration) *GetLrpParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get lrp params.
func (o *GetLrpParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get lrp params.
func (o *GetLrpParams) WithContext(ctx context.Context) *GetLrpParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get lrp params.
func (o *GetLrpParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get lrp params.
func (o *GetLrpParams) WithHTTPClient(client *http.Client) *GetLrpParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get lrp params.
func (o *GetLrpParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request.
// The operation has no parameters beyond the client-side timeout.
func (o *GetLrpParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetLrpReader is a Reader for the GetLrp structure.
// It decodes HTTP responses for the GET /lrp operation.
type GetLrpReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// Only 200 is a known status code; anything else yields a generic
// runtime.APIError.
func (o *GetLrpReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetLrpOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		return nil, runtime.NewAPIError("[GET /lrp] GetLrp", response, response.Code())
	}
}

// NewGetLrpOK creates a GetLrpOK with default headers values.
func NewGetLrpOK() *GetLrpOK {
	return &GetLrpOK{}
}

/*
GetLrpOK describes a response with status code 200, with default header values.

Success
*/
type GetLrpOK struct {
	Payload []*models.LRPSpec
}

// IsSuccess returns true when this get lrp OK response has a 2xx status code.
func (o *GetLrpOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get lrp OK response has a 3xx status code.
func (o *GetLrpOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get lrp OK response has a 4xx status code.
func (o *GetLrpOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get lrp OK response has a 5xx status code.
func (o *GetLrpOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get lrp OK response has a status code equal to that given.
func (o *GetLrpOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get lrp OK response.
func (o *GetLrpOK) Code() int {
	return 200
}

// Error renders the response, including its JSON-encoded payload, for error reporting.
func (o *GetLrpOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /lrp][%d] getLrpOK %s", 200, payload)
}

// String renders the response the same way as Error.
func (o *GetLrpOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /lrp][%d] getLrpOK %s", 200, payload)
}

// GetPayload returns the decoded list of local redirect policy specs.
func (o *GetLrpOK) GetPayload() []*models.LRPSpec {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF (empty body) is tolerated.
func (o *GetLrpOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetServiceParams creates a new GetServiceParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetServiceParams() *GetServiceParams {
	return &GetServiceParams{
		timeout: cr.DefaultTimeout,
	}
}

// NewGetServiceParamsWithTimeout creates a new GetServiceParams object
// with the ability to set a timeout on a request.
func NewGetServiceParamsWithTimeout(timeout time.Duration) *GetServiceParams {
	return &GetServiceParams{
		timeout: timeout,
	}
}

// NewGetServiceParamsWithContext creates a new GetServiceParams object
// with the ability to set a context for a request.
func NewGetServiceParamsWithContext(ctx context.Context) *GetServiceParams {
	return &GetServiceParams{
		Context: ctx,
	}
}

// NewGetServiceParamsWithHTTPClient creates a new GetServiceParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetServiceParamsWithHTTPClient(client *http.Client) *GetServiceParams {
	return &GetServiceParams{
		HTTPClient: client,
	}
}

/*
GetServiceParams contains all the parameters to send to the API endpoint

	for the get service operation.

	Typically these are written to a http.Request.
*/
type GetServiceParams struct {
	// timeout bounds the request; set via WithTimeout/SetTimeout.
	timeout time.Duration
	// Context allows cancellation of the request.
	Context context.Context
	// HTTPClient optionally overrides the transport's default client.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get service params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetServiceParams) WithDefaults() *GetServiceParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get service params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetServiceParams) SetDefaults() {
	// no default values defined for this parameter
}

// WithTimeout adds the timeout to the get service params.
func (o *GetServiceParams) WithTimeout(timeout time.Duration) *GetServiceParams {
	o.SetTimeout(timeout)
	return o
}

// SetTimeout adds the timeout to the get service params.
func (o *GetServiceParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get service params.
func (o *GetServiceParams) WithContext(ctx context.Context) *GetServiceParams {
	o.SetContext(ctx)
	return o
}

// SetContext adds the context to the get service params.
func (o *GetServiceParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get service params.
func (o *GetServiceParams) WithHTTPClient(client *http.Client) *GetServiceParams {
	o.SetHTTPClient(client)
	return o
}

// SetHTTPClient adds the HTTPClient to the get service params.
func (o *GetServiceParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request.
// The operation has no parameters beyond the client-side timeout.
func (o *GetServiceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	if err := r.SetTimeout(o.timeout); err != nil {
		return err
	}
	var res []error
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/models"
)
// GetServiceReader is a Reader for the GetService structure.
// It decodes HTTP responses for the GET /service operation.
type GetServiceReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
// Only 200 is a known status code; anything else yields a generic
// runtime.APIError.
func (o *GetServiceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewGetServiceOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	default:
		return nil, runtime.NewAPIError("[GET /service] GetService", response, response.Code())
	}
}

// NewGetServiceOK creates a GetServiceOK with default headers values.
func NewGetServiceOK() *GetServiceOK {
	return &GetServiceOK{}
}

/*
GetServiceOK describes a response with status code 200, with default header values.

Success
*/
type GetServiceOK struct {
	Payload []*models.Service
}

// IsSuccess returns true when this get service OK response has a 2xx status code.
func (o *GetServiceOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get service OK response has a 3xx status code.
func (o *GetServiceOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get service OK response has a 4xx status code.
func (o *GetServiceOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get service OK response has a 5xx status code.
func (o *GetServiceOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get service OK response has a status code equal to that given.
func (o *GetServiceOK) IsCode(code int) bool {
	return code == 200
}

// Code gets the status code for the get service OK response.
func (o *GetServiceOK) Code() int {
	return 200
}

// Error renders the response, including its JSON-encoded payload, for error reporting.
func (o *GetServiceOK) Error() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /service][%d] getServiceOK %s", 200, payload)
}

// String renders the response the same way as Error.
func (o *GetServiceOK) String() string {
	payload, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /service][%d] getServiceOK %s", 200, payload)
}

// GetPayload returns the decoded list of services.
func (o *GetServiceOK) GetPayload() []*models.Service {
	return o.Payload
}

// readResponse decodes the response body into o.Payload; io.EOF (empty body) is tolerated.
func (o *GetServiceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// response payload
	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new service API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	return &Client{transport: transport, formats: formats}
}

// NewClientWithBasicAuth creates a new service API client with basic auth credentials.
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - user: user for basic authentication header.
//   - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: transport, formats: strfmt.Default}
}

// NewClientWithBearerToken creates a new service API client with a bearer token for authentication.
// It takes the following parameters:
//   - host: http host (github.com).
//   - basePath: any base path for the API client ("/v1", "/v3").
//   - scheme: http scheme ("http", "https").
//   - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: transport, formats: strfmt.Default}
}

/*
Client for service API
*/
type Client struct {
	transport runtime.ClientTransport
	formats   strfmt.Registry
}

// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)

// ClientService is the interface for Client methods.
type ClientService interface {
	GetLrp(params *GetLrpParams, opts ...ClientOption) (*GetLrpOK, error)

	GetService(params *GetServiceParams, opts ...ClientOption) (*GetServiceOK, error)

	SetTransport(transport runtime.ClientTransport)
}

/*
GetLrp retrieves a list of all local redirect policies.
*/
func (a *Client) GetLrp(params *GetLrpParams, opts ...ClientOption) (*GetLrpOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewGetLrpParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetLrp",
		Method:             "GET",
		PathPattern:        "/lrp",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetLrpReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied per-operation customizations.
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	success, ok := result.(*GetLrpOK)
	if ok {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	msg := fmt.Sprintf("unexpected success response for GetLrp: API contract not enforced by server. Client expected to get an error, but got: %T", result)
	panic(msg)
}

/*
GetService retrieves a list of all services.
*/
func (a *Client) GetService(params *GetServiceParams, opts ...ClientOption) (*GetServiceOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewGetServiceParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetService",
		Method:             "GET",
		PathPattern:        "/service",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetServiceReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied per-operation customizations.
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	success, ok := result.(*GetServiceOK)
	if ok {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	msg := fmt.Sprintf("unexpected success response for GetService: API contract not enforced by server. Client expected to get an error, but got: %T", result)
	panic(msg)
}

// SetTransport changes the transport on the client.
func (a *Client) SetTransport(transport runtime.ClientTransport) {
	a.transport = transport
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.7
// protoc v6.32.0
// source: flow/flow.proto
package flow
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: fail the build if the protoimpl runtime is outside
// the version window this generated code supports.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// FlowType identifies the kind of flow event, mirroring the FlowType
// enum in flow/flow.proto.
type FlowType int32

// FlowType enum values (wire numbers fixed by the proto definition).
const (
	FlowType_UNKNOWN_TYPE FlowType = iota // 0
	FlowType_L3_L4                        // 1 — network/transport layer (naming per proto; `L34` would read strangely)
	FlowType_L7                           // 2 — application layer
	FlowType_SOCK                         // 3 — socket level
)

// FlowType_name maps enum numbers to their proto names.
var FlowType_name = map[int32]string{
	0: "UNKNOWN_TYPE",
	1: "L3_L4",
	2: "L7",
	3: "SOCK",
}

// FlowType_value maps proto names back to enum numbers.
var FlowType_value = map[string]int32{
	"UNKNOWN_TYPE": 0,
	"L3_L4":        1,
	"L7":           2,
	"SOCK":         3,
}

// Enum returns a pointer to a freshly allocated copy of x.
func (x FlowType) Enum() *FlowType {
	v := x
	return &v
}
func (x FlowType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (FlowType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[0].Descriptor()
}
func (FlowType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[0]
}
func (x FlowType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use FlowType.Descriptor instead.
func (FlowType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{0}
}
// These types correspond to definitions in pkg/policy/l4.go.
//
// AuthType is the Go representation of the AuthType enum generated from
// flow/flow.proto.
type AuthType int32
const (
AuthType_DISABLED AuthType = 0
AuthType_SPIRE AuthType = 1
AuthType_TEST_ALWAYS_FAIL AuthType = 2
)
// Enum value maps for AuthType.
var (
AuthType_name = map[int32]string{
0: "DISABLED",
1: "SPIRE",
2: "TEST_ALWAYS_FAIL",
}
AuthType_value = map[string]int32{
"DISABLED": 0,
"SPIRE": 1,
"TEST_ALWAYS_FAIL": 2,
}
)
// Enum returns a pointer to a new AuthType whose value is x.
func (x AuthType) Enum() *AuthType {
p := new(AuthType)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x AuthType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for AuthType.
func (AuthType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[1].Descriptor()
}
// Type returns the protobuf enum type for AuthType.
func (AuthType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[1]
}
// Number returns x as a protoreflect.EnumNumber.
func (x AuthType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AuthType.Descriptor instead.
func (AuthType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{1}
}
// TraceObservationPoint is the Go representation of the
// TraceObservationPoint enum generated from flow/flow.proto. It identifies
// where in the datapath a trace event was observed.
type TraceObservationPoint int32
const (
// Cilium treats 0 as TO_LXC, but it's something we should work to remove.
// This is intentionally set as unknown, so proto API can guarantee the
// observation point is always going to be present on trace events.
TraceObservationPoint_UNKNOWN_POINT TraceObservationPoint = 0
// TO_PROXY indicates network packets are transmitted towards the l7 proxy.
TraceObservationPoint_TO_PROXY TraceObservationPoint = 1
// TO_HOST indicates network packets are transmitted towards the host
// namespace.
TraceObservationPoint_TO_HOST TraceObservationPoint = 2
// TO_STACK indicates network packets are transmitted towards the Linux
// kernel network stack on host machine.
TraceObservationPoint_TO_STACK TraceObservationPoint = 3
// TO_OVERLAY indicates network packets are transmitted towards the tunnel
// device.
TraceObservationPoint_TO_OVERLAY TraceObservationPoint = 4
// TO_ENDPOINT indicates network packets are transmitted towards endpoints
// (containers).
TraceObservationPoint_TO_ENDPOINT TraceObservationPoint = 101
// FROM_ENDPOINT indicates network packets were received from endpoints
// (containers).
TraceObservationPoint_FROM_ENDPOINT TraceObservationPoint = 5
// FROM_PROXY indicates network packets were received from the l7 proxy.
TraceObservationPoint_FROM_PROXY TraceObservationPoint = 6
// FROM_HOST indicates network packets were received from the host
// namespace.
TraceObservationPoint_FROM_HOST TraceObservationPoint = 7
// FROM_STACK indicates network packets were received from the Linux kernel
// network stack on host machine.
TraceObservationPoint_FROM_STACK TraceObservationPoint = 8
// FROM_OVERLAY indicates network packets were received from the tunnel
// device.
TraceObservationPoint_FROM_OVERLAY TraceObservationPoint = 9
// FROM_NETWORK indicates network packets were received from native
// devices.
TraceObservationPoint_FROM_NETWORK TraceObservationPoint = 10
// TO_NETWORK indicates network packets are transmitted towards native
// devices.
TraceObservationPoint_TO_NETWORK TraceObservationPoint = 11
// FROM_CRYPTO indicates network packets were received from the crypto
// process for decryption.
TraceObservationPoint_FROM_CRYPTO TraceObservationPoint = 12
// TO_CRYPTO indicates network packets are transmitted towards the crypto
// process for encryption.
TraceObservationPoint_TO_CRYPTO TraceObservationPoint = 13
)
// Enum value maps for TraceObservationPoint.
var (
TraceObservationPoint_name = map[int32]string{
0: "UNKNOWN_POINT",
1: "TO_PROXY",
2: "TO_HOST",
3: "TO_STACK",
4: "TO_OVERLAY",
101: "TO_ENDPOINT",
5: "FROM_ENDPOINT",
6: "FROM_PROXY",
7: "FROM_HOST",
8: "FROM_STACK",
9: "FROM_OVERLAY",
10: "FROM_NETWORK",
11: "TO_NETWORK",
12: "FROM_CRYPTO",
13: "TO_CRYPTO",
}
TraceObservationPoint_value = map[string]int32{
"UNKNOWN_POINT": 0,
"TO_PROXY": 1,
"TO_HOST": 2,
"TO_STACK": 3,
"TO_OVERLAY": 4,
"TO_ENDPOINT": 101,
"FROM_ENDPOINT": 5,
"FROM_PROXY": 6,
"FROM_HOST": 7,
"FROM_STACK": 8,
"FROM_OVERLAY": 9,
"FROM_NETWORK": 10,
"TO_NETWORK": 11,
"FROM_CRYPTO": 12,
"TO_CRYPTO": 13,
}
)
// Enum returns a pointer to a new TraceObservationPoint whose value is x.
func (x TraceObservationPoint) Enum() *TraceObservationPoint {
p := new(TraceObservationPoint)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x TraceObservationPoint) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for TraceObservationPoint.
func (TraceObservationPoint) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[2].Descriptor()
}
// Type returns the protobuf enum type for TraceObservationPoint.
func (TraceObservationPoint) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[2]
}
// Number returns x as a protoreflect.EnumNumber.
func (x TraceObservationPoint) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TraceObservationPoint.Descriptor instead.
func (TraceObservationPoint) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{2}
}
// TraceReason is the Go representation of the TraceReason enum generated
// from flow/flow.proto.
type TraceReason int32
const (
TraceReason_TRACE_REASON_UNKNOWN TraceReason = 0
TraceReason_NEW TraceReason = 1
TraceReason_ESTABLISHED TraceReason = 2
TraceReason_REPLY TraceReason = 3
TraceReason_RELATED TraceReason = 4
// Deprecated: Marked as deprecated in flow/flow.proto.
TraceReason_REOPENED TraceReason = 5
TraceReason_SRV6_ENCAP TraceReason = 6
TraceReason_SRV6_DECAP TraceReason = 7
TraceReason_ENCRYPT_OVERLAY TraceReason = 8
)
// Enum value maps for TraceReason.
var (
TraceReason_name = map[int32]string{
0: "TRACE_REASON_UNKNOWN",
1: "NEW",
2: "ESTABLISHED",
3: "REPLY",
4: "RELATED",
5: "REOPENED",
6: "SRV6_ENCAP",
7: "SRV6_DECAP",
8: "ENCRYPT_OVERLAY",
}
TraceReason_value = map[string]int32{
"TRACE_REASON_UNKNOWN": 0,
"NEW": 1,
"ESTABLISHED": 2,
"REPLY": 3,
"RELATED": 4,
"REOPENED": 5,
"SRV6_ENCAP": 6,
"SRV6_DECAP": 7,
"ENCRYPT_OVERLAY": 8,
}
)
// Enum returns a pointer to a new TraceReason whose value is x.
func (x TraceReason) Enum() *TraceReason {
p := new(TraceReason)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x TraceReason) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for TraceReason.
func (TraceReason) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[3].Descriptor()
}
// Type returns the protobuf enum type for TraceReason.
func (TraceReason) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[3]
}
// Number returns x as a protoreflect.EnumNumber.
func (x TraceReason) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TraceReason.Descriptor instead.
func (TraceReason) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{3}
}
// This enum corresponds to Cilium's L7 accesslog [FlowType](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26):
type L7FlowType int32
const (
L7FlowType_UNKNOWN_L7_TYPE L7FlowType = 0
L7FlowType_REQUEST L7FlowType = 1
L7FlowType_RESPONSE L7FlowType = 2
L7FlowType_SAMPLE L7FlowType = 3
)
// Enum value maps for L7FlowType.
var (
L7FlowType_name = map[int32]string{
0: "UNKNOWN_L7_TYPE",
1: "REQUEST",
2: "RESPONSE",
3: "SAMPLE",
}
L7FlowType_value = map[string]int32{
"UNKNOWN_L7_TYPE": 0,
"REQUEST": 1,
"RESPONSE": 2,
"SAMPLE": 3,
}
)
// Enum returns a pointer to a new L7FlowType whose value is x.
func (x L7FlowType) Enum() *L7FlowType {
p := new(L7FlowType)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x L7FlowType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for L7FlowType.
func (L7FlowType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[4].Descriptor()
}
// Type returns the protobuf enum type for L7FlowType.
func (L7FlowType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[4]
}
// Number returns x as a protoreflect.EnumNumber.
func (x L7FlowType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use L7FlowType.Descriptor instead.
func (L7FlowType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{4}
}
// IPVersion is the Go representation of the IPVersion enum generated from
// flow/flow.proto.
type IPVersion int32
const (
IPVersion_IP_NOT_USED IPVersion = 0
IPVersion_IPv4 IPVersion = 1
IPVersion_IPv6 IPVersion = 2
)
// Enum value maps for IPVersion.
var (
IPVersion_name = map[int32]string{
0: "IP_NOT_USED",
1: "IPv4",
2: "IPv6",
}
IPVersion_value = map[string]int32{
"IP_NOT_USED": 0,
"IPv4": 1,
"IPv6": 2,
}
)
// Enum returns a pointer to a new IPVersion whose value is x.
func (x IPVersion) Enum() *IPVersion {
p := new(IPVersion)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x IPVersion) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for IPVersion.
func (IPVersion) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[5].Descriptor()
}
// Type returns the protobuf enum type for IPVersion.
func (IPVersion) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[5]
}
// Number returns x as a protoreflect.EnumNumber.
func (x IPVersion) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use IPVersion.Descriptor instead.
func (IPVersion) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{5}
}
// Verdict is the Go representation of the Verdict enum generated from
// flow/flow.proto. It describes the outcome recorded for a flow event.
type Verdict int32
const (
// UNKNOWN is used if there is no verdict for this flow event
Verdict_VERDICT_UNKNOWN Verdict = 0
// FORWARDED is used for flow events where the trace point has forwarded
// this packet or connection to the next processing entity.
Verdict_FORWARDED Verdict = 1
// DROPPED is used for flow events where the connection or packet has
// been dropped (e.g. due to a malformed packet, it being rejected by a
// network policy etc). The exact drop reason may be found in drop_reason_desc.
Verdict_DROPPED Verdict = 2
// ERROR is used for flow events where an error occurred during processing
Verdict_ERROR Verdict = 3
// AUDIT is used on policy verdict events in policy audit mode, to
// denominate flows that would have been dropped by policy if audit mode
// was turned off
Verdict_AUDIT Verdict = 4
// REDIRECTED is used for flow events which have been redirected to the proxy
Verdict_REDIRECTED Verdict = 5
// TRACED is used for flow events which have been observed at a trace point,
// but no particular verdict has been reached yet
Verdict_TRACED Verdict = 6
// TRANSLATED is used for flow events where an address has been translated
Verdict_TRANSLATED Verdict = 7
)
// Enum value maps for Verdict.
var (
Verdict_name = map[int32]string{
0: "VERDICT_UNKNOWN",
1: "FORWARDED",
2: "DROPPED",
3: "ERROR",
4: "AUDIT",
5: "REDIRECTED",
6: "TRACED",
7: "TRANSLATED",
}
Verdict_value = map[string]int32{
"VERDICT_UNKNOWN": 0,
"FORWARDED": 1,
"DROPPED": 2,
"ERROR": 3,
"AUDIT": 4,
"REDIRECTED": 5,
"TRACED": 6,
"TRANSLATED": 7,
}
)
// Enum returns a pointer to a new Verdict whose value is x.
func (x Verdict) Enum() *Verdict {
p := new(Verdict)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x Verdict) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for Verdict.
func (Verdict) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[6].Descriptor()
}
// Type returns the protobuf enum type for Verdict.
func (Verdict) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[6]
}
// Number returns x as a protoreflect.EnumNumber.
func (x Verdict) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Verdict.Descriptor instead.
func (Verdict) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{6}
}
// These values are shared with pkg/monitor/api/drop.go and bpf/lib/common.h.
// Note that non-drop reasons (i.e. values less than api.DropMin) are not used
// here.
//
// DropReason is the Go representation of the DropReason enum generated from
// flow/flow.proto.
type DropReason int32
const (
// non-drop reasons
DropReason_DROP_REASON_UNKNOWN DropReason = 0
// drop reasons
//
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_INVALID_SOURCE_MAC DropReason = 130
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_INVALID_DESTINATION_MAC DropReason = 131
DropReason_INVALID_SOURCE_IP DropReason = 132
DropReason_POLICY_DENIED DropReason = 133
DropReason_INVALID_PACKET_DROPPED DropReason = 134
DropReason_CT_TRUNCATED_OR_INVALID_HEADER DropReason = 135
DropReason_CT_MISSING_TCP_ACK_FLAG DropReason = 136
DropReason_CT_UNKNOWN_L4_PROTOCOL DropReason = 137
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_CT_CANNOT_CREATE_ENTRY_FROM_PACKET DropReason = 138
DropReason_UNSUPPORTED_L3_PROTOCOL DropReason = 139
DropReason_MISSED_TAIL_CALL DropReason = 140
DropReason_ERROR_WRITING_TO_PACKET DropReason = 141
DropReason_UNKNOWN_L4_PROTOCOL DropReason = 142
DropReason_UNKNOWN_ICMPV4_CODE DropReason = 143
DropReason_UNKNOWN_ICMPV4_TYPE DropReason = 144
DropReason_UNKNOWN_ICMPV6_CODE DropReason = 145
DropReason_UNKNOWN_ICMPV6_TYPE DropReason = 146
DropReason_ERROR_RETRIEVING_TUNNEL_KEY DropReason = 147
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_ERROR_RETRIEVING_TUNNEL_OPTIONS DropReason = 148
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_INVALID_GENEVE_OPTION DropReason = 149
DropReason_UNKNOWN_L3_TARGET_ADDRESS DropReason = 150
DropReason_STALE_OR_UNROUTABLE_IP DropReason = 151
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_NO_MATCHING_LOCAL_CONTAINER_FOUND DropReason = 152
DropReason_ERROR_WHILE_CORRECTING_L3_CHECKSUM DropReason = 153
DropReason_ERROR_WHILE_CORRECTING_L4_CHECKSUM DropReason = 154
DropReason_CT_MAP_INSERTION_FAILED DropReason = 155
DropReason_INVALID_IPV6_EXTENSION_HEADER DropReason = 156
DropReason_IP_FRAGMENTATION_NOT_SUPPORTED DropReason = 157
DropReason_SERVICE_BACKEND_NOT_FOUND DropReason = 158
DropReason_NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT DropReason = 160
DropReason_FAILED_TO_INSERT_INTO_PROXYMAP DropReason = 161
DropReason_REACHED_EDT_RATE_LIMITING_DROP_HORIZON DropReason = 162
DropReason_UNKNOWN_CONNECTION_TRACKING_STATE DropReason = 163
DropReason_LOCAL_HOST_IS_UNREACHABLE DropReason = 164
DropReason_NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION DropReason = 165
DropReason_UNSUPPORTED_L2_PROTOCOL DropReason = 166
DropReason_NO_MAPPING_FOR_NAT_MASQUERADE DropReason = 167
DropReason_UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE DropReason = 168
DropReason_FIB_LOOKUP_FAILED DropReason = 169
DropReason_ENCAPSULATION_TRAFFIC_IS_PROHIBITED DropReason = 170
DropReason_INVALID_IDENTITY DropReason = 171
DropReason_UNKNOWN_SENDER DropReason = 172
DropReason_NAT_NOT_NEEDED DropReason = 173
DropReason_IS_A_CLUSTERIP DropReason = 174
DropReason_FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND DropReason = 175
DropReason_FORBIDDEN_ICMPV6_MESSAGE DropReason = 176
DropReason_DENIED_BY_LB_SRC_RANGE_CHECK DropReason = 177
DropReason_SOCKET_LOOKUP_FAILED DropReason = 178
DropReason_SOCKET_ASSIGN_FAILED DropReason = 179
DropReason_PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL DropReason = 180
DropReason_POLICY_DENY DropReason = 181
DropReason_VLAN_FILTERED DropReason = 182
DropReason_INVALID_VNI DropReason = 183
DropReason_INVALID_TC_BUFFER DropReason = 184
DropReason_NO_SID DropReason = 185
// Deprecated: Marked as deprecated in flow/flow.proto.
DropReason_MISSING_SRV6_STATE DropReason = 186
DropReason_NAT46 DropReason = 187
DropReason_NAT64 DropReason = 188
DropReason_AUTH_REQUIRED DropReason = 189
DropReason_CT_NO_MAP_FOUND DropReason = 190
DropReason_SNAT_NO_MAP_FOUND DropReason = 191
DropReason_INVALID_CLUSTER_ID DropReason = 192
DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP DropReason = 193
DropReason_NO_EGRESS_GATEWAY DropReason = 194
DropReason_UNENCRYPTED_TRAFFIC DropReason = 195
DropReason_TTL_EXCEEDED DropReason = 196
DropReason_NO_NODE_ID DropReason = 197
DropReason_DROP_RATE_LIMITED DropReason = 198
DropReason_IGMP_HANDLED DropReason = 199
DropReason_IGMP_SUBSCRIBED DropReason = 200
DropReason_MULTICAST_HANDLED DropReason = 201
// A BPF program wants to tail call into bpf_host, but the host datapath
// hasn't been loaded yet.
DropReason_DROP_HOST_NOT_READY DropReason = 202
// A BPF program wants to tail call some endpoint's policy program in
// cilium_call_policy, but the program is not available.
DropReason_DROP_EP_NOT_READY DropReason = 203
// An Egress Gateway node matched a packet against an Egress Gateway policy
// that didn't select a valid Egress IP.
DropReason_DROP_NO_EGRESS_IP DropReason = 204
// Punt packet to a user space proxy.
DropReason_DROP_PUNT_PROXY DropReason = 205
)
// Enum value maps for DropReason.
var (
DropReason_name = map[int32]string{
0: "DROP_REASON_UNKNOWN",
130: "INVALID_SOURCE_MAC",
131: "INVALID_DESTINATION_MAC",
132: "INVALID_SOURCE_IP",
133: "POLICY_DENIED",
134: "INVALID_PACKET_DROPPED",
135: "CT_TRUNCATED_OR_INVALID_HEADER",
136: "CT_MISSING_TCP_ACK_FLAG",
137: "CT_UNKNOWN_L4_PROTOCOL",
138: "CT_CANNOT_CREATE_ENTRY_FROM_PACKET",
139: "UNSUPPORTED_L3_PROTOCOL",
140: "MISSED_TAIL_CALL",
141: "ERROR_WRITING_TO_PACKET",
142: "UNKNOWN_L4_PROTOCOL",
143: "UNKNOWN_ICMPV4_CODE",
144: "UNKNOWN_ICMPV4_TYPE",
145: "UNKNOWN_ICMPV6_CODE",
146: "UNKNOWN_ICMPV6_TYPE",
147: "ERROR_RETRIEVING_TUNNEL_KEY",
148: "ERROR_RETRIEVING_TUNNEL_OPTIONS",
149: "INVALID_GENEVE_OPTION",
150: "UNKNOWN_L3_TARGET_ADDRESS",
151: "STALE_OR_UNROUTABLE_IP",
152: "NO_MATCHING_LOCAL_CONTAINER_FOUND",
153: "ERROR_WHILE_CORRECTING_L3_CHECKSUM",
154: "ERROR_WHILE_CORRECTING_L4_CHECKSUM",
155: "CT_MAP_INSERTION_FAILED",
156: "INVALID_IPV6_EXTENSION_HEADER",
157: "IP_FRAGMENTATION_NOT_SUPPORTED",
158: "SERVICE_BACKEND_NOT_FOUND",
160: "NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT",
161: "FAILED_TO_INSERT_INTO_PROXYMAP",
162: "REACHED_EDT_RATE_LIMITING_DROP_HORIZON",
163: "UNKNOWN_CONNECTION_TRACKING_STATE",
164: "LOCAL_HOST_IS_UNREACHABLE",
165: "NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION",
166: "UNSUPPORTED_L2_PROTOCOL",
167: "NO_MAPPING_FOR_NAT_MASQUERADE",
168: "UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE",
169: "FIB_LOOKUP_FAILED",
170: "ENCAPSULATION_TRAFFIC_IS_PROHIBITED",
171: "INVALID_IDENTITY",
172: "UNKNOWN_SENDER",
173: "NAT_NOT_NEEDED",
174: "IS_A_CLUSTERIP",
175: "FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND",
176: "FORBIDDEN_ICMPV6_MESSAGE",
177: "DENIED_BY_LB_SRC_RANGE_CHECK",
178: "SOCKET_LOOKUP_FAILED",
179: "SOCKET_ASSIGN_FAILED",
180: "PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL",
181: "POLICY_DENY",
182: "VLAN_FILTERED",
183: "INVALID_VNI",
184: "INVALID_TC_BUFFER",
185: "NO_SID",
186: "MISSING_SRV6_STATE",
187: "NAT46",
188: "NAT64",
189: "AUTH_REQUIRED",
190: "CT_NO_MAP_FOUND",
191: "SNAT_NO_MAP_FOUND",
192: "INVALID_CLUSTER_ID",
193: "UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP",
194: "NO_EGRESS_GATEWAY",
195: "UNENCRYPTED_TRAFFIC",
196: "TTL_EXCEEDED",
197: "NO_NODE_ID",
198: "DROP_RATE_LIMITED",
199: "IGMP_HANDLED",
200: "IGMP_SUBSCRIBED",
201: "MULTICAST_HANDLED",
202: "DROP_HOST_NOT_READY",
203: "DROP_EP_NOT_READY",
204: "DROP_NO_EGRESS_IP",
205: "DROP_PUNT_PROXY",
}
DropReason_value = map[string]int32{
"DROP_REASON_UNKNOWN": 0,
"INVALID_SOURCE_MAC": 130,
"INVALID_DESTINATION_MAC": 131,
"INVALID_SOURCE_IP": 132,
"POLICY_DENIED": 133,
"INVALID_PACKET_DROPPED": 134,
"CT_TRUNCATED_OR_INVALID_HEADER": 135,
"CT_MISSING_TCP_ACK_FLAG": 136,
"CT_UNKNOWN_L4_PROTOCOL": 137,
"CT_CANNOT_CREATE_ENTRY_FROM_PACKET": 138,
"UNSUPPORTED_L3_PROTOCOL": 139,
"MISSED_TAIL_CALL": 140,
"ERROR_WRITING_TO_PACKET": 141,
"UNKNOWN_L4_PROTOCOL": 142,
"UNKNOWN_ICMPV4_CODE": 143,
"UNKNOWN_ICMPV4_TYPE": 144,
"UNKNOWN_ICMPV6_CODE": 145,
"UNKNOWN_ICMPV6_TYPE": 146,
"ERROR_RETRIEVING_TUNNEL_KEY": 147,
"ERROR_RETRIEVING_TUNNEL_OPTIONS": 148,
"INVALID_GENEVE_OPTION": 149,
"UNKNOWN_L3_TARGET_ADDRESS": 150,
"STALE_OR_UNROUTABLE_IP": 151,
"NO_MATCHING_LOCAL_CONTAINER_FOUND": 152,
"ERROR_WHILE_CORRECTING_L3_CHECKSUM": 153,
"ERROR_WHILE_CORRECTING_L4_CHECKSUM": 154,
"CT_MAP_INSERTION_FAILED": 155,
"INVALID_IPV6_EXTENSION_HEADER": 156,
"IP_FRAGMENTATION_NOT_SUPPORTED": 157,
"SERVICE_BACKEND_NOT_FOUND": 158,
"NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT": 160,
"FAILED_TO_INSERT_INTO_PROXYMAP": 161,
"REACHED_EDT_RATE_LIMITING_DROP_HORIZON": 162,
"UNKNOWN_CONNECTION_TRACKING_STATE": 163,
"LOCAL_HOST_IS_UNREACHABLE": 164,
"NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION": 165,
"UNSUPPORTED_L2_PROTOCOL": 166,
"NO_MAPPING_FOR_NAT_MASQUERADE": 167,
"UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE": 168,
"FIB_LOOKUP_FAILED": 169,
"ENCAPSULATION_TRAFFIC_IS_PROHIBITED": 170,
"INVALID_IDENTITY": 171,
"UNKNOWN_SENDER": 172,
"NAT_NOT_NEEDED": 173,
"IS_A_CLUSTERIP": 174,
"FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND": 175,
"FORBIDDEN_ICMPV6_MESSAGE": 176,
"DENIED_BY_LB_SRC_RANGE_CHECK": 177,
"SOCKET_LOOKUP_FAILED": 178,
"SOCKET_ASSIGN_FAILED": 179,
"PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL": 180,
"POLICY_DENY": 181,
"VLAN_FILTERED": 182,
"INVALID_VNI": 183,
"INVALID_TC_BUFFER": 184,
"NO_SID": 185,
"MISSING_SRV6_STATE": 186,
"NAT46": 187,
"NAT64": 188,
"AUTH_REQUIRED": 189,
"CT_NO_MAP_FOUND": 190,
"SNAT_NO_MAP_FOUND": 191,
"INVALID_CLUSTER_ID": 192,
"UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP": 193,
"NO_EGRESS_GATEWAY": 194,
"UNENCRYPTED_TRAFFIC": 195,
"TTL_EXCEEDED": 196,
"NO_NODE_ID": 197,
"DROP_RATE_LIMITED": 198,
"IGMP_HANDLED": 199,
"IGMP_SUBSCRIBED": 200,
"MULTICAST_HANDLED": 201,
"DROP_HOST_NOT_READY": 202,
"DROP_EP_NOT_READY": 203,
"DROP_NO_EGRESS_IP": 204,
"DROP_PUNT_PROXY": 205,
}
)
// Enum returns a pointer to a new DropReason whose value is x.
func (x DropReason) Enum() *DropReason {
p := new(DropReason)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x DropReason) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for DropReason.
func (DropReason) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[7].Descriptor()
}
// Type returns the protobuf enum type for DropReason.
func (DropReason) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[7]
}
// Number returns x as a protoreflect.EnumNumber.
func (x DropReason) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use DropReason.Descriptor instead.
func (DropReason) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{7}
}
// TrafficDirection is the Go representation of the TrafficDirection enum
// generated from flow/flow.proto.
type TrafficDirection int32
const (
TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN TrafficDirection = 0
TrafficDirection_INGRESS TrafficDirection = 1
TrafficDirection_EGRESS TrafficDirection = 2
)
// Enum value maps for TrafficDirection.
var (
TrafficDirection_name = map[int32]string{
0: "TRAFFIC_DIRECTION_UNKNOWN",
1: "INGRESS",
2: "EGRESS",
}
TrafficDirection_value = map[string]int32{
"TRAFFIC_DIRECTION_UNKNOWN": 0,
"INGRESS": 1,
"EGRESS": 2,
}
)
// Enum returns a pointer to a new TrafficDirection whose value is x.
func (x TrafficDirection) Enum() *TrafficDirection {
p := new(TrafficDirection)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x TrafficDirection) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for TrafficDirection.
func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[8].Descriptor()
}
// Type returns the protobuf enum type for TrafficDirection.
func (TrafficDirection) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[8]
}
// Number returns x as a protoreflect.EnumNumber.
func (x TrafficDirection) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TrafficDirection.Descriptor instead.
func (TrafficDirection) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{8}
}
// These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h.
//
// DebugCapturePoint is the Go representation of the DebugCapturePoint enum
// generated from flow/flow.proto.
type DebugCapturePoint int32
const (
DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN DebugCapturePoint = 0
DebugCapturePoint_DBG_CAPTURE_DELIVERY DebugCapturePoint = 4
DebugCapturePoint_DBG_CAPTURE_FROM_LB DebugCapturePoint = 5
DebugCapturePoint_DBG_CAPTURE_AFTER_V46 DebugCapturePoint = 6
DebugCapturePoint_DBG_CAPTURE_AFTER_V64 DebugCapturePoint = 7
DebugCapturePoint_DBG_CAPTURE_PROXY_PRE DebugCapturePoint = 8
DebugCapturePoint_DBG_CAPTURE_PROXY_POST DebugCapturePoint = 9
DebugCapturePoint_DBG_CAPTURE_SNAT_PRE DebugCapturePoint = 10
DebugCapturePoint_DBG_CAPTURE_SNAT_POST DebugCapturePoint = 11
)
// Enum value maps for DebugCapturePoint.
var (
DebugCapturePoint_name = map[int32]string{
0: "DBG_CAPTURE_POINT_UNKNOWN",
4: "DBG_CAPTURE_DELIVERY",
5: "DBG_CAPTURE_FROM_LB",
6: "DBG_CAPTURE_AFTER_V46",
7: "DBG_CAPTURE_AFTER_V64",
8: "DBG_CAPTURE_PROXY_PRE",
9: "DBG_CAPTURE_PROXY_POST",
10: "DBG_CAPTURE_SNAT_PRE",
11: "DBG_CAPTURE_SNAT_POST",
}
DebugCapturePoint_value = map[string]int32{
"DBG_CAPTURE_POINT_UNKNOWN": 0,
"DBG_CAPTURE_DELIVERY": 4,
"DBG_CAPTURE_FROM_LB": 5,
"DBG_CAPTURE_AFTER_V46": 6,
"DBG_CAPTURE_AFTER_V64": 7,
"DBG_CAPTURE_PROXY_PRE": 8,
"DBG_CAPTURE_PROXY_POST": 9,
"DBG_CAPTURE_SNAT_PRE": 10,
"DBG_CAPTURE_SNAT_POST": 11,
}
)
// Enum returns a pointer to a new DebugCapturePoint whose value is x.
func (x DebugCapturePoint) Enum() *DebugCapturePoint {
p := new(DebugCapturePoint)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x DebugCapturePoint) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for DebugCapturePoint.
func (DebugCapturePoint) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[9].Descriptor()
}
// Type returns the protobuf enum type for DebugCapturePoint.
func (DebugCapturePoint) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[9]
}
// Number returns x as a protoreflect.EnumNumber.
func (x DebugCapturePoint) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use DebugCapturePoint.Descriptor instead.
func (DebugCapturePoint) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{9}
}
// EventType constants are based on the ones from <linux/perf_event.h>.
type EventType int32
const (
EventType_UNKNOWN EventType = 0
// EventSample is equivalent to PERF_RECORD_SAMPLE.
EventType_EventSample EventType = 9
// RecordLost is equivalent to PERF_RECORD_LOST.
EventType_RecordLost EventType = 2
)
// Enum value maps for EventType.
var (
EventType_name = map[int32]string{
0: "UNKNOWN",
9: "EventSample",
2: "RecordLost",
}
EventType_value = map[string]int32{
"UNKNOWN": 0,
"EventSample": 9,
"RecordLost": 2,
}
)
// Enum returns a pointer to a new EventType whose value is x.
func (x EventType) Enum() *EventType {
p := new(EventType)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x EventType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for EventType.
func (EventType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[10].Descriptor()
}
// Type returns the protobuf enum type for EventType.
func (EventType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[10]
}
// Number returns x as a protoreflect.EnumNumber.
func (x EventType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use EventType.Descriptor instead.
func (EventType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{10}
}
// LostEventSource is the Go representation of the LostEventSource enum
// generated from flow/flow.proto. It identifies where events were lost.
type LostEventSource int32
const (
LostEventSource_UNKNOWN_LOST_EVENT_SOURCE LostEventSource = 0
// PERF_EVENT_RING_BUFFER indicates that events were dropped in the BPF
// perf event ring buffer, indicating that userspace agent did not keep up
// with the events produced by the datapath.
LostEventSource_PERF_EVENT_RING_BUFFER LostEventSource = 1
// OBSERVER_EVENTS_QUEUE indicates that events were dropped because the
// Hubble events queue was full, indicating that the Hubble observer did
// not keep up.
LostEventSource_OBSERVER_EVENTS_QUEUE LostEventSource = 2
// HUBBLE_RING_BUFFER indicates that the event was dropped because it could
// not be read from Hubble's ring buffer in time before being overwritten.
LostEventSource_HUBBLE_RING_BUFFER LostEventSource = 3
)
// Enum value maps for LostEventSource.
var (
LostEventSource_name = map[int32]string{
0: "UNKNOWN_LOST_EVENT_SOURCE",
1: "PERF_EVENT_RING_BUFFER",
2: "OBSERVER_EVENTS_QUEUE",
3: "HUBBLE_RING_BUFFER",
}
LostEventSource_value = map[string]int32{
"UNKNOWN_LOST_EVENT_SOURCE": 0,
"PERF_EVENT_RING_BUFFER": 1,
"OBSERVER_EVENTS_QUEUE": 2,
"HUBBLE_RING_BUFFER": 3,
}
)
// Enum returns a pointer to a new LostEventSource whose value is x.
func (x LostEventSource) Enum() *LostEventSource {
p := new(LostEventSource)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x LostEventSource) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for LostEventSource.
func (LostEventSource) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[11].Descriptor()
}
// Type returns the protobuf enum type for LostEventSource.
func (LostEventSource) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[11]
}
// Number returns x as a protoreflect.EnumNumber.
func (x LostEventSource) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use LostEventSource.Descriptor instead.
func (LostEventSource) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{11}
}
// AgentEventType is the type of agent event. These values are shared with type
// AgentNotification in pkg/monitor/api/types.go.
type AgentEventType int32
const (
AgentEventType_AGENT_EVENT_UNKNOWN AgentEventType = 0
AgentEventType_AGENT_STARTED AgentEventType = 2
AgentEventType_POLICY_UPDATED AgentEventType = 3
AgentEventType_POLICY_DELETED AgentEventType = 4
AgentEventType_ENDPOINT_REGENERATE_SUCCESS AgentEventType = 5
AgentEventType_ENDPOINT_REGENERATE_FAILURE AgentEventType = 6
AgentEventType_ENDPOINT_CREATED AgentEventType = 7
AgentEventType_ENDPOINT_DELETED AgentEventType = 8
AgentEventType_IPCACHE_UPSERTED AgentEventType = 9
AgentEventType_IPCACHE_DELETED AgentEventType = 10
// Deprecated: Marked as deprecated in flow/flow.proto.
AgentEventType_SERVICE_UPSERTED AgentEventType = 11
// Deprecated: Marked as deprecated in flow/flow.proto.
AgentEventType_SERVICE_DELETED AgentEventType = 12
)
// Enum value maps for AgentEventType.
var (
AgentEventType_name = map[int32]string{
0: "AGENT_EVENT_UNKNOWN",
2: "AGENT_STARTED",
3: "POLICY_UPDATED",
4: "POLICY_DELETED",
5: "ENDPOINT_REGENERATE_SUCCESS",
6: "ENDPOINT_REGENERATE_FAILURE",
7: "ENDPOINT_CREATED",
8: "ENDPOINT_DELETED",
9: "IPCACHE_UPSERTED",
10: "IPCACHE_DELETED",
11: "SERVICE_UPSERTED",
12: "SERVICE_DELETED",
}
AgentEventType_value = map[string]int32{
"AGENT_EVENT_UNKNOWN": 0,
"AGENT_STARTED": 2,
"POLICY_UPDATED": 3,
"POLICY_DELETED": 4,
"ENDPOINT_REGENERATE_SUCCESS": 5,
"ENDPOINT_REGENERATE_FAILURE": 6,
"ENDPOINT_CREATED": 7,
"ENDPOINT_DELETED": 8,
"IPCACHE_UPSERTED": 9,
"IPCACHE_DELETED": 10,
"SERVICE_UPSERTED": 11,
"SERVICE_DELETED": 12,
}
)
// Enum returns a pointer to a new AgentEventType whose value is x.
func (x AgentEventType) Enum() *AgentEventType {
p := new(AgentEventType)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x AgentEventType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for AgentEventType.
func (AgentEventType) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[12].Descriptor()
}
// Type returns the protobuf enum type for AgentEventType.
func (AgentEventType) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[12]
}
// Number returns x as a protoreflect.EnumNumber.
func (x AgentEventType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AgentEventType.Descriptor instead.
func (AgentEventType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{12}
}
// This mirrors enum xlate_point in bpf/lib/trace_sock.h
type SocketTranslationPoint int32
const (
SocketTranslationPoint_SOCK_XLATE_POINT_UNKNOWN SocketTranslationPoint = 0
SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_FWD SocketTranslationPoint = 1 // Pre service translation
SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_FWD SocketTranslationPoint = 2 // Post service translation
SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_REV SocketTranslationPoint = 3 // Pre reverse service translation
SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_REV SocketTranslationPoint = 4 // Post reverse service translation
)
// Enum value maps for SocketTranslationPoint.
var (
SocketTranslationPoint_name = map[int32]string{
0: "SOCK_XLATE_POINT_UNKNOWN",
1: "SOCK_XLATE_POINT_PRE_DIRECTION_FWD",
2: "SOCK_XLATE_POINT_POST_DIRECTION_FWD",
3: "SOCK_XLATE_POINT_PRE_DIRECTION_REV",
4: "SOCK_XLATE_POINT_POST_DIRECTION_REV",
}
SocketTranslationPoint_value = map[string]int32{
"SOCK_XLATE_POINT_UNKNOWN": 0,
"SOCK_XLATE_POINT_PRE_DIRECTION_FWD": 1,
"SOCK_XLATE_POINT_POST_DIRECTION_FWD": 2,
"SOCK_XLATE_POINT_PRE_DIRECTION_REV": 3,
"SOCK_XLATE_POINT_POST_DIRECTION_REV": 4,
}
)
// Enum returns a pointer to a new SocketTranslationPoint whose value is x.
func (x SocketTranslationPoint) Enum() *SocketTranslationPoint {
p := new(SocketTranslationPoint)
*p = x
return p
}
// String formats x via the protoimpl enum stringer.
func (x SocketTranslationPoint) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf enum descriptor for SocketTranslationPoint.
func (SocketTranslationPoint) Descriptor() protoreflect.EnumDescriptor {
return file_flow_flow_proto_enumTypes[13].Descriptor()
}
// Type returns the protobuf enum type for SocketTranslationPoint.
func (SocketTranslationPoint) Type() protoreflect.EnumType {
return &file_flow_flow_proto_enumTypes[13]
}
// Number returns x as a protoreflect.EnumNumber.
func (x SocketTranslationPoint) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use SocketTranslationPoint.Descriptor instead.
func (SocketTranslationPoint) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{13}
}
// DebugEventType is the generated enum for flow.DebugEventType, the message
// type of a datapath debug event.
// These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h.
type DebugEventType int32

const (
	DebugEventType_DBG_EVENT_UNKNOWN                   DebugEventType = 0
	DebugEventType_DBG_GENERIC                         DebugEventType = 1
	DebugEventType_DBG_LOCAL_DELIVERY                  DebugEventType = 2
	DebugEventType_DBG_ENCAP                           DebugEventType = 3
	DebugEventType_DBG_LXC_FOUND                       DebugEventType = 4
	DebugEventType_DBG_POLICY_DENIED                   DebugEventType = 5
	DebugEventType_DBG_CT_LOOKUP                       DebugEventType = 6
	DebugEventType_DBG_CT_LOOKUP_REV                   DebugEventType = 7
	DebugEventType_DBG_CT_MATCH                        DebugEventType = 8
	DebugEventType_DBG_CT_CREATED                      DebugEventType = 9
	DebugEventType_DBG_CT_CREATED2                     DebugEventType = 10
	DebugEventType_DBG_ICMP6_HANDLE                    DebugEventType = 11
	DebugEventType_DBG_ICMP6_REQUEST                   DebugEventType = 12
	DebugEventType_DBG_ICMP6_NS                        DebugEventType = 13
	DebugEventType_DBG_ICMP6_TIME_EXCEEDED             DebugEventType = 14
	DebugEventType_DBG_CT_VERDICT                      DebugEventType = 15
	DebugEventType_DBG_DECAP                           DebugEventType = 16
	DebugEventType_DBG_PORT_MAP                        DebugEventType = 17
	DebugEventType_DBG_ERROR_RET                       DebugEventType = 18
	DebugEventType_DBG_TO_HOST                         DebugEventType = 19
	DebugEventType_DBG_TO_STACK                        DebugEventType = 20
	DebugEventType_DBG_PKT_HASH                        DebugEventType = 21
	DebugEventType_DBG_LB6_LOOKUP_FRONTEND             DebugEventType = 22
	DebugEventType_DBG_LB6_LOOKUP_FRONTEND_FAIL        DebugEventType = 23
	DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT         DebugEventType = 24
	DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS DebugEventType = 25
	DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL DebugEventType = 26
	DebugEventType_DBG_LB6_LOOKUP_BACKEND_FAIL         DebugEventType = 27
	DebugEventType_DBG_LB6_REVERSE_NAT_LOOKUP          DebugEventType = 28
	DebugEventType_DBG_LB6_REVERSE_NAT                 DebugEventType = 29
	DebugEventType_DBG_LB4_LOOKUP_FRONTEND             DebugEventType = 30
	DebugEventType_DBG_LB4_LOOKUP_FRONTEND_FAIL        DebugEventType = 31
	DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT         DebugEventType = 32
	DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS DebugEventType = 33
	DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL DebugEventType = 34
	DebugEventType_DBG_LB4_LOOKUP_BACKEND_FAIL         DebugEventType = 35
	DebugEventType_DBG_LB4_REVERSE_NAT_LOOKUP          DebugEventType = 36
	DebugEventType_DBG_LB4_REVERSE_NAT                 DebugEventType = 37
	DebugEventType_DBG_LB4_LOOPBACK_SNAT               DebugEventType = 38
	DebugEventType_DBG_LB4_LOOPBACK_SNAT_REV           DebugEventType = 39
	DebugEventType_DBG_CT_LOOKUP4                      DebugEventType = 40
	DebugEventType_DBG_RR_BACKEND_SLOT_SEL             DebugEventType = 41
	DebugEventType_DBG_REV_PROXY_LOOKUP                DebugEventType = 42
	DebugEventType_DBG_REV_PROXY_FOUND                 DebugEventType = 43
	DebugEventType_DBG_REV_PROXY_UPDATE                DebugEventType = 44
	DebugEventType_DBG_L4_POLICY                       DebugEventType = 45
	DebugEventType_DBG_NETDEV_IN_CLUSTER               DebugEventType = 46
	DebugEventType_DBG_NETDEV_ENCAP4                   DebugEventType = 47
	DebugEventType_DBG_CT_LOOKUP4_1                    DebugEventType = 48
	DebugEventType_DBG_CT_LOOKUP4_2                    DebugEventType = 49
	DebugEventType_DBG_CT_CREATED4                     DebugEventType = 50
	DebugEventType_DBG_CT_LOOKUP6_1                    DebugEventType = 51
	DebugEventType_DBG_CT_LOOKUP6_2                    DebugEventType = 52
	DebugEventType_DBG_CT_CREATED6                     DebugEventType = 53
	DebugEventType_DBG_SKIP_PROXY                      DebugEventType = 54
	DebugEventType_DBG_L4_CREATE                       DebugEventType = 55
	DebugEventType_DBG_IP_ID_MAP_FAILED4               DebugEventType = 56
	DebugEventType_DBG_IP_ID_MAP_FAILED6               DebugEventType = 57
	DebugEventType_DBG_IP_ID_MAP_SUCCEED4              DebugEventType = 58
	DebugEventType_DBG_IP_ID_MAP_SUCCEED6              DebugEventType = 59
	DebugEventType_DBG_LB_STALE_CT                     DebugEventType = 60
	DebugEventType_DBG_INHERIT_IDENTITY                DebugEventType = 61
	DebugEventType_DBG_SK_LOOKUP4                      DebugEventType = 62
	DebugEventType_DBG_SK_LOOKUP6                      DebugEventType = 63
	DebugEventType_DBG_SK_ASSIGN                       DebugEventType = 64
	DebugEventType_DBG_L7_LB                           DebugEventType = 65
	DebugEventType_DBG_SKIP_POLICY                     DebugEventType = 66
)

// Enum value maps for DebugEventType.
var (
	DebugEventType_name = map[int32]string{
		0:  "DBG_EVENT_UNKNOWN",
		1:  "DBG_GENERIC",
		2:  "DBG_LOCAL_DELIVERY",
		3:  "DBG_ENCAP",
		4:  "DBG_LXC_FOUND",
		5:  "DBG_POLICY_DENIED",
		6:  "DBG_CT_LOOKUP",
		7:  "DBG_CT_LOOKUP_REV",
		8:  "DBG_CT_MATCH",
		9:  "DBG_CT_CREATED",
		10: "DBG_CT_CREATED2",
		11: "DBG_ICMP6_HANDLE",
		12: "DBG_ICMP6_REQUEST",
		13: "DBG_ICMP6_NS",
		14: "DBG_ICMP6_TIME_EXCEEDED",
		15: "DBG_CT_VERDICT",
		16: "DBG_DECAP",
		17: "DBG_PORT_MAP",
		18: "DBG_ERROR_RET",
		19: "DBG_TO_HOST",
		20: "DBG_TO_STACK",
		21: "DBG_PKT_HASH",
		22: "DBG_LB6_LOOKUP_FRONTEND",
		23: "DBG_LB6_LOOKUP_FRONTEND_FAIL",
		24: "DBG_LB6_LOOKUP_BACKEND_SLOT",
		25: "DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS",
		26: "DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL",
		27: "DBG_LB6_LOOKUP_BACKEND_FAIL",
		28: "DBG_LB6_REVERSE_NAT_LOOKUP",
		29: "DBG_LB6_REVERSE_NAT",
		30: "DBG_LB4_LOOKUP_FRONTEND",
		31: "DBG_LB4_LOOKUP_FRONTEND_FAIL",
		32: "DBG_LB4_LOOKUP_BACKEND_SLOT",
		33: "DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS",
		34: "DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL",
		35: "DBG_LB4_LOOKUP_BACKEND_FAIL",
		36: "DBG_LB4_REVERSE_NAT_LOOKUP",
		37: "DBG_LB4_REVERSE_NAT",
		38: "DBG_LB4_LOOPBACK_SNAT",
		39: "DBG_LB4_LOOPBACK_SNAT_REV",
		40: "DBG_CT_LOOKUP4",
		41: "DBG_RR_BACKEND_SLOT_SEL",
		42: "DBG_REV_PROXY_LOOKUP",
		43: "DBG_REV_PROXY_FOUND",
		44: "DBG_REV_PROXY_UPDATE",
		45: "DBG_L4_POLICY",
		46: "DBG_NETDEV_IN_CLUSTER",
		47: "DBG_NETDEV_ENCAP4",
		48: "DBG_CT_LOOKUP4_1",
		49: "DBG_CT_LOOKUP4_2",
		50: "DBG_CT_CREATED4",
		51: "DBG_CT_LOOKUP6_1",
		52: "DBG_CT_LOOKUP6_2",
		53: "DBG_CT_CREATED6",
		54: "DBG_SKIP_PROXY",
		55: "DBG_L4_CREATE",
		56: "DBG_IP_ID_MAP_FAILED4",
		57: "DBG_IP_ID_MAP_FAILED6",
		58: "DBG_IP_ID_MAP_SUCCEED4",
		59: "DBG_IP_ID_MAP_SUCCEED6",
		60: "DBG_LB_STALE_CT",
		61: "DBG_INHERIT_IDENTITY",
		62: "DBG_SK_LOOKUP4",
		63: "DBG_SK_LOOKUP6",
		64: "DBG_SK_ASSIGN",
		65: "DBG_L7_LB",
		66: "DBG_SKIP_POLICY",
	}
	DebugEventType_value = map[string]int32{
		"DBG_EVENT_UNKNOWN":                   0,
		"DBG_GENERIC":                         1,
		"DBG_LOCAL_DELIVERY":                  2,
		"DBG_ENCAP":                           3,
		"DBG_LXC_FOUND":                       4,
		"DBG_POLICY_DENIED":                   5,
		"DBG_CT_LOOKUP":                       6,
		"DBG_CT_LOOKUP_REV":                   7,
		"DBG_CT_MATCH":                        8,
		"DBG_CT_CREATED":                      9,
		"DBG_CT_CREATED2":                     10,
		"DBG_ICMP6_HANDLE":                    11,
		"DBG_ICMP6_REQUEST":                   12,
		"DBG_ICMP6_NS":                        13,
		"DBG_ICMP6_TIME_EXCEEDED":             14,
		"DBG_CT_VERDICT":                      15,
		"DBG_DECAP":                           16,
		"DBG_PORT_MAP":                        17,
		"DBG_ERROR_RET":                       18,
		"DBG_TO_HOST":                         19,
		"DBG_TO_STACK":                        20,
		"DBG_PKT_HASH":                        21,
		"DBG_LB6_LOOKUP_FRONTEND":             22,
		"DBG_LB6_LOOKUP_FRONTEND_FAIL":        23,
		"DBG_LB6_LOOKUP_BACKEND_SLOT":         24,
		"DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS": 25,
		"DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL": 26,
		"DBG_LB6_LOOKUP_BACKEND_FAIL":         27,
		"DBG_LB6_REVERSE_NAT_LOOKUP":          28,
		"DBG_LB6_REVERSE_NAT":                 29,
		"DBG_LB4_LOOKUP_FRONTEND":             30,
		"DBG_LB4_LOOKUP_FRONTEND_FAIL":        31,
		"DBG_LB4_LOOKUP_BACKEND_SLOT":         32,
		"DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS": 33,
		"DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL": 34,
		"DBG_LB4_LOOKUP_BACKEND_FAIL":         35,
		"DBG_LB4_REVERSE_NAT_LOOKUP":          36,
		"DBG_LB4_REVERSE_NAT":                 37,
		"DBG_LB4_LOOPBACK_SNAT":               38,
		"DBG_LB4_LOOPBACK_SNAT_REV":           39,
		"DBG_CT_LOOKUP4":                      40,
		"DBG_RR_BACKEND_SLOT_SEL":             41,
		"DBG_REV_PROXY_LOOKUP":                42,
		"DBG_REV_PROXY_FOUND":                 43,
		"DBG_REV_PROXY_UPDATE":                44,
		"DBG_L4_POLICY":                       45,
		"DBG_NETDEV_IN_CLUSTER":               46,
		"DBG_NETDEV_ENCAP4":                   47,
		"DBG_CT_LOOKUP4_1":                    48,
		"DBG_CT_LOOKUP4_2":                    49,
		"DBG_CT_CREATED4":                     50,
		"DBG_CT_LOOKUP6_1":                    51,
		"DBG_CT_LOOKUP6_2":                    52,
		"DBG_CT_CREATED6":                     53,
		"DBG_SKIP_PROXY":                      54,
		"DBG_L4_CREATE":                       55,
		"DBG_IP_ID_MAP_FAILED4":               56,
		"DBG_IP_ID_MAP_FAILED6":               57,
		"DBG_IP_ID_MAP_SUCCEED4":              58,
		"DBG_IP_ID_MAP_SUCCEED6":              59,
		"DBG_LB_STALE_CT":                     60,
		"DBG_INHERIT_IDENTITY":                61,
		"DBG_SK_LOOKUP4":                      62,
		"DBG_SK_LOOKUP6":                      63,
		"DBG_SK_ASSIGN":                       64,
		"DBG_L7_LB":                           65,
		"DBG_SKIP_POLICY":                     66,
	}
)

// Enum returns a pointer to a new DebugEventType holding x's value.
func (x DebugEventType) Enum() *DebugEventType {
	p := new(DebugEventType)
	*p = x
	return p
}

// String returns the proto enum name for x.
func (x DebugEventType) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor (enum index 14 in this file's
// generated metadata).
func (DebugEventType) Descriptor() protoreflect.EnumDescriptor {
	return file_flow_flow_proto_enumTypes[14].Descriptor()
}

// Type returns the protobuf reflection EnumType for DebugEventType.
func (DebugEventType) Type() protoreflect.EnumType {
	return &file_flow_flow_proto_enumTypes[14]
}

// Number returns x's value as a protobuf enum number.
func (x DebugEventType) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use DebugEventType.Descriptor instead.
func (DebugEventType) EnumDescriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{14}
}
// Tunnel_Protocol is the generated enum for flow.Tunnel.Protocol, the
// encapsulation protocol of a tunnel.
type Tunnel_Protocol int32

const (
	Tunnel_UNKNOWN Tunnel_Protocol = 0
	Tunnel_VXLAN   Tunnel_Protocol = 1
	Tunnel_GENEVE  Tunnel_Protocol = 2
)

// Enum value maps for Tunnel_Protocol.
var (
	Tunnel_Protocol_name = map[int32]string{
		0: "UNKNOWN",
		1: "VXLAN",
		2: "GENEVE",
	}
	Tunnel_Protocol_value = map[string]int32{
		"UNKNOWN": 0,
		"VXLAN":   1,
		"GENEVE":  2,
	}
)

// Enum returns a pointer to a new Tunnel_Protocol holding x's value.
func (x Tunnel_Protocol) Enum() *Tunnel_Protocol {
	p := new(Tunnel_Protocol)
	*p = x
	return p
}

// String returns the proto enum name for x.
func (x Tunnel_Protocol) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor (enum index 15 in this file's
// generated metadata).
func (Tunnel_Protocol) Descriptor() protoreflect.EnumDescriptor {
	return file_flow_flow_proto_enumTypes[15].Descriptor()
}

// Type returns the protobuf reflection EnumType for Tunnel_Protocol.
func (Tunnel_Protocol) Type() protoreflect.EnumType {
	return &file_flow_flow_proto_enumTypes[15]
}

// Number returns x's value as a protobuf enum number.
func (x Tunnel_Protocol) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use Tunnel_Protocol.Descriptor instead.
// The path {16, 0} addresses the first enum nested inside message index 16.
func (Tunnel_Protocol) EnumDescriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{16, 0}
}
// Flow is the generated message type for flow.Flow: a single observed
// network event with L2-L7 metadata, endpoints, verdict, and matching
// policy information.
type Flow struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Time  *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
	// uuid is a universally unique identifier for this flow.
	Uuid    string  `protobuf:"bytes,34,opt,name=uuid,proto3" json:"uuid,omitempty"`
	Verdict Verdict `protobuf:"varint,2,opt,name=verdict,proto3,enum=flow.Verdict" json:"verdict,omitempty"`
	// only applicable to Verdict = DROPPED.
	// deprecated in favor of drop_reason_desc.
	//
	// Deprecated: Marked as deprecated in flow/flow.proto.
	DropReason uint32 `protobuf:"varint,3,opt,name=drop_reason,json=dropReason,proto3" json:"drop_reason,omitempty"`
	// auth_type is the authentication type specified for the flow in Cilium Network Policy.
	// Only set on policy verdict events.
	AuthType AuthType `protobuf:"varint,35,opt,name=auth_type,json=authType,proto3,enum=flow.AuthType" json:"auth_type,omitempty"`
	// l2
	Ethernet *Ethernet `protobuf:"bytes,4,opt,name=ethernet,proto3" json:"ethernet,omitempty"`
	// l3
	IP *IP `protobuf:"bytes,5,opt,name=IP,proto3" json:"IP,omitempty"`
	// l4
	L4          *Layer4   `protobuf:"bytes,6,opt,name=l4,proto3" json:"l4,omitempty"`
	Tunnel      *Tunnel   `protobuf:"bytes,39,opt,name=tunnel,proto3" json:"tunnel,omitempty"`
	Source      *Endpoint `protobuf:"bytes,8,opt,name=source,proto3" json:"source,omitempty"`
	Destination *Endpoint `protobuf:"bytes,9,opt,name=destination,proto3" json:"destination,omitempty"`
	Type        FlowType  `protobuf:"varint,10,opt,name=Type,proto3,enum=flow.FlowType" json:"Type,omitempty"`
	// NodeName is the name of the node from which this Flow was captured.
	NodeName string `protobuf:"bytes,11,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// node labels in `foo=bar` format.
	NodeLabels []string `protobuf:"bytes,37,rep,name=node_labels,json=nodeLabels,proto3" json:"node_labels,omitempty"`
	// all names the source IP can have.
	SourceNames []string `protobuf:"bytes,13,rep,name=source_names,json=sourceNames,proto3" json:"source_names,omitempty"`
	// all names the destination IP can have.
	DestinationNames []string `protobuf:"bytes,14,rep,name=destination_names,json=destinationNames,proto3" json:"destination_names,omitempty"`
	// L7 information. This field is set if and only if FlowType is L7.
	L7 *Layer7 `protobuf:"bytes,15,opt,name=l7,proto3" json:"l7,omitempty"`
	// Deprecated. This suffers from false negatives due to protobuf not being
	// able to distinguish between the value being false or it being absent.
	// Please use is_reply instead.
	//
	// Deprecated: Marked as deprecated in flow/flow.proto.
	Reply bool `protobuf:"varint,16,opt,name=reply,proto3" json:"reply,omitempty"`
	// EventType of the originating Cilium event
	EventType *CiliumEventType `protobuf:"bytes,19,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"`
	// source_service contains the service name of the source
	SourceService *Service `protobuf:"bytes,20,opt,name=source_service,json=sourceService,proto3" json:"source_service,omitempty"`
	// destination_service contains the service name of the destination
	DestinationService *Service `protobuf:"bytes,21,opt,name=destination_service,json=destinationService,proto3" json:"destination_service,omitempty"`
	// traffic_direction of the connection, e.g. ingress or egress
	TrafficDirection TrafficDirection `protobuf:"varint,22,opt,name=traffic_direction,json=trafficDirection,proto3,enum=flow.TrafficDirection" json:"traffic_direction,omitempty"`
	// policy_match_type is only applicable to the cilium event type PolicyVerdict
	// https://github.com/cilium/cilium/blob/e831859b5cc336c6d964a6d35bbd34d1840e21b9/pkg/monitor/datapath_policy.go#L50
	PolicyMatchType uint32 `protobuf:"varint,23,opt,name=policy_match_type,json=policyMatchType,proto3" json:"policy_match_type,omitempty"`
	// Only applicable to cilium trace notifications, blank for other types.
	TraceObservationPoint TraceObservationPoint `protobuf:"varint,24,opt,name=trace_observation_point,json=traceObservationPoint,proto3,enum=flow.TraceObservationPoint" json:"trace_observation_point,omitempty"`
	// Cilium datapath trace reason info.
	TraceReason TraceReason `protobuf:"varint,36,opt,name=trace_reason,json=traceReason,proto3,enum=flow.TraceReason" json:"trace_reason,omitempty"`
	// Cilium datapath filename and line number. Currently only applicable when
	// Verdict = DROPPED.
	File *FileInfo `protobuf:"bytes,38,opt,name=file,proto3" json:"file,omitempty"`
	// only applicable to Verdict = DROPPED.
	DropReasonDesc DropReason `protobuf:"varint,25,opt,name=drop_reason_desc,json=dropReasonDesc,proto3,enum=flow.DropReason" json:"drop_reason_desc,omitempty"`
	// is_reply indicates that this was a packet (L4) or message (L7) in the
	// reply direction. May be absent (in which case it is unknown whether it
	// is a reply or not).
	IsReply *wrapperspb.BoolValue `protobuf:"bytes,26,opt,name=is_reply,json=isReply,proto3" json:"is_reply,omitempty"`
	// Only applicable to cilium debug capture events, blank for other types
	DebugCapturePoint DebugCapturePoint `protobuf:"varint,27,opt,name=debug_capture_point,json=debugCapturePoint,proto3,enum=flow.DebugCapturePoint" json:"debug_capture_point,omitempty"`
	// interface is the network interface on which this flow was observed
	Interface *NetworkInterface `protobuf:"bytes,28,opt,name=interface,proto3" json:"interface,omitempty"`
	// proxy_port indicates the port of the proxy to which the flow was forwarded
	ProxyPort uint32 `protobuf:"varint,29,opt,name=proxy_port,json=proxyPort,proto3" json:"proxy_port,omitempty"`
	// trace_context contains information about a trace related to the flow, if
	// any.
	TraceContext *TraceContext `protobuf:"bytes,30,opt,name=trace_context,json=traceContext,proto3" json:"trace_context,omitempty"`
	// sock_xlate_point is the socket translation point.
	// Only applicable to TraceSock notifications, blank for other types
	SockXlatePoint SocketTranslationPoint `protobuf:"varint,31,opt,name=sock_xlate_point,json=sockXlatePoint,proto3,enum=flow.SocketTranslationPoint" json:"sock_xlate_point,omitempty"`
	// socket_cookie is the Linux kernel socket cookie for this flow.
	// Only applicable to TraceSock notifications, zero for other types
	SocketCookie uint64 `protobuf:"varint,32,opt,name=socket_cookie,json=socketCookie,proto3" json:"socket_cookie,omitempty"`
	// cgroup_id of the process which emitted this event.
	// Only applicable to TraceSock notifications, zero for other types
	CgroupId uint64 `protobuf:"varint,33,opt,name=cgroup_id,json=cgroupId,proto3" json:"cgroup_id,omitempty"`
	// This is a temporary workaround to support summary field for pb.Flow without
	// duplicating logic from the old parser. This field will be removed once we
	// fully migrate to the new parser.
	//
	// Deprecated: Marked as deprecated in flow/flow.proto.
	Summary string `protobuf:"bytes,100000,opt,name=Summary,proto3" json:"Summary,omitempty"`
	// extensions can be used to add arbitrary additional metadata to flows.
	// This can be used to extend functionality for other Hubble compatible
	// APIs, or experiment with new functionality without needing to change the public API.
	Extensions *anypb.Any `protobuf:"bytes,150000,opt,name=extensions,proto3" json:"extensions,omitempty"`
	// The CiliumNetworkPolicies allowing the egress of the flow.
	EgressAllowedBy []*Policy `protobuf:"bytes,21001,rep,name=egress_allowed_by,json=egressAllowedBy,proto3" json:"egress_allowed_by,omitempty"`
	// The CiliumNetworkPolicies allowing the ingress of the flow.
	IngressAllowedBy []*Policy `protobuf:"bytes,21002,rep,name=ingress_allowed_by,json=ingressAllowedBy,proto3" json:"ingress_allowed_by,omitempty"`
	// The CiliumNetworkPolicies denying the egress of the flow.
	EgressDeniedBy []*Policy `protobuf:"bytes,21004,rep,name=egress_denied_by,json=egressDeniedBy,proto3" json:"egress_denied_by,omitempty"`
	// The CiliumNetworkPolicies denying the ingress of the flow.
	IngressDeniedBy []*Policy `protobuf:"bytes,21005,rep,name=ingress_denied_by,json=ingressDeniedBy,proto3" json:"ingress_denied_by,omitempty"`
	// The set of Log values for policies that matched this flow.
	// If no matched policies have an explicit log value configured,
	// this list is empty. Duplicate values are elided; each
	// entry is unique.
	PolicyLog     []string `protobuf:"bytes,21006,rep,name=policy_log,json=policyLog,proto3" json:"policy_log,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to its zero state while re-attaching the generated
// message metadata so the value stays usable by the protobuf runtime.
func (x *Flow) Reset() {
	*x = Flow{}
	mi := &file_flow_flow_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *Flow) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Flow as implementing the protobuf message interface.
func (*Flow) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use (message index 0 in this file's metadata).
func (x *Flow) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Flow.ProtoReflect.Descriptor instead.
func (*Flow) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{0}
}
// Generated accessors for Flow. Each getter is nil-receiver safe and
// returns the field's zero value when x is nil, matching standard
// protoc-gen-go accessor semantics.

// GetTime returns the time field, or nil if x is nil.
func (x *Flow) GetTime() *timestamppb.Timestamp {
	if x == nil {
		return nil
	}
	return x.Time
}

// GetUuid returns the uuid field, or "" if x is nil.
func (x *Flow) GetUuid() string {
	if x == nil {
		return ""
	}
	return x.Uuid
}

// GetVerdict returns the verdict field, or VERDICT_UNKNOWN if x is nil.
func (x *Flow) GetVerdict() Verdict {
	if x == nil {
		return Verdict_VERDICT_UNKNOWN
	}
	return x.Verdict
}

// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *Flow) GetDropReason() uint32 {
	if x == nil {
		return 0
	}
	return x.DropReason
}

// GetAuthType returns the auth_type field, or DISABLED if x is nil.
func (x *Flow) GetAuthType() AuthType {
	if x == nil {
		return AuthType_DISABLED
	}
	return x.AuthType
}

// GetEthernet returns the ethernet (L2) field, or nil if x is nil.
func (x *Flow) GetEthernet() *Ethernet {
	if x == nil {
		return nil
	}
	return x.Ethernet
}

// GetIP returns the IP (L3) field, or nil if x is nil.
func (x *Flow) GetIP() *IP {
	if x == nil {
		return nil
	}
	return x.IP
}

// GetL4 returns the l4 field, or nil if x is nil.
func (x *Flow) GetL4() *Layer4 {
	if x == nil {
		return nil
	}
	return x.L4
}

// GetTunnel returns the tunnel field, or nil if x is nil.
func (x *Flow) GetTunnel() *Tunnel {
	if x == nil {
		return nil
	}
	return x.Tunnel
}

// GetSource returns the source endpoint, or nil if x is nil.
func (x *Flow) GetSource() *Endpoint {
	if x == nil {
		return nil
	}
	return x.Source
}

// GetDestination returns the destination endpoint, or nil if x is nil.
func (x *Flow) GetDestination() *Endpoint {
	if x == nil {
		return nil
	}
	return x.Destination
}

// GetType returns the flow Type field, or UNKNOWN_TYPE if x is nil.
func (x *Flow) GetType() FlowType {
	if x == nil {
		return FlowType_UNKNOWN_TYPE
	}
	return x.Type
}

// GetNodeName returns the node_name field, or "" if x is nil.
func (x *Flow) GetNodeName() string {
	if x == nil {
		return ""
	}
	return x.NodeName
}

// GetNodeLabels returns the node_labels field, or nil if x is nil.
func (x *Flow) GetNodeLabels() []string {
	if x == nil {
		return nil
	}
	return x.NodeLabels
}

// GetSourceNames returns the source_names field, or nil if x is nil.
func (x *Flow) GetSourceNames() []string {
	if x == nil {
		return nil
	}
	return x.SourceNames
}

// GetDestinationNames returns the destination_names field, or nil if x is nil.
func (x *Flow) GetDestinationNames() []string {
	if x == nil {
		return nil
	}
	return x.DestinationNames
}

// GetL7 returns the l7 field, or nil if x is nil.
func (x *Flow) GetL7() *Layer7 {
	if x == nil {
		return nil
	}
	return x.L7
}

// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *Flow) GetReply() bool {
	if x == nil {
		return false
	}
	return x.Reply
}

// GetEventType returns the event_type field, or nil if x is nil.
func (x *Flow) GetEventType() *CiliumEventType {
	if x == nil {
		return nil
	}
	return x.EventType
}

// GetSourceService returns the source_service field, or nil if x is nil.
func (x *Flow) GetSourceService() *Service {
	if x == nil {
		return nil
	}
	return x.SourceService
}

// GetDestinationService returns the destination_service field, or nil if x is nil.
func (x *Flow) GetDestinationService() *Service {
	if x == nil {
		return nil
	}
	return x.DestinationService
}

// GetTrafficDirection returns the traffic_direction field, or
// TRAFFIC_DIRECTION_UNKNOWN if x is nil.
func (x *Flow) GetTrafficDirection() TrafficDirection {
	if x == nil {
		return TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN
	}
	return x.TrafficDirection
}

// GetPolicyMatchType returns the policy_match_type field, or 0 if x is nil.
func (x *Flow) GetPolicyMatchType() uint32 {
	if x == nil {
		return 0
	}
	return x.PolicyMatchType
}

// GetTraceObservationPoint returns the trace_observation_point field, or
// UNKNOWN_POINT if x is nil.
func (x *Flow) GetTraceObservationPoint() TraceObservationPoint {
	if x == nil {
		return TraceObservationPoint_UNKNOWN_POINT
	}
	return x.TraceObservationPoint
}

// GetTraceReason returns the trace_reason field, or TRACE_REASON_UNKNOWN
// if x is nil.
func (x *Flow) GetTraceReason() TraceReason {
	if x == nil {
		return TraceReason_TRACE_REASON_UNKNOWN
	}
	return x.TraceReason
}

// GetFile returns the file field, or nil if x is nil.
func (x *Flow) GetFile() *FileInfo {
	if x == nil {
		return nil
	}
	return x.File
}

// GetDropReasonDesc returns the drop_reason_desc field, or
// DROP_REASON_UNKNOWN if x is nil.
func (x *Flow) GetDropReasonDesc() DropReason {
	if x == nil {
		return DropReason_DROP_REASON_UNKNOWN
	}
	return x.DropReasonDesc
}

// GetIsReply returns the is_reply field, or nil if x is nil.
func (x *Flow) GetIsReply() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.IsReply
}

// GetDebugCapturePoint returns the debug_capture_point field, or
// DBG_CAPTURE_POINT_UNKNOWN if x is nil.
func (x *Flow) GetDebugCapturePoint() DebugCapturePoint {
	if x == nil {
		return DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN
	}
	return x.DebugCapturePoint
}

// GetInterface returns the interface field, or nil if x is nil.
func (x *Flow) GetInterface() *NetworkInterface {
	if x == nil {
		return nil
	}
	return x.Interface
}

// GetProxyPort returns the proxy_port field, or 0 if x is nil.
func (x *Flow) GetProxyPort() uint32 {
	if x == nil {
		return 0
	}
	return x.ProxyPort
}

// GetTraceContext returns the trace_context field, or nil if x is nil.
func (x *Flow) GetTraceContext() *TraceContext {
	if x == nil {
		return nil
	}
	return x.TraceContext
}

// GetSockXlatePoint returns the sock_xlate_point field, or
// SOCK_XLATE_POINT_UNKNOWN if x is nil.
func (x *Flow) GetSockXlatePoint() SocketTranslationPoint {
	if x == nil {
		return SocketTranslationPoint_SOCK_XLATE_POINT_UNKNOWN
	}
	return x.SockXlatePoint
}

// GetSocketCookie returns the socket_cookie field, or 0 if x is nil.
func (x *Flow) GetSocketCookie() uint64 {
	if x == nil {
		return 0
	}
	return x.SocketCookie
}

// GetCgroupId returns the cgroup_id field, or 0 if x is nil.
func (x *Flow) GetCgroupId() uint64 {
	if x == nil {
		return 0
	}
	return x.CgroupId
}

// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *Flow) GetSummary() string {
	if x == nil {
		return ""
	}
	return x.Summary
}

// GetExtensions returns the extensions field, or nil if x is nil.
func (x *Flow) GetExtensions() *anypb.Any {
	if x == nil {
		return nil
	}
	return x.Extensions
}

// GetEgressAllowedBy returns the egress_allowed_by field, or nil if x is nil.
func (x *Flow) GetEgressAllowedBy() []*Policy {
	if x == nil {
		return nil
	}
	return x.EgressAllowedBy
}

// GetIngressAllowedBy returns the ingress_allowed_by field, or nil if x is nil.
func (x *Flow) GetIngressAllowedBy() []*Policy {
	if x == nil {
		return nil
	}
	return x.IngressAllowedBy
}

// GetEgressDeniedBy returns the egress_denied_by field, or nil if x is nil.
func (x *Flow) GetEgressDeniedBy() []*Policy {
	if x == nil {
		return nil
	}
	return x.EgressDeniedBy
}

// GetIngressDeniedBy returns the ingress_denied_by field, or nil if x is nil.
func (x *Flow) GetIngressDeniedBy() []*Policy {
	if x == nil {
		return nil
	}
	return x.IngressDeniedBy
}

// GetPolicyLog returns the policy_log field, or nil if x is nil.
func (x *Flow) GetPolicyLog() []string {
	if x == nil {
		return nil
	}
	return x.PolicyLog
}
// FileInfo is the generated message type for flow.FileInfo: a source file
// name and line number (used for datapath drop locations per the Flow.File
// field comment).
type FileInfo struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Line          uint32                 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and re-attaches message metadata.
func (x *FileInfo) Reset() {
	*x = FileInfo{}
	mi := &file_flow_flow_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *FileInfo) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *FileInfo as a protobuf message.
func (*FileInfo) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (message index 1).
func (x *FileInfo) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileInfo.ProtoReflect.Descriptor instead.
func (*FileInfo) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{1}
}

// GetName returns the name field, or "" if x is nil.
func (x *FileInfo) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetLine returns the line field, or 0 if x is nil.
func (x *FileInfo) GetLine() uint32 {
	if x != nil {
		return x.Line
	}
	return 0
}
// Layer4 is the generated message type for flow.Layer4: a oneof over the
// supported transport protocols (TCP, UDP, ICMPv4, ICMPv6, SCTP).
type Layer4 struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Protocol:
	//
	//	*Layer4_TCP
	//	*Layer4_UDP
	//	*Layer4_ICMPv4
	//	*Layer4_ICMPv6
	//	*Layer4_SCTP
	Protocol      isLayer4_Protocol `protobuf_oneof:"protocol"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and re-attaches message metadata.
func (x *Layer4) Reset() {
	*x = Layer4{}
	mi := &file_flow_flow_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *Layer4) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Layer4 as a protobuf message.
func (*Layer4) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (message index 2).
func (x *Layer4) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Layer4.ProtoReflect.Descriptor instead.
func (*Layer4) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{2}
}

// GetProtocol returns the raw oneof value, or nil if x is nil or no
// protocol variant is set.
func (x *Layer4) GetProtocol() isLayer4_Protocol {
	if x != nil {
		return x.Protocol
	}
	return nil
}

// GetTCP returns the TCP variant, or nil if another variant (or none) is set.
func (x *Layer4) GetTCP() *TCP {
	if x != nil {
		if x, ok := x.Protocol.(*Layer4_TCP); ok {
			return x.TCP
		}
	}
	return nil
}

// GetUDP returns the UDP variant, or nil if another variant (or none) is set.
func (x *Layer4) GetUDP() *UDP {
	if x != nil {
		if x, ok := x.Protocol.(*Layer4_UDP); ok {
			return x.UDP
		}
	}
	return nil
}

// GetICMPv4 returns the ICMPv4 variant, or nil if another variant (or none) is set.
func (x *Layer4) GetICMPv4() *ICMPv4 {
	if x != nil {
		if x, ok := x.Protocol.(*Layer4_ICMPv4); ok {
			return x.ICMPv4
		}
	}
	return nil
}

// GetICMPv6 returns the ICMPv6 variant, or nil if another variant (or none) is set.
func (x *Layer4) GetICMPv6() *ICMPv6 {
	if x != nil {
		if x, ok := x.Protocol.(*Layer4_ICMPv6); ok {
			return x.ICMPv6
		}
	}
	return nil
}

// GetSCTP returns the SCTP variant, or nil if another variant (or none) is set.
func (x *Layer4) GetSCTP() *SCTP {
	if x != nil {
		if x, ok := x.Protocol.(*Layer4_SCTP); ok {
			return x.SCTP
		}
	}
	return nil
}

// isLayer4_Protocol is the sealed interface implemented by the oneof
// wrapper types below.
type isLayer4_Protocol interface {
	isLayer4_Protocol()
}

type Layer4_TCP struct {
	TCP *TCP `protobuf:"bytes,1,opt,name=TCP,proto3,oneof"`
}

type Layer4_UDP struct {
	UDP *UDP `protobuf:"bytes,2,opt,name=UDP,proto3,oneof"`
}

type Layer4_ICMPv4 struct {
	// ICMP is technically not L4, but mutually exclusive with the above
	ICMPv4 *ICMPv4 `protobuf:"bytes,3,opt,name=ICMPv4,proto3,oneof"`
}

type Layer4_ICMPv6 struct {
	ICMPv6 *ICMPv6 `protobuf:"bytes,4,opt,name=ICMPv6,proto3,oneof"`
}

type Layer4_SCTP struct {
	SCTP *SCTP `protobuf:"bytes,5,opt,name=SCTP,proto3,oneof"`
}

func (*Layer4_TCP) isLayer4_Protocol() {}

func (*Layer4_UDP) isLayer4_Protocol() {}

func (*Layer4_ICMPv4) isLayer4_Protocol() {}

func (*Layer4_ICMPv6) isLayer4_Protocol() {}

func (*Layer4_SCTP) isLayer4_Protocol() {}
// Message for L7 flow, which roughly corresponds to Cilium's accesslog [LogRecord](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141):
type Layer7 struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Type  L7FlowType             `protobuf:"varint,1,opt,name=type,proto3,enum=flow.L7FlowType" json:"type,omitempty"`
	// Latency of the response
	LatencyNs uint64 `protobuf:"varint,2,opt,name=latency_ns,json=latencyNs,proto3" json:"latency_ns,omitempty"`
	// L7 field. This field is set if and only if FlowType is L7.
	//
	// Types that are valid to be assigned to Record:
	//
	//	*Layer7_Dns
	//	*Layer7_Http
	//	*Layer7_Kafka
	Record        isLayer7_Record `protobuf_oneof:"record"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and re-attaches message metadata.
func (x *Layer7) Reset() {
	*x = Layer7{}
	mi := &file_flow_flow_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *Layer7) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Layer7 as a protobuf message.
func (*Layer7) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (message index 3).
func (x *Layer7) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Layer7.ProtoReflect.Descriptor instead.
func (*Layer7) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{3}
}

// GetType returns the type field, or UNKNOWN_L7_TYPE if x is nil.
func (x *Layer7) GetType() L7FlowType {
	if x != nil {
		return x.Type
	}
	return L7FlowType_UNKNOWN_L7_TYPE
}

// GetLatencyNs returns the latency_ns field, or 0 if x is nil.
func (x *Layer7) GetLatencyNs() uint64 {
	if x != nil {
		return x.LatencyNs
	}
	return 0
}

// GetRecord returns the raw oneof value, or nil if x is nil or no record
// variant is set.
func (x *Layer7) GetRecord() isLayer7_Record {
	if x != nil {
		return x.Record
	}
	return nil
}

// GetDns returns the dns variant, or nil if another variant (or none) is set.
func (x *Layer7) GetDns() *DNS {
	if x != nil {
		if x, ok := x.Record.(*Layer7_Dns); ok {
			return x.Dns
		}
	}
	return nil
}

// GetHttp returns the http variant, or nil if another variant (or none) is set.
func (x *Layer7) GetHttp() *HTTP {
	if x != nil {
		if x, ok := x.Record.(*Layer7_Http); ok {
			return x.Http
		}
	}
	return nil
}

// GetKafka returns the kafka variant, or nil if another variant (or none) is set.
func (x *Layer7) GetKafka() *Kafka {
	if x != nil {
		if x, ok := x.Record.(*Layer7_Kafka); ok {
			return x.Kafka
		}
	}
	return nil
}

// isLayer7_Record is the sealed interface implemented by the oneof
// wrapper types below.
type isLayer7_Record interface {
	isLayer7_Record()
}

type Layer7_Dns struct {
	Dns *DNS `protobuf:"bytes,100,opt,name=dns,proto3,oneof"`
}

type Layer7_Http struct {
	Http *HTTP `protobuf:"bytes,101,opt,name=http,proto3,oneof"`
}

type Layer7_Kafka struct {
	Kafka *Kafka `protobuf:"bytes,102,opt,name=kafka,proto3,oneof"`
}

func (*Layer7_Dns) isLayer7_Record() {}

func (*Layer7_Http) isLayer7_Record() {}

func (*Layer7_Kafka) isLayer7_Record() {}
// TraceContext contains trace context propagation data, i.e. information about a
// distributed trace.
// For more information about trace context, check the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/).
type TraceContext struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// parent identifies the incoming request in a tracing system.
	Parent        *TraceParent `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and re-attaches message metadata.
func (x *TraceContext) Reset() {
	*x = TraceContext{}
	mi := &file_flow_flow_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *TraceContext) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *TraceContext as a protobuf message.
func (*TraceContext) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (message index 4).
func (x *TraceContext) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TraceContext.ProtoReflect.Descriptor instead.
func (*TraceContext) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{4}
}

// GetParent returns the parent field, or nil if x is nil.
func (x *TraceContext) GetParent() *TraceParent {
	if x != nil {
		return x.Parent
	}
	return nil
}
// TraceParent identifies the incoming request in a tracing system.
type TraceParent struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// trace_id is a unique value that identifies a trace. It is a byte array
	// represented as a hex string.
	TraceId       string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and re-attaches message metadata.
func (x *TraceParent) Reset() {
	*x = TraceParent{}
	mi := &file_flow_flow_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *TraceParent) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *TraceParent as a protobuf message.
func (*TraceParent) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (message index 5).
func (x *TraceParent) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TraceParent.ProtoReflect.Descriptor instead.
func (*TraceParent) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{5}
}

// GetTraceId returns the trace_id field, or "" if x is nil.
func (x *TraceParent) GetTraceId() string {
	if x != nil {
		return x.TraceId
	}
	return ""
}
// Endpoint is the generated message type for flow.Endpoint: the identity,
// labels, and workload information of one side of a flow.
type Endpoint struct {
	state       protoimpl.MessageState `protogen:"open.v1"`
	ID          uint32                 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
	Identity    uint32                 `protobuf:"varint,2,opt,name=identity,proto3" json:"identity,omitempty"`
	ClusterName string                 `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	Namespace   string                 `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"`
	// labels in `foo=bar` format.
	Labels        []string    `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
	PodName       string      `protobuf:"bytes,5,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"`
	Workloads     []*Workload `protobuf:"bytes,6,rep,name=workloads,proto3" json:"workloads,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and re-attaches message metadata.
func (x *Endpoint) Reset() {
	*x = Endpoint{}
	mi := &file_flow_flow_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *Endpoint) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Endpoint as a protobuf message.
func (*Endpoint) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (message index 6).
func (x *Endpoint) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead.
func (*Endpoint) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{6}
}

// GetID returns the ID field, or 0 if x is nil.
func (x *Endpoint) GetID() uint32 {
	if x != nil {
		return x.ID
	}
	return 0
}

// GetIdentity returns the identity field, or 0 if x is nil.
func (x *Endpoint) GetIdentity() uint32 {
	if x != nil {
		return x.Identity
	}
	return 0
}

// GetClusterName returns the cluster_name field, or "" if x is nil.
func (x *Endpoint) GetClusterName() string {
	if x != nil {
		return x.ClusterName
	}
	return ""
}

// GetNamespace returns the namespace field, or "" if x is nil.
func (x *Endpoint) GetNamespace() string {
	if x != nil {
		return x.Namespace
	}
	return ""
}

// GetLabels returns the labels field, or nil if x is nil.
func (x *Endpoint) GetLabels() []string {
	if x != nil {
		return x.Labels
	}
	return nil
}

// GetPodName returns the pod_name field, or "" if x is nil.
func (x *Endpoint) GetPodName() string {
	if x != nil {
		return x.PodName
	}
	return ""
}

// GetWorkloads returns the workloads field, or nil if x is nil.
func (x *Endpoint) GetWorkloads() []*Workload {
	if x != nil {
		return x.Workloads
	}
	return nil
}
// Workload identifies a workload by its name and kind.
//
// NOTE(review): generated protobuf code (msgTypes index 7); do not edit
// by hand or reorder fields.
type Workload struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Name  string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Kind  string                 `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero Workload and rebinds its message info.
func (x *Workload) Reset() {
	*x = Workload{}
	mi := &file_flow_flow_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *Workload) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Workload as a protobuf message.
func (*Workload) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *Workload) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Workload.ProtoReflect.Descriptor instead.
func (*Workload) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{7}
}

// GetName returns x.Name, or "" when x is nil.
func (x *Workload) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetKind returns x.Kind, or "" when x is nil.
func (x *Workload) GetKind() string {
	if x != nil {
		return x.Kind
	}
	return ""
}
// TCP carries the L4 TCP information of a flow: source and destination
// ports plus the TCP header flags.
//
// NOTE(review): generated protobuf code (msgTypes index 8); do not edit
// by hand or reorder fields.
type TCP struct {
	state           protoimpl.MessageState `protogen:"open.v1"`
	SourcePort      uint32                 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
	DestinationPort uint32                 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"`
	Flags           *TCPFlags              `protobuf:"bytes,3,opt,name=flags,proto3" json:"flags,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero TCP and rebinds its message info.
func (x *TCP) Reset() {
	*x = TCP{}
	mi := &file_flow_flow_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *TCP) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *TCP as a protobuf message.
func (*TCP) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *TCP) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TCP.ProtoReflect.Descriptor instead.
func (*TCP) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{8}
}

// The getters below are nil-receiver-safe: each returns the field value,
// or the field type's zero value when x is nil.

func (x *TCP) GetSourcePort() uint32 {
	if x != nil {
		return x.SourcePort
	}
	return 0
}

func (x *TCP) GetDestinationPort() uint32 {
	if x != nil {
		return x.DestinationPort
	}
	return 0
}

func (x *TCP) GetFlags() *TCPFlags {
	if x != nil {
		return x.Flags
	}
	return nil
}
// IP carries the L3 addressing of a flow: source (and optionally the
// post-SNAT translated source), destination, IP version, and whether the
// datapath trace reason marks the flow as encrypted.
//
// NOTE(review): generated protobuf code (msgTypes index 9); do not edit
// by hand or reorder fields.
type IP struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	Source string                 `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
	// source_xlated is the post-translation source IP when the flow was
	// SNATed. When "source_xlated" is set, the "source" field is populated
	// with the pre-translation source IP address.
	SourceXlated string    `protobuf:"bytes,5,opt,name=source_xlated,json=sourceXlated,proto3" json:"source_xlated,omitempty"`
	Destination  string    `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
	IpVersion    IPVersion `protobuf:"varint,3,opt,name=ipVersion,proto3,enum=flow.IPVersion" json:"ipVersion,omitempty"`
	// This field indicates whether the TraceReasonEncryptMask is set or not.
	// https://github.com/cilium/cilium/blob/ba0ed147bd5bb342f67b1794c2ad13c6e99d5236/pkg/monitor/datapath_trace.go#L27
	Encrypted bool `protobuf:"varint,4,opt,name=encrypted,proto3" json:"encrypted,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero IP and rebinds its message info.
func (x *IP) Reset() {
	*x = IP{}
	mi := &file_flow_flow_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *IP) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IP as a protobuf message.
func (*IP) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *IP) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IP.ProtoReflect.Descriptor instead.
func (*IP) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{9}
}

// The getters below are nil-receiver-safe: each returns the field value,
// or the field type's zero value when x is nil.

func (x *IP) GetSource() string {
	if x != nil {
		return x.Source
	}
	return ""
}

func (x *IP) GetSourceXlated() string {
	if x != nil {
		return x.SourceXlated
	}
	return ""
}

func (x *IP) GetDestination() string {
	if x != nil {
		return x.Destination
	}
	return ""
}

func (x *IP) GetIpVersion() IPVersion {
	if x != nil {
		return x.IpVersion
	}
	return IPVersion_IP_NOT_USED
}

func (x *IP) GetEncrypted() bool {
	if x != nil {
		return x.Encrypted
	}
	return false
}
// Ethernet carries the L2 addressing of a flow: source and destination
// (MAC addresses represented as strings).
//
// NOTE(review): generated protobuf code (msgTypes index 10); do not edit
// by hand or reorder fields.
type Ethernet struct {
	state       protoimpl.MessageState `protogen:"open.v1"`
	Source      string                 `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
	Destination string                 `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero Ethernet and rebinds its message info.
func (x *Ethernet) Reset() {
	*x = Ethernet{}
	mi := &file_flow_flow_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *Ethernet) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Ethernet as a protobuf message.
func (*Ethernet) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *Ethernet) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Ethernet.ProtoReflect.Descriptor instead.
func (*Ethernet) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{10}
}

// GetSource returns x.Source, or "" when x is nil.
func (x *Ethernet) GetSource() string {
	if x != nil {
		return x.Source
	}
	return ""
}

// GetDestination returns x.Destination, or "" when x is nil.
func (x *Ethernet) GetDestination() string {
	if x != nil {
		return x.Destination
	}
	return ""
}
// TCPFlags is the set of TCP header flags of a flow, one boolean per
// flag (FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS).
//
// NOTE(review): generated protobuf code (msgTypes index 11); do not edit
// by hand or reorder fields.
type TCPFlags struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	FIN   bool                   `protobuf:"varint,1,opt,name=FIN,proto3" json:"FIN,omitempty"`
	SYN   bool                   `protobuf:"varint,2,opt,name=SYN,proto3" json:"SYN,omitempty"`
	RST   bool                   `protobuf:"varint,3,opt,name=RST,proto3" json:"RST,omitempty"`
	PSH   bool                   `protobuf:"varint,4,opt,name=PSH,proto3" json:"PSH,omitempty"`
	ACK   bool                   `protobuf:"varint,5,opt,name=ACK,proto3" json:"ACK,omitempty"`
	URG   bool                   `protobuf:"varint,6,opt,name=URG,proto3" json:"URG,omitempty"`
	ECE   bool                   `protobuf:"varint,7,opt,name=ECE,proto3" json:"ECE,omitempty"`
	CWR   bool                   `protobuf:"varint,8,opt,name=CWR,proto3" json:"CWR,omitempty"`
	NS    bool                   `protobuf:"varint,9,opt,name=NS,proto3" json:"NS,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero TCPFlags and rebinds its message info.
func (x *TCPFlags) Reset() {
	*x = TCPFlags{}
	mi := &file_flow_flow_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *TCPFlags) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *TCPFlags as a protobuf message.
func (*TCPFlags) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *TCPFlags) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TCPFlags.ProtoReflect.Descriptor instead.
func (*TCPFlags) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{11}
}

// The getters below are nil-receiver-safe: each returns the flag value,
// or false when x is nil.

func (x *TCPFlags) GetFIN() bool {
	if x != nil {
		return x.FIN
	}
	return false
}

func (x *TCPFlags) GetSYN() bool {
	if x != nil {
		return x.SYN
	}
	return false
}

func (x *TCPFlags) GetRST() bool {
	if x != nil {
		return x.RST
	}
	return false
}

func (x *TCPFlags) GetPSH() bool {
	if x != nil {
		return x.PSH
	}
	return false
}

func (x *TCPFlags) GetACK() bool {
	if x != nil {
		return x.ACK
	}
	return false
}

func (x *TCPFlags) GetURG() bool {
	if x != nil {
		return x.URG
	}
	return false
}

func (x *TCPFlags) GetECE() bool {
	if x != nil {
		return x.ECE
	}
	return false
}

func (x *TCPFlags) GetCWR() bool {
	if x != nil {
		return x.CWR
	}
	return false
}

func (x *TCPFlags) GetNS() bool {
	if x != nil {
		return x.NS
	}
	return false
}
// UDP carries the L4 UDP information of a flow: source and destination
// ports.
//
// NOTE(review): generated protobuf code (msgTypes index 12); do not edit
// by hand or reorder fields.
type UDP struct {
	state           protoimpl.MessageState `protogen:"open.v1"`
	SourcePort      uint32                 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
	DestinationPort uint32                 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero UDP and rebinds its message info.
func (x *UDP) Reset() {
	*x = UDP{}
	mi := &file_flow_flow_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *UDP) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *UDP as a protobuf message.
func (*UDP) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *UDP) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UDP.ProtoReflect.Descriptor instead.
func (*UDP) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{12}
}

// GetSourcePort returns x.SourcePort, or 0 when x is nil.
func (x *UDP) GetSourcePort() uint32 {
	if x != nil {
		return x.SourcePort
	}
	return 0
}

// GetDestinationPort returns x.DestinationPort, or 0 when x is nil.
func (x *UDP) GetDestinationPort() uint32 {
	if x != nil {
		return x.DestinationPort
	}
	return 0
}
// SCTP carries the L4 SCTP information of a flow: source and destination
// ports.
//
// NOTE(review): generated protobuf code (msgTypes index 13); do not edit
// by hand or reorder fields.
type SCTP struct {
	state           protoimpl.MessageState `protogen:"open.v1"`
	SourcePort      uint32                 `protobuf:"varint,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
	DestinationPort uint32                 `protobuf:"varint,2,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero SCTP and rebinds its message info.
func (x *SCTP) Reset() {
	*x = SCTP{}
	mi := &file_flow_flow_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *SCTP) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *SCTP as a protobuf message.
func (*SCTP) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *SCTP) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SCTP.ProtoReflect.Descriptor instead.
func (*SCTP) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{13}
}

// GetSourcePort returns x.SourcePort, or 0 when x is nil.
func (x *SCTP) GetSourcePort() uint32 {
	if x != nil {
		return x.SourcePort
	}
	return 0
}

// GetDestinationPort returns x.DestinationPort, or 0 when x is nil.
func (x *SCTP) GetDestinationPort() uint32 {
	if x != nil {
		return x.DestinationPort
	}
	return 0
}
// ICMPv4 carries the ICMPv4 type and code of a flow.
//
// NOTE(review): generated protobuf code (msgTypes index 14); do not edit
// by hand or reorder fields.
type ICMPv4 struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Type  uint32                 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
	Code  uint32                 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero ICMPv4 and rebinds its message info.
func (x *ICMPv4) Reset() {
	*x = ICMPv4{}
	mi := &file_flow_flow_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *ICMPv4) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ICMPv4 as a protobuf message.
func (*ICMPv4) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *ICMPv4) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ICMPv4.ProtoReflect.Descriptor instead.
func (*ICMPv4) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{14}
}

// GetType returns x.Type, or 0 when x is nil.
func (x *ICMPv4) GetType() uint32 {
	if x != nil {
		return x.Type
	}
	return 0
}

// GetCode returns x.Code, or 0 when x is nil.
func (x *ICMPv4) GetCode() uint32 {
	if x != nil {
		return x.Code
	}
	return 0
}
// ICMPv6 carries the ICMPv6 type and code of a flow.
//
// NOTE(review): generated protobuf code (msgTypes index 15); do not edit
// by hand or reorder fields.
type ICMPv6 struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Type  uint32                 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
	Code  uint32                 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero ICMPv6 and rebinds its message info.
func (x *ICMPv6) Reset() {
	*x = ICMPv6{}
	mi := &file_flow_flow_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *ICMPv6) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ICMPv6 as a protobuf message.
func (*ICMPv6) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *ICMPv6) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ICMPv6.ProtoReflect.Descriptor instead.
func (*ICMPv6) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{15}
}

// GetType returns x.Type, or 0 when x is nil.
func (x *ICMPv6) GetType() uint32 {
	if x != nil {
		return x.Type
	}
	return 0
}

// GetCode returns x.Code, or 0 when x is nil.
func (x *ICMPv6) GetCode() uint32 {
	if x != nil {
		return x.Code
	}
	return 0
}
// Tunnel describes the encapsulation of a flow: the tunnel protocol and
// the outer IP / L4 headers.
//
// NOTE(review): generated protobuf code (msgTypes index 16); do not edit
// by hand or reorder fields.
type Tunnel struct {
	state    protoimpl.MessageState `protogen:"open.v1"`
	Protocol Tunnel_Protocol        `protobuf:"varint,1,opt,name=protocol,proto3,enum=flow.Tunnel_Protocol" json:"protocol,omitempty"`
	IP       *IP                    `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"`
	L4       *Layer4                `protobuf:"bytes,3,opt,name=l4,proto3" json:"l4,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero Tunnel and rebinds its message info.
func (x *Tunnel) Reset() {
	*x = Tunnel{}
	mi := &file_flow_flow_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *Tunnel) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Tunnel as a protobuf message.
func (*Tunnel) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *Tunnel) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Tunnel.ProtoReflect.Descriptor instead.
func (*Tunnel) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{16}
}

// The getters below are nil-receiver-safe: each returns the field value,
// or the field type's zero value when x is nil.

func (x *Tunnel) GetProtocol() Tunnel_Protocol {
	if x != nil {
		return x.Protocol
	}
	return Tunnel_UNKNOWN
}

func (x *Tunnel) GetIP() *IP {
	if x != nil {
		return x.IP
	}
	return nil
}

func (x *Tunnel) GetL4() *Layer4 {
	if x != nil {
		return x.L4
	}
	return nil
}
// Policy identifies a network policy by name, namespace, labels, policy
// repository revision, and kind.
//
// NOTE(review): generated protobuf code (msgTypes index 17); do not edit
// by hand or reorder fields.
type Policy struct {
	state     protoimpl.MessageState `protogen:"open.v1"`
	Name      string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Namespace string                 `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
	Labels    []string               `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty"`
	Revision  uint64                 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
	Kind      string                 `protobuf:"bytes,5,opt,name=kind,proto3" json:"kind,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero Policy and rebinds its message info.
func (x *Policy) Reset() {
	*x = Policy{}
	mi := &file_flow_flow_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *Policy) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Policy as a protobuf message.
func (*Policy) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *Policy) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Policy.ProtoReflect.Descriptor instead.
func (*Policy) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{17}
}

// The getters below are nil-receiver-safe: each returns the field value,
// or the field type's zero value when x is nil.

func (x *Policy) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *Policy) GetNamespace() string {
	if x != nil {
		return x.Namespace
	}
	return ""
}

func (x *Policy) GetLabels() []string {
	if x != nil {
		return x.Labels
	}
	return nil
}

func (x *Policy) GetRevision() uint64 {
	if x != nil {
		return x.Revision
	}
	return 0
}

func (x *Policy) GetKind() string {
	if x != nil {
		return x.Kind
	}
	return ""
}
// EventTypeFilter is a filter describing a particular event type.
//
// NOTE(review): generated protobuf code (msgTypes index 18); do not edit
// by hand or reorder fields.
type EventTypeFilter struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// type is the primary flow type as defined by:
	// github.com/cilium/cilium/pkg/monitor/api.MessageType*
	Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
	// match_sub_type is set to true when matching on the sub_type should
	// be done. This flag is required as 0 is a valid sub_type.
	MatchSubType bool `protobuf:"varint,2,opt,name=match_sub_type,json=matchSubType,proto3" json:"match_sub_type,omitempty"`
	// sub_type is the secondary type, e.g.
	// - github.com/cilium/cilium/pkg/monitor/api.Trace*
	SubType int32 `protobuf:"varint,3,opt,name=sub_type,json=subType,proto3" json:"sub_type,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero EventTypeFilter and rebinds its message info.
func (x *EventTypeFilter) Reset() {
	*x = EventTypeFilter{}
	mi := &file_flow_flow_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *EventTypeFilter) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *EventTypeFilter as a protobuf message.
func (*EventTypeFilter) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *EventTypeFilter) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EventTypeFilter.ProtoReflect.Descriptor instead.
func (*EventTypeFilter) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{18}
}

// The getters below are nil-receiver-safe: each returns the field value,
// or the field type's zero value when x is nil.

func (x *EventTypeFilter) GetType() int32 {
	if x != nil {
		return x.Type
	}
	return 0
}

func (x *EventTypeFilter) GetMatchSubType() bool {
	if x != nil {
		return x.MatchSubType
	}
	return false
}

func (x *EventTypeFilter) GetSubType() int32 {
	if x != nil {
		return x.SubType
	}
	return 0
}
// CiliumEventType from which the flow originated.
//
// NOTE(review): generated protobuf code (msgTypes index 19); do not edit
// by hand or reorder fields.
type CiliumEventType struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// type of event the flow originated from, i.e.
	// github.com/cilium/cilium/pkg/monitor/api.MessageType*
	Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
	// sub_type may indicate more details depending on type, e.g.
	// - github.com/cilium/cilium/pkg/monitor/api.Trace*
	// - github.com/cilium/cilium/pkg/monitor/api.Drop*
	// - github.com/cilium/cilium/pkg/monitor/api.DbgCapture*
	SubType int32 `protobuf:"varint,2,opt,name=sub_type,json=subType,proto3" json:"sub_type,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero CiliumEventType and rebinds its message info.
func (x *CiliumEventType) Reset() {
	*x = CiliumEventType{}
	mi := &file_flow_flow_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *CiliumEventType) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CiliumEventType as a protobuf message.
func (*CiliumEventType) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *CiliumEventType) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CiliumEventType.ProtoReflect.Descriptor instead.
func (*CiliumEventType) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{19}
}

// GetType returns x.Type, or 0 when x is nil.
func (x *CiliumEventType) GetType() int32 {
	if x != nil {
		return x.Type
	}
	return 0
}

// GetSubType returns x.SubType, or 0 when x is nil.
func (x *CiliumEventType) GetSubType() int32 {
	if x != nil {
		return x.SubType
	}
	return 0
}
// FlowFilter represent an individual flow filter. All fields are optional. If
// multiple fields are set, then all fields must match for the filter to match.
//
// NOTE(review): generated protobuf code (msgTypes index 20); do not edit
// by hand or reorder fields.
type FlowFilter struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// uuid filters by a list of flow uuids.
	Uuid []string `protobuf:"bytes,29,rep,name=uuid,proto3" json:"uuid,omitempty"`
	// source_ip filters by a list of source ips. Each of the source ips can be
	// specified as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g.
	// "1.1.1.0/24").
	SourceIp []string `protobuf:"bytes,1,rep,name=source_ip,json=sourceIp,proto3" json:"source_ip,omitempty"`
	// source_ip_xlated filters by a list IPs. Each of the IPs can be specified
	// as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g.
	// "1.1.1.0/24").
	SourceIpXlated []string `protobuf:"bytes,34,rep,name=source_ip_xlated,json=sourceIpXlated,proto3" json:"source_ip_xlated,omitempty"`
	// source_pod filters by a list of source pod name prefixes, optionally
	// within a given namespace (e.g. "xwing", "kube-system/coredns-").
	// The pod name can be omitted to only filter by namespace
	// (e.g. "kube-system/") or the namespace can be omitted to filter for
	// pods in any namespace (e.g. "/xwing")
	SourcePod []string `protobuf:"bytes,2,rep,name=source_pod,json=sourcePod,proto3" json:"source_pod,omitempty"`
	// source_fqdn filters by a list of source fully qualified domain names
	SourceFqdn []string `protobuf:"bytes,7,rep,name=source_fqdn,json=sourceFqdn,proto3" json:"source_fqdn,omitempty"`
	// source_labels filters on a list of source label selectors. Selectors
	// support the full Kubernetes label selector syntax.
	SourceLabel []string `protobuf:"bytes,10,rep,name=source_label,json=sourceLabel,proto3" json:"source_label,omitempty"`
	// source_service filters on a list of source service names. This field
	// supports the same syntax as the source_pod field.
	SourceService []string `protobuf:"bytes,16,rep,name=source_service,json=sourceService,proto3" json:"source_service,omitempty"`
	// source_workload filters by a list of source workload.
	SourceWorkload []*Workload `protobuf:"bytes,26,rep,name=source_workload,json=sourceWorkload,proto3" json:"source_workload,omitempty"`
	// source_cluster_name filters by a list of source cluster names.
	SourceClusterName []string `protobuf:"bytes,37,rep,name=source_cluster_name,json=sourceClusterName,proto3" json:"source_cluster_name,omitempty"`
	// destination_ip filters by a list of destination ips. Each of the
	// destination ips can be specified as an exact match (e.g. "1.1.1.1") or
	// as a CIDR range (e.g. "1.1.1.0/24").
	DestinationIp []string `protobuf:"bytes,3,rep,name=destination_ip,json=destinationIp,proto3" json:"destination_ip,omitempty"`
	// destination_pod filters by a list of destination pod names
	DestinationPod []string `protobuf:"bytes,4,rep,name=destination_pod,json=destinationPod,proto3" json:"destination_pod,omitempty"`
	// destination_fqdn filters by a list of destination fully qualified domain names
	DestinationFqdn []string `protobuf:"bytes,8,rep,name=destination_fqdn,json=destinationFqdn,proto3" json:"destination_fqdn,omitempty"`
	// destination_label filters on a list of destination label selectors
	DestinationLabel []string `protobuf:"bytes,11,rep,name=destination_label,json=destinationLabel,proto3" json:"destination_label,omitempty"`
	// destination_service filters on a list of destination service names
	DestinationService []string `protobuf:"bytes,17,rep,name=destination_service,json=destinationService,proto3" json:"destination_service,omitempty"`
	// destination_workload filters by a list of destination workload.
	DestinationWorkload []*Workload `protobuf:"bytes,27,rep,name=destination_workload,json=destinationWorkload,proto3" json:"destination_workload,omitempty"`
	// destination_cluster_name filters by a list of destination cluster names.
	DestinationClusterName []string `protobuf:"bytes,38,rep,name=destination_cluster_name,json=destinationClusterName,proto3" json:"destination_cluster_name,omitempty"`
	// traffic_direction filters flow by direction of the connection, e.g.
	// ingress or egress.
	TrafficDirection []TrafficDirection `protobuf:"varint,30,rep,packed,name=traffic_direction,json=trafficDirection,proto3,enum=flow.TrafficDirection" json:"traffic_direction,omitempty"`
	// only return Flows that were classified with a particular verdict.
	Verdict []Verdict `protobuf:"varint,5,rep,packed,name=verdict,proto3,enum=flow.Verdict" json:"verdict,omitempty"`
	// only applicable to Verdict = DROPPED (e.g. "POLICY_DENIED", "UNSUPPORTED_L3_PROTOCOL")
	DropReasonDesc []DropReason `protobuf:"varint,33,rep,packed,name=drop_reason_desc,json=dropReasonDesc,proto3,enum=flow.DropReason" json:"drop_reason_desc,omitempty"`
	// interface is the network interface on which this flow was observed.
	Interface []*NetworkInterface `protobuf:"bytes,35,rep,name=interface,proto3" json:"interface,omitempty"`
	// event_type is the list of event types to filter on
	EventType []*EventTypeFilter `protobuf:"bytes,6,rep,name=event_type,json=eventType,proto3" json:"event_type,omitempty"`
	// http_status_code is a list of string prefixes (e.g. "4+", "404", "5+")
	// to filter on the HTTP status code
	HttpStatusCode []string `protobuf:"bytes,9,rep,name=http_status_code,json=httpStatusCode,proto3" json:"http_status_code,omitempty"`
	// protocol filters flows by L4 or L7 protocol, e.g. (e.g. "tcp", "http")
	Protocol []string `protobuf:"bytes,12,rep,name=protocol,proto3" json:"protocol,omitempty"`
	// source_port filters flows by L4 source port
	SourcePort []string `protobuf:"bytes,13,rep,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
	// destination_port filters flows by L4 destination port
	DestinationPort []string `protobuf:"bytes,14,rep,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"`
	// reply filters flows based on the direction of the flow.
	Reply []bool `protobuf:"varint,15,rep,packed,name=reply,proto3" json:"reply,omitempty"`
	// dns_query filters L7 DNS flows by query patterns (RE2 regex), e.g. 'kube.*local'.
	DnsQuery []string `protobuf:"bytes,18,rep,name=dns_query,json=dnsQuery,proto3" json:"dns_query,omitempty"`
	// source_identity filters by the security identity of the source endpoint.
	SourceIdentity []uint32 `protobuf:"varint,19,rep,packed,name=source_identity,json=sourceIdentity,proto3" json:"source_identity,omitempty"`
	// destination_identity filters by the security identity of the destination endpoint.
	DestinationIdentity []uint32 `protobuf:"varint,20,rep,packed,name=destination_identity,json=destinationIdentity,proto3" json:"destination_identity,omitempty"`
	// GET, POST, PUT, etc. methods. This type of field is well suited for an
	// enum but every single existing place is using a string already.
	HttpMethod []string `protobuf:"bytes,21,rep,name=http_method,json=httpMethod,proto3" json:"http_method,omitempty"`
	// http_path is a list of regular expressions to filter on the HTTP path.
	HttpPath []string `protobuf:"bytes,22,rep,name=http_path,json=httpPath,proto3" json:"http_path,omitempty"`
	// http_url is a list of regular expressions to filter on the HTTP URL.
	HttpUrl []string `protobuf:"bytes,31,rep,name=http_url,json=httpUrl,proto3" json:"http_url,omitempty"`
	// http_header is a list of key:value pairs to filter on the HTTP headers.
	HttpHeader []*HTTPHeader `protobuf:"bytes,32,rep,name=http_header,json=httpHeader,proto3" json:"http_header,omitempty"`
	// tcp_flags filters flows based on TCP header flags
	TcpFlags []*TCPFlags `protobuf:"bytes,23,rep,name=tcp_flags,json=tcpFlags,proto3" json:"tcp_flags,omitempty"`
	// node_name is a list of patterns to filter on the node name, e.g. "k8s*",
	// "test-cluster/*.domain.com", "cluster-name/" etc.
	NodeName []string `protobuf:"bytes,24,rep,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// node_labels filters on a list of node label selectors. Selectors support
	// the full Kubernetes label selector syntax.
	NodeLabels []string `protobuf:"bytes,36,rep,name=node_labels,json=nodeLabels,proto3" json:"node_labels,omitempty"`
	// filter based on IP version (ipv4 or ipv6)
	IpVersion []IPVersion `protobuf:"varint,25,rep,packed,name=ip_version,json=ipVersion,proto3,enum=flow.IPVersion" json:"ip_version,omitempty"`
	// trace_id filters flows by trace ID
	TraceId []string `protobuf:"bytes,28,rep,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
	// experimental contains filters that are not stable yet. Support for
	// experimental features is always optional and subject to change.
	Experimental *FlowFilter_Experimental `protobuf:"bytes,999,opt,name=experimental,proto3" json:"experimental,omitempty"`
	// protobuf runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero FlowFilter and rebinds its message info.
func (x *FlowFilter) Reset() {
	*x = FlowFilter{}
	mi := &file_flow_flow_proto_msgTypes[20]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of x.
func (x *FlowFilter) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *FlowFilter as a protobuf message.
func (*FlowFilter) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the
// message info on first use; a nil receiver falls back to mi.MessageOf.
func (x *FlowFilter) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[20]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FlowFilter.ProtoReflect.Descriptor instead.
func (*FlowFilter) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{20}
}

// The getters below are nil-receiver-safe: each returns the field value,
// or the field type's zero value (nil for all the repeated fields) when
// x is nil.

func (x *FlowFilter) GetUuid() []string {
	if x != nil {
		return x.Uuid
	}
	return nil
}

func (x *FlowFilter) GetSourceIp() []string {
	if x != nil {
		return x.SourceIp
	}
	return nil
}

func (x *FlowFilter) GetSourceIpXlated() []string {
	if x != nil {
		return x.SourceIpXlated
	}
	return nil
}

func (x *FlowFilter) GetSourcePod() []string {
	if x != nil {
		return x.SourcePod
	}
	return nil
}

func (x *FlowFilter) GetSourceFqdn() []string {
	if x != nil {
		return x.SourceFqdn
	}
	return nil
}

func (x *FlowFilter) GetSourceLabel() []string {
	if x != nil {
		return x.SourceLabel
	}
	return nil
}

func (x *FlowFilter) GetSourceService() []string {
	if x != nil {
		return x.SourceService
	}
	return nil
}

func (x *FlowFilter) GetSourceWorkload() []*Workload {
	if x != nil {
		return x.SourceWorkload
	}
	return nil
}

func (x *FlowFilter) GetSourceClusterName() []string {
	if x != nil {
		return x.SourceClusterName
	}
	return nil
}

func (x *FlowFilter) GetDestinationIp() []string {
	if x != nil {
		return x.DestinationIp
	}
	return nil
}

func (x *FlowFilter) GetDestinationPod() []string {
	if x != nil {
		return x.DestinationPod
	}
	return nil
}

func (x *FlowFilter) GetDestinationFqdn() []string {
	if x != nil {
		return x.DestinationFqdn
	}
	return nil
}

func (x *FlowFilter) GetDestinationLabel() []string {
	if x != nil {
		return x.DestinationLabel
	}
	return nil
}

func (x *FlowFilter) GetDestinationService() []string {
	if x != nil {
		return x.DestinationService
	}
	return nil
}

func (x *FlowFilter) GetDestinationWorkload() []*Workload {
	if x != nil {
		return x.DestinationWorkload
	}
	return nil
}

func (x *FlowFilter) GetDestinationClusterName() []string {
	if x != nil {
		return x.DestinationClusterName
	}
	return nil
}

func (x *FlowFilter) GetTrafficDirection() []TrafficDirection {
	if x != nil {
		return x.TrafficDirection
	}
	return nil
}

func (x *FlowFilter) GetVerdict() []Verdict {
	if x != nil {
		return x.Verdict
	}
	return nil
}

func (x *FlowFilter) GetDropReasonDesc() []DropReason {
	if x != nil {
		return x.DropReasonDesc
	}
	return nil
}

func (x *FlowFilter) GetInterface() []*NetworkInterface {
	if x != nil {
		return x.Interface
	}
	return nil
}

func (x *FlowFilter) GetEventType() []*EventTypeFilter {
	if x != nil {
		return x.EventType
	}
	return nil
}

func (x *FlowFilter) GetHttpStatusCode() []string {
	if x != nil {
		return x.HttpStatusCode
	}
	return nil
}

func (x *FlowFilter) GetProtocol() []string {
	if x != nil {
		return x.Protocol
	}
	return nil
}

func (x *FlowFilter) GetSourcePort() []string {
	if x != nil {
		return x.SourcePort
	}
	return nil
}

func (x *FlowFilter) GetDestinationPort() []string {
	if x != nil {
		return x.DestinationPort
	}
	return nil
}

func (x *FlowFilter) GetReply() []bool {
	if x != nil {
		return x.Reply
	}
	return nil
}

func (x *FlowFilter) GetDnsQuery() []string {
	if x != nil {
		return x.DnsQuery
	}
	return nil
}

func (x *FlowFilter) GetSourceIdentity() []uint32 {
	if x != nil {
		return x.SourceIdentity
	}
	return nil
}

func (x *FlowFilter) GetDestinationIdentity() []uint32 {
	if x != nil {
		return x.DestinationIdentity
	}
	return nil
}

func (x *FlowFilter) GetHttpMethod() []string {
	if x != nil {
		return x.HttpMethod
	}
	return nil
}

func (x *FlowFilter) GetHttpPath() []string {
	if x != nil {
		return x.HttpPath
	}
	return nil
}

func (x *FlowFilter) GetHttpUrl() []string {
	if x != nil {
		return x.HttpUrl
	}
	return nil
}

func (x *FlowFilter) GetHttpHeader() []*HTTPHeader {
	if x != nil {
		return x.HttpHeader
	}
	return nil
}
func (x *FlowFilter) GetTcpFlags() []*TCPFlags {
if x != nil {
return x.TcpFlags
}
return nil
}
func (x *FlowFilter) GetNodeName() []string {
if x != nil {
return x.NodeName
}
return nil
}
func (x *FlowFilter) GetNodeLabels() []string {
if x != nil {
return x.NodeLabels
}
return nil
}
func (x *FlowFilter) GetIpVersion() []IPVersion {
if x != nil {
return x.IpVersion
}
return nil
}
func (x *FlowFilter) GetTraceId() []string {
if x != nil {
return x.TraceId
}
return nil
}
func (x *FlowFilter) GetExperimental() *FlowFilter_Experimental {
if x != nil {
return x.Experimental
}
return nil
}
// DNS flow. This is basically directly mapped from Cilium's [LogRecordDNS](https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264):
type DNS struct {
state protoimpl.MessageState `protogen:"open.v1"`
// DNS name that's being looked up: e.g. "isovalent.com."
Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
// List of IP addresses in the DNS response.
Ips []string `protobuf:"bytes,2,rep,name=ips,proto3" json:"ips,omitempty"`
// TTL in the DNS response.
Ttl uint32 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
// List of CNames in the DNS response.
Cnames []string `protobuf:"bytes,4,rep,name=cnames,proto3" json:"cnames,omitempty"`
// Corresponds to DNSDataSource defined in:
//
// https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L253
ObservationSource string `protobuf:"bytes,5,opt,name=observation_source,json=observationSource,proto3" json:"observation_source,omitempty"`
// Return code of the DNS request defined in:
//
// https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6
Rcode uint32 `protobuf:"varint,6,opt,name=rcode,proto3" json:"rcode,omitempty"`
// String representation of qtypes defined in:
//
// https://tools.ietf.org/html/rfc1035#section-3.2.3
Qtypes []string `protobuf:"bytes,7,rep,name=qtypes,proto3" json:"qtypes,omitempty"`
// String representation of rrtypes defined in:
// https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4
Rrtypes []string `protobuf:"bytes,8,rep,name=rrtypes,proto3" json:"rrtypes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate (Reset/String/ProtoMessage/ProtoReflect/Descriptor).

// Reset zeroes the message and re-registers its cached message info.
func (x *DNS) Reset() {
*x = DNS{}
mi := &file_flow_flow_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DNS) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DNS) ProtoMessage() {}
func (x *DNS) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DNS.ProtoReflect.Descriptor instead.
func (*DNS) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{21}
}

// Nil-safe accessors: each returns the field, or its zero value on a nil receiver.

func (x *DNS) GetQuery() string {
if x != nil {
return x.Query
}
return ""
}
func (x *DNS) GetIps() []string {
if x != nil {
return x.Ips
}
return nil
}
func (x *DNS) GetTtl() uint32 {
if x != nil {
return x.Ttl
}
return 0
}
func (x *DNS) GetCnames() []string {
if x != nil {
return x.Cnames
}
return nil
}
func (x *DNS) GetObservationSource() string {
if x != nil {
return x.ObservationSource
}
return ""
}
func (x *DNS) GetRcode() uint32 {
if x != nil {
return x.Rcode
}
return 0
}
func (x *DNS) GetQtypes() []string {
if x != nil {
return x.Qtypes
}
return nil
}
func (x *DNS) GetRrtypes() []string {
if x != nil {
return x.Rrtypes
}
return nil
}
// HTTPHeader is a single HTTP header as a key/value pair.
type HTTPHeader struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *HTTPHeader) Reset() {
*x = HTTPHeader{}
mi := &file_flow_flow_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HTTPHeader) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HTTPHeader) ProtoMessage() {}
func (x *HTTPHeader) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HTTPHeader.ProtoReflect.Descriptor instead.
func (*HTTPHeader) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{22}
}
func (x *HTTPHeader) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (x *HTTPHeader) GetValue() string {
if x != nil {
return x.Value
}
return ""
}
// L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogRecordHTTP](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206) type.
type HTTP struct {
state protoimpl.MessageState `protogen:"open.v1"`
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"`
Protocol string `protobuf:"bytes,4,opt,name=protocol,proto3" json:"protocol,omitempty"`
Headers []*HTTPHeader `protobuf:"bytes,5,rep,name=headers,proto3" json:"headers,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *HTTP) Reset() {
*x = HTTP{}
mi := &file_flow_flow_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HTTP) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HTTP) ProtoMessage() {}
func (x *HTTP) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HTTP.ProtoReflect.Descriptor instead.
func (*HTTP) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{23}
}
func (x *HTTP) GetCode() uint32 {
if x != nil {
return x.Code
}
return 0
}
func (x *HTTP) GetMethod() string {
if x != nil {
return x.Method
}
return ""
}
func (x *HTTP) GetUrl() string {
if x != nil {
return x.Url
}
return ""
}
func (x *HTTP) GetProtocol() string {
if x != nil {
return x.Protocol
}
return ""
}
func (x *HTTP) GetHeaders() []*HTTPHeader {
if x != nil {
return x.Headers
}
return nil
}
// L7 information for Kafka flows. It corresponds to Cilium's [accesslog.LogRecordKafka](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229) type.
type Kafka struct {
state protoimpl.MessageState `protogen:"open.v1"`
ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
ApiVersion int32 `protobuf:"varint,2,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
ApiKey string `protobuf:"bytes,3,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
CorrelationId int32 `protobuf:"varint,4,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"`
Topic string `protobuf:"bytes,5,opt,name=topic,proto3" json:"topic,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *Kafka) Reset() {
*x = Kafka{}
mi := &file_flow_flow_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Kafka) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Kafka) ProtoMessage() {}
func (x *Kafka) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Kafka.ProtoReflect.Descriptor instead.
func (*Kafka) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{24}
}
func (x *Kafka) GetErrorCode() int32 {
if x != nil {
return x.ErrorCode
}
return 0
}
func (x *Kafka) GetApiVersion() int32 {
if x != nil {
return x.ApiVersion
}
return 0
}
func (x *Kafka) GetApiKey() string {
if x != nil {
return x.ApiKey
}
return ""
}
func (x *Kafka) GetCorrelationId() int32 {
if x != nil {
return x.CorrelationId
}
return 0
}
func (x *Kafka) GetTopic() string {
if x != nil {
return x.Topic
}
return ""
}
// Service identifies a Kubernetes service by name and namespace.
type Service struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *Service) Reset() {
*x = Service{}
mi := &file_flow_flow_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Service) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Service) ProtoMessage() {}
func (x *Service) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Service.ProtoReflect.Descriptor instead.
func (*Service) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{25}
}
func (x *Service) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Service) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
// LostEvent is a message which notifies consumers about a loss of events
// that happened before the events were captured by Hubble.
type LostEvent struct {
state protoimpl.MessageState `protogen:"open.v1"`
// source is the location where events got lost.
Source LostEventSource `protobuf:"varint,1,opt,name=source,proto3,enum=flow.LostEventSource" json:"source,omitempty"`
// num_events_lost is the number of events that haven been lost at source.
NumEventsLost uint64 `protobuf:"varint,2,opt,name=num_events_lost,json=numEventsLost,proto3" json:"num_events_lost,omitempty"`
// cpu on which the event was lost if the source of lost events is
// PERF_EVENT_RING_BUFFER.
Cpu *wrapperspb.Int32Value `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *LostEvent) Reset() {
*x = LostEvent{}
mi := &file_flow_flow_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *LostEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LostEvent) ProtoMessage() {}
func (x *LostEvent) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LostEvent.ProtoReflect.Descriptor instead.
func (*LostEvent) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{26}
}
func (x *LostEvent) GetSource() LostEventSource {
if x != nil {
return x.Source
}
return LostEventSource_UNKNOWN_LOST_EVENT_SOURCE
}
func (x *LostEvent) GetNumEventsLost() uint64 {
if x != nil {
return x.NumEventsLost
}
return 0
}
func (x *LostEvent) GetCpu() *wrapperspb.Int32Value {
if x != nil {
return x.Cpu
}
return nil
}
// AgentEvent carries a Cilium agent notification; the concrete payload is
// one of the oneof wrapper types listed below, selected via Notification.
type AgentEvent struct {
state protoimpl.MessageState `protogen:"open.v1"`
Type AgentEventType `protobuf:"varint,1,opt,name=type,proto3,enum=flow.AgentEventType" json:"type,omitempty"`
// Types that are valid to be assigned to Notification:
//
// *AgentEvent_Unknown
// *AgentEvent_AgentStart
// *AgentEvent_PolicyUpdate
// *AgentEvent_EndpointRegenerate
// *AgentEvent_EndpointUpdate
// *AgentEvent_IpcacheUpdate
// *AgentEvent_ServiceUpsert
// *AgentEvent_ServiceDelete
Notification isAgentEvent_Notification `protobuf_oneof:"notification"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate.
func (x *AgentEvent) Reset() {
*x = AgentEvent{}
mi := &file_flow_flow_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *AgentEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AgentEvent) ProtoMessage() {}
func (x *AgentEvent) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AgentEvent.ProtoReflect.Descriptor instead.
func (*AgentEvent) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{27}
}
func (x *AgentEvent) GetType() AgentEventType {
if x != nil {
return x.Type
}
return AgentEventType_AGENT_EVENT_UNKNOWN
}

// GetNotification returns the raw oneof value; the typed getters below
// return the payload only when that specific variant is set, nil otherwise.
func (x *AgentEvent) GetNotification() isAgentEvent_Notification {
if x != nil {
return x.Notification
}
return nil
}
func (x *AgentEvent) GetUnknown() *AgentEventUnknown {
if x != nil {
if x, ok := x.Notification.(*AgentEvent_Unknown); ok {
return x.Unknown
}
}
return nil
}
func (x *AgentEvent) GetAgentStart() *TimeNotification {
if x != nil {
if x, ok := x.Notification.(*AgentEvent_AgentStart); ok {
return x.AgentStart
}
}
return nil
}
func (x *AgentEvent) GetPolicyUpdate() *PolicyUpdateNotification {
if x != nil {
if x, ok := x.Notification.(*AgentEvent_PolicyUpdate); ok {
return x.PolicyUpdate
}
}
return nil
}
func (x *AgentEvent) GetEndpointRegenerate() *EndpointRegenNotification {
if x != nil {
if x, ok := x.Notification.(*AgentEvent_EndpointRegenerate); ok {
return x.EndpointRegenerate
}
}
return nil
}
func (x *AgentEvent) GetEndpointUpdate() *EndpointUpdateNotification {
if x != nil {
if x, ok := x.Notification.(*AgentEvent_EndpointUpdate); ok {
return x.EndpointUpdate
}
}
return nil
}
func (x *AgentEvent) GetIpcacheUpdate() *IPCacheNotification {
if x != nil {
if x, ok := x.Notification.(*AgentEvent_IpcacheUpdate); ok {
return x.IpcacheUpdate
}
}
return nil
}
// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *AgentEvent) GetServiceUpsert() *ServiceUpsertNotification {
if x != nil {
if x, ok := x.Notification.(*AgentEvent_ServiceUpsert); ok {
return x.ServiceUpsert
}
}
return nil
}
// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *AgentEvent) GetServiceDelete() *ServiceDeleteNotification {
if x != nil {
if x, ok := x.Notification.(*AgentEvent_ServiceDelete); ok {
return x.ServiceDelete
}
}
return nil
}

// isAgentEvent_Notification is the marker interface implemented by every
// oneof wrapper type for the notification field.
type isAgentEvent_Notification interface {
isAgentEvent_Notification()
}
type AgentEvent_Unknown struct {
Unknown *AgentEventUnknown `protobuf:"bytes,100,opt,name=unknown,proto3,oneof"`
}
type AgentEvent_AgentStart struct {
AgentStart *TimeNotification `protobuf:"bytes,101,opt,name=agent_start,json=agentStart,proto3,oneof"`
}
type AgentEvent_PolicyUpdate struct {
// used for POLICY_UPDATED and POLICY_DELETED
PolicyUpdate *PolicyUpdateNotification `protobuf:"bytes,102,opt,name=policy_update,json=policyUpdate,proto3,oneof"`
}
type AgentEvent_EndpointRegenerate struct {
// used for ENDPOINT_REGENERATE_SUCCESS and ENDPOINT_REGENERATE_FAILURE
EndpointRegenerate *EndpointRegenNotification `protobuf:"bytes,103,opt,name=endpoint_regenerate,json=endpointRegenerate,proto3,oneof"`
}
type AgentEvent_EndpointUpdate struct {
// used for ENDPOINT_CREATED and ENDPOINT_DELETED
EndpointUpdate *EndpointUpdateNotification `protobuf:"bytes,104,opt,name=endpoint_update,json=endpointUpdate,proto3,oneof"`
}
type AgentEvent_IpcacheUpdate struct {
// used for IPCACHE_UPSERTED and IPCACHE_DELETED
IpcacheUpdate *IPCacheNotification `protobuf:"bytes,105,opt,name=ipcache_update,json=ipcacheUpdate,proto3,oneof"`
}
type AgentEvent_ServiceUpsert struct {
// Deprecated: Marked as deprecated in flow/flow.proto.
ServiceUpsert *ServiceUpsertNotification `protobuf:"bytes,106,opt,name=service_upsert,json=serviceUpsert,proto3,oneof"`
}
type AgentEvent_ServiceDelete struct {
// Deprecated: Marked as deprecated in flow/flow.proto.
ServiceDelete *ServiceDeleteNotification `protobuf:"bytes,107,opt,name=service_delete,json=serviceDelete,proto3,oneof"`
}

// Marker-interface implementations for the oneof wrappers.
func (*AgentEvent_Unknown) isAgentEvent_Notification() {}
func (*AgentEvent_AgentStart) isAgentEvent_Notification() {}
func (*AgentEvent_PolicyUpdate) isAgentEvent_Notification() {}
func (*AgentEvent_EndpointRegenerate) isAgentEvent_Notification() {}
func (*AgentEvent_EndpointUpdate) isAgentEvent_Notification() {}
func (*AgentEvent_IpcacheUpdate) isAgentEvent_Notification() {}
func (*AgentEvent_ServiceUpsert) isAgentEvent_Notification() {}
func (*AgentEvent_ServiceDelete) isAgentEvent_Notification() {}
// AgentEventUnknown carries an agent notification whose type was not
// recognized, as raw strings.
type AgentEventUnknown struct {
state protoimpl.MessageState `protogen:"open.v1"`
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Notification string `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *AgentEventUnknown) Reset() {
*x = AgentEventUnknown{}
mi := &file_flow_flow_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *AgentEventUnknown) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AgentEventUnknown) ProtoMessage() {}
func (x *AgentEventUnknown) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AgentEventUnknown.ProtoReflect.Descriptor instead.
func (*AgentEventUnknown) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{28}
}
func (x *AgentEventUnknown) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *AgentEventUnknown) GetNotification() string {
if x != nil {
return x.Notification
}
return ""
}
// TimeNotification wraps a single timestamp payload.
type TimeNotification struct {
state protoimpl.MessageState `protogen:"open.v1"`
Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by a nil-safe accessor.
func (x *TimeNotification) Reset() {
*x = TimeNotification{}
mi := &file_flow_flow_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNotification) ProtoMessage() {}
func (x *TimeNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNotification.ProtoReflect.Descriptor instead.
func (*TimeNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{29}
}
func (x *TimeNotification) GetTime() *timestamppb.Timestamp {
if x != nil {
return x.Time
}
return nil
}
// PolicyUpdateNotification describes a policy change: the affected labels,
// the resulting policy revision, and the rule count.
type PolicyUpdateNotification struct {
state protoimpl.MessageState `protogen:"open.v1"`
Labels []string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"`
Revision uint64 `protobuf:"varint,2,opt,name=revision,proto3" json:"revision,omitempty"`
RuleCount int64 `protobuf:"varint,3,opt,name=rule_count,json=ruleCount,proto3" json:"rule_count,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *PolicyUpdateNotification) Reset() {
*x = PolicyUpdateNotification{}
mi := &file_flow_flow_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PolicyUpdateNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PolicyUpdateNotification) ProtoMessage() {}
func (x *PolicyUpdateNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PolicyUpdateNotification.ProtoReflect.Descriptor instead.
func (*PolicyUpdateNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{30}
}
func (x *PolicyUpdateNotification) GetLabels() []string {
if x != nil {
return x.Labels
}
return nil
}
func (x *PolicyUpdateNotification) GetRevision() uint64 {
if x != nil {
return x.Revision
}
return 0
}
func (x *PolicyUpdateNotification) GetRuleCount() int64 {
if x != nil {
return x.RuleCount
}
return 0
}
// EndpointRegenNotification reports an endpoint regeneration by id and
// labels; Error is non-empty when the regeneration failed.
type EndpointRegenNotification struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"`
Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *EndpointRegenNotification) Reset() {
*x = EndpointRegenNotification{}
mi := &file_flow_flow_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EndpointRegenNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EndpointRegenNotification) ProtoMessage() {}
func (x *EndpointRegenNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EndpointRegenNotification.ProtoReflect.Descriptor instead.
func (*EndpointRegenNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{31}
}
func (x *EndpointRegenNotification) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *EndpointRegenNotification) GetLabels() []string {
if x != nil {
return x.Labels
}
return nil
}
func (x *EndpointRegenNotification) GetError() string {
if x != nil {
return x.Error
}
return ""
}
// EndpointUpdateNotification reports an endpoint change, including the
// endpoint's id, labels, and its pod name/namespace.
type EndpointUpdateNotification struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"`
Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
PodName string `protobuf:"bytes,4,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"`
Namespace string `protobuf:"bytes,5,opt,name=namespace,proto3" json:"namespace,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *EndpointUpdateNotification) Reset() {
*x = EndpointUpdateNotification{}
mi := &file_flow_flow_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EndpointUpdateNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EndpointUpdateNotification) ProtoMessage() {}
func (x *EndpointUpdateNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EndpointUpdateNotification.ProtoReflect.Descriptor instead.
func (*EndpointUpdateNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{32}
}
func (x *EndpointUpdateNotification) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *EndpointUpdateNotification) GetLabels() []string {
if x != nil {
return x.Labels
}
return nil
}
func (x *EndpointUpdateNotification) GetError() string {
if x != nil {
return x.Error
}
return ""
}
func (x *EndpointUpdateNotification) GetPodName() string {
if x != nil {
return x.PodName
}
return ""
}
func (x *EndpointUpdateNotification) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
// IPCacheNotification reports an ipcache entry change: the CIDR, its new
// and previous identity, host IPs, encryption key, and pod metadata.
type IPCacheNotification struct {
state protoimpl.MessageState `protogen:"open.v1"`
Cidr string `protobuf:"bytes,1,opt,name=cidr,proto3" json:"cidr,omitempty"`
Identity uint32 `protobuf:"varint,2,opt,name=identity,proto3" json:"identity,omitempty"`
OldIdentity *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=old_identity,json=oldIdentity,proto3" json:"old_identity,omitempty"`
HostIp string `protobuf:"bytes,4,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"`
OldHostIp string `protobuf:"bytes,5,opt,name=old_host_ip,json=oldHostIp,proto3" json:"old_host_ip,omitempty"`
EncryptKey uint32 `protobuf:"varint,6,opt,name=encrypt_key,json=encryptKey,proto3" json:"encrypt_key,omitempty"`
Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"`
PodName string `protobuf:"bytes,8,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *IPCacheNotification) Reset() {
*x = IPCacheNotification{}
mi := &file_flow_flow_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IPCacheNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IPCacheNotification) ProtoMessage() {}
func (x *IPCacheNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IPCacheNotification.ProtoReflect.Descriptor instead.
func (*IPCacheNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{33}
}
func (x *IPCacheNotification) GetCidr() string {
if x != nil {
return x.Cidr
}
return ""
}
func (x *IPCacheNotification) GetIdentity() uint32 {
if x != nil {
return x.Identity
}
return 0
}
func (x *IPCacheNotification) GetOldIdentity() *wrapperspb.UInt32Value {
if x != nil {
return x.OldIdentity
}
return nil
}
func (x *IPCacheNotification) GetHostIp() string {
if x != nil {
return x.HostIp
}
return ""
}
func (x *IPCacheNotification) GetOldHostIp() string {
if x != nil {
return x.OldHostIp
}
return ""
}
func (x *IPCacheNotification) GetEncryptKey() uint32 {
if x != nil {
return x.EncryptKey
}
return 0
}
func (x *IPCacheNotification) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
func (x *IPCacheNotification) GetPodName() string {
if x != nil {
return x.PodName
}
return ""
}
// ServiceUpsertNotificationAddr is an ip/port address pair used by
// ServiceUpsertNotification.
//
// Deprecated: Marked as deprecated in flow/flow.proto.
type ServiceUpsertNotificationAddr struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"`
Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *ServiceUpsertNotificationAddr) Reset() {
*x = ServiceUpsertNotificationAddr{}
mi := &file_flow_flow_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ServiceUpsertNotificationAddr) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ServiceUpsertNotificationAddr) ProtoMessage() {}
func (x *ServiceUpsertNotificationAddr) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ServiceUpsertNotificationAddr.ProtoReflect.Descriptor instead.
func (*ServiceUpsertNotificationAddr) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{34}
}
func (x *ServiceUpsertNotificationAddr) GetIp() string {
if x != nil {
return x.Ip
}
return ""
}
func (x *ServiceUpsertNotificationAddr) GetPort() uint32 {
if x != nil {
return x.Port
}
return 0
}
// ServiceUpsertNotification reports a service create/update: the service id,
// frontend/backend addresses, type, traffic policies, and name/namespace.
//
// Deprecated: Marked as deprecated in flow/flow.proto.
type ServiceUpsertNotification struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
FrontendAddress *ServiceUpsertNotificationAddr `protobuf:"bytes,2,opt,name=frontend_address,json=frontendAddress,proto3" json:"frontend_address,omitempty"`
BackendAddresses []*ServiceUpsertNotificationAddr `protobuf:"bytes,3,rep,name=backend_addresses,json=backendAddresses,proto3" json:"backend_addresses,omitempty"`
Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"`
// Deprecated: Marked as deprecated in flow/flow.proto.
TrafficPolicy string `protobuf:"bytes,5,opt,name=traffic_policy,json=trafficPolicy,proto3" json:"traffic_policy,omitempty"`
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"`
ExtTrafficPolicy string `protobuf:"bytes,8,opt,name=ext_traffic_policy,json=extTrafficPolicy,proto3" json:"ext_traffic_policy,omitempty"`
IntTrafficPolicy string `protobuf:"bytes,9,opt,name=int_traffic_policy,json=intTrafficPolicy,proto3" json:"int_traffic_policy,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Standard protoc-gen-go boilerplate followed by nil-safe accessors.
func (x *ServiceUpsertNotification) Reset() {
*x = ServiceUpsertNotification{}
mi := &file_flow_flow_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ServiceUpsertNotification) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ServiceUpsertNotification) ProtoMessage() {}
func (x *ServiceUpsertNotification) ProtoReflect() protoreflect.Message {
mi := &file_flow_flow_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ServiceUpsertNotification.ProtoReflect.Descriptor instead.
func (*ServiceUpsertNotification) Descriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{35}
}
func (x *ServiceUpsertNotification) GetId() uint32 {
if x != nil {
return x.Id
}
return 0
}
func (x *ServiceUpsertNotification) GetFrontendAddress() *ServiceUpsertNotificationAddr {
if x != nil {
return x.FrontendAddress
}
return nil
}
func (x *ServiceUpsertNotification) GetBackendAddresses() []*ServiceUpsertNotificationAddr {
if x != nil {
return x.BackendAddresses
}
return nil
}
func (x *ServiceUpsertNotification) GetType() string {
if x != nil {
return x.Type
}
return ""
}
// Deprecated: Marked as deprecated in flow/flow.proto.
func (x *ServiceUpsertNotification) GetTrafficPolicy() string {
if x != nil {
return x.TrafficPolicy
}
return ""
}
func (x *ServiceUpsertNotification) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ServiceUpsertNotification) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
func (x *ServiceUpsertNotification) GetExtTrafficPolicy() string {
if x != nil {
return x.ExtTrafficPolicy
}
return ""
}
func (x *ServiceUpsertNotification) GetIntTrafficPolicy() string {
if x != nil {
return x.IntTrafficPolicy
}
return ""
}
// ServiceDeleteNotification is the generated binding for the
// flow.ServiceDeleteNotification message: per the embedded descriptor it is
// carried on AgentEvent's service_delete field and holds only the service id.
//
// Deprecated: Marked as deprecated in flow/flow.proto.
type ServiceDeleteNotification struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Id            uint32                 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-binds it to its generated MessageInfo
// (entry 36 of this file's message-type table).
func (x *ServiceDeleteNotification) Reset() {
	*x = ServiceDeleteNotification{}
	mi := &file_flow_flow_proto_msgTypes[36]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the standard protobuf text format.
func (x *ServiceDeleteNotification) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ServiceDeleteNotification) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// MessageInfo on first use for non-nil receivers.
func (x *ServiceDeleteNotification) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[36]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServiceDeleteNotification.ProtoReflect.Descriptor instead.
func (*ServiceDeleteNotification) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{36}
}

// GetId returns the deleted service's id, or 0 if the receiver is nil.
func (x *ServiceDeleteNotification) GetId() uint32 {
	if x != nil {
		return x.Id
	}
	return 0
}
// NetworkInterface is the generated binding for the flow.NetworkInterface
// message: an interface identified by its numeric index and name.
type NetworkInterface struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Index         uint32                 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
	Name          string                 `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-binds it to its generated MessageInfo
// (entry 37 of this file's message-type table).
func (x *NetworkInterface) Reset() {
	*x = NetworkInterface{}
	mi := &file_flow_flow_proto_msgTypes[37]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the standard protobuf text format.
func (x *NetworkInterface) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NetworkInterface) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// MessageInfo on first use for non-nil receivers.
func (x *NetworkInterface) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[37]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NetworkInterface.ProtoReflect.Descriptor instead.
func (*NetworkInterface) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{37}
}

// GetIndex returns the interface index, or 0 if the receiver is nil.
func (x *NetworkInterface) GetIndex() uint32 {
	if x != nil {
		return x.Index
	}
	return 0
}

// GetName returns the interface name, or "" if the receiver is nil.
func (x *NetworkInterface) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// DebugEvent is the generated binding for the flow.DebugEvent message.
// Optional numeric payloads use google.protobuf wrapper types so that
// "unset" is distinguishable from zero.
type DebugEvent struct {
	state         protoimpl.MessageState  `protogen:"open.v1"`
	Type          DebugEventType          `protobuf:"varint,1,opt,name=type,proto3,enum=flow.DebugEventType" json:"type,omitempty"`
	Source        *Endpoint               `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
	Hash          *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"`
	Arg1          *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=arg1,proto3" json:"arg1,omitempty"`
	Arg2          *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=arg2,proto3" json:"arg2,omitempty"`
	Arg3          *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=arg3,proto3" json:"arg3,omitempty"`
	Message       string                  `protobuf:"bytes,7,opt,name=message,proto3" json:"message,omitempty"`
	Cpu           *wrapperspb.Int32Value  `protobuf:"bytes,8,opt,name=cpu,proto3" json:"cpu,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-binds it to its generated MessageInfo
// (entry 38 of this file's message-type table).
func (x *DebugEvent) Reset() {
	*x = DebugEvent{}
	mi := &file_flow_flow_proto_msgTypes[38]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the standard protobuf text format.
func (x *DebugEvent) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*DebugEvent) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// MessageInfo on first use for non-nil receivers.
func (x *DebugEvent) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[38]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DebugEvent.ProtoReflect.Descriptor instead.
func (*DebugEvent) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{38}
}

// The getters below are all nil-safe: a nil receiver yields the field's
// zero value (enum zero, nil pointer, or "") instead of panicking.

func (x *DebugEvent) GetType() DebugEventType {
	if x != nil {
		return x.Type
	}
	return DebugEventType_DBG_EVENT_UNKNOWN
}

func (x *DebugEvent) GetSource() *Endpoint {
	if x != nil {
		return x.Source
	}
	return nil
}

func (x *DebugEvent) GetHash() *wrapperspb.UInt32Value {
	if x != nil {
		return x.Hash
	}
	return nil
}

func (x *DebugEvent) GetArg1() *wrapperspb.UInt32Value {
	if x != nil {
		return x.Arg1
	}
	return nil
}

func (x *DebugEvent) GetArg2() *wrapperspb.UInt32Value {
	if x != nil {
		return x.Arg2
	}
	return nil
}

func (x *DebugEvent) GetArg3() *wrapperspb.UInt32Value {
	if x != nil {
		return x.Arg3
	}
	return nil
}

func (x *DebugEvent) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

func (x *DebugEvent) GetCpu() *wrapperspb.Int32Value {
	if x != nil {
		return x.Cpu
	}
	return nil
}
// FlowFilter_Experimental is the generated binding for the nested
// flow.FlowFilter.Experimental message (hence descriptor path {20, 0}).
//
// Experimental contains filters that are not stable yet. Support for
// experimental features is always optional and subject to change.
type FlowFilter_Experimental struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// cel_expression takes a common expression language (CEL) expression
	// returning a boolean to determine if the filter matched or not.
	// You can use the `_flow` variable to access fields on the flow using
	// the flow.Flow protobuf field names.
	// See https://github.com/google/cel-spec/blob/v0.14.0/doc/intro.md#introduction
	// for more details on CEL and accessing the protobuf fields in CEL.
	// Using CEL has performance cost compared to other filters, so prefer
	// using non-CEL filters when possible, and try to specify CEL filters
	// last in the list of FlowFilters.
	CelExpression []string `protobuf:"bytes,1,rep,name=cel_expression,json=celExpression,proto3" json:"cel_expression,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-binds it to its generated MessageInfo
// (entry 39 of this file's message-type table).
func (x *FlowFilter_Experimental) Reset() {
	*x = FlowFilter_Experimental{}
	mi := &file_flow_flow_proto_msgTypes[39]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the standard protobuf text format.
func (x *FlowFilter_Experimental) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*FlowFilter_Experimental) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// MessageInfo on first use for non-nil receivers.
func (x *FlowFilter_Experimental) ProtoReflect() protoreflect.Message {
	mi := &file_flow_flow_proto_msgTypes[39]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FlowFilter_Experimental.ProtoReflect.Descriptor instead.
// The {20, 0} path means: nested message 0 of top-level message 20 (FlowFilter).
func (*FlowFilter_Experimental) Descriptor() ([]byte, []int) {
	return file_flow_flow_proto_rawDescGZIP(), []int{20, 0}
}

// GetCelExpression returns the CEL expression list, or nil if the receiver is nil.
func (x *FlowFilter_Experimental) GetCelExpression() []string {
	if x != nil {
		return x.CelExpression
	}
	return nil
}
// File_flow_flow_proto is the FileDescriptor for flow/flow.proto.
// Presumably assigned by the generated init/type-builder code, which is not
// visible in this chunk — confirm against the rest of the file.
var File_flow_flow_proto protoreflect.FileDescriptor
const file_flow_flow_proto_rawDesc = "" +
"\n" +
"\x0fflow/flow.proto\x12\x04flow\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xb4\x0f\n" +
"\x04Flow\x12.\n" +
"\x04time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\x04time\x12\x12\n" +
"\x04uuid\x18\" \x01(\tR\x04uuid\x12'\n" +
"\averdict\x18\x02 \x01(\x0e2\r.flow.VerdictR\averdict\x12#\n" +
"\vdrop_reason\x18\x03 \x01(\rB\x02\x18\x01R\n" +
"dropReason\x12+\n" +
"\tauth_type\x18# \x01(\x0e2\x0e.flow.AuthTypeR\bauthType\x12*\n" +
"\bethernet\x18\x04 \x01(\v2\x0e.flow.EthernetR\bethernet\x12\x18\n" +
"\x02IP\x18\x05 \x01(\v2\b.flow.IPR\x02IP\x12\x1c\n" +
"\x02l4\x18\x06 \x01(\v2\f.flow.Layer4R\x02l4\x12$\n" +
"\x06tunnel\x18' \x01(\v2\f.flow.TunnelR\x06tunnel\x12&\n" +
"\x06source\x18\b \x01(\v2\x0e.flow.EndpointR\x06source\x120\n" +
"\vdestination\x18\t \x01(\v2\x0e.flow.EndpointR\vdestination\x12\"\n" +
"\x04Type\x18\n" +
" \x01(\x0e2\x0e.flow.FlowTypeR\x04Type\x12\x1b\n" +
"\tnode_name\x18\v \x01(\tR\bnodeName\x12\x1f\n" +
"\vnode_labels\x18% \x03(\tR\n" +
"nodeLabels\x12!\n" +
"\fsource_names\x18\r \x03(\tR\vsourceNames\x12+\n" +
"\x11destination_names\x18\x0e \x03(\tR\x10destinationNames\x12\x1c\n" +
"\x02l7\x18\x0f \x01(\v2\f.flow.Layer7R\x02l7\x12\x18\n" +
"\x05reply\x18\x10 \x01(\bB\x02\x18\x01R\x05reply\x124\n" +
"\n" +
"event_type\x18\x13 \x01(\v2\x15.flow.CiliumEventTypeR\teventType\x124\n" +
"\x0esource_service\x18\x14 \x01(\v2\r.flow.ServiceR\rsourceService\x12>\n" +
"\x13destination_service\x18\x15 \x01(\v2\r.flow.ServiceR\x12destinationService\x12C\n" +
"\x11traffic_direction\x18\x16 \x01(\x0e2\x16.flow.TrafficDirectionR\x10trafficDirection\x12*\n" +
"\x11policy_match_type\x18\x17 \x01(\rR\x0fpolicyMatchType\x12S\n" +
"\x17trace_observation_point\x18\x18 \x01(\x0e2\x1b.flow.TraceObservationPointR\x15traceObservationPoint\x124\n" +
"\ftrace_reason\x18$ \x01(\x0e2\x11.flow.TraceReasonR\vtraceReason\x12\"\n" +
"\x04file\x18& \x01(\v2\x0e.flow.FileInfoR\x04file\x12:\n" +
"\x10drop_reason_desc\x18\x19 \x01(\x0e2\x10.flow.DropReasonR\x0edropReasonDesc\x125\n" +
"\bis_reply\x18\x1a \x01(\v2\x1a.google.protobuf.BoolValueR\aisReply\x12G\n" +
"\x13debug_capture_point\x18\x1b \x01(\x0e2\x17.flow.DebugCapturePointR\x11debugCapturePoint\x124\n" +
"\tinterface\x18\x1c \x01(\v2\x16.flow.NetworkInterfaceR\tinterface\x12\x1d\n" +
"\n" +
"proxy_port\x18\x1d \x01(\rR\tproxyPort\x127\n" +
"\rtrace_context\x18\x1e \x01(\v2\x12.flow.TraceContextR\ftraceContext\x12F\n" +
"\x10sock_xlate_point\x18\x1f \x01(\x0e2\x1c.flow.SocketTranslationPointR\x0esockXlatePoint\x12#\n" +
"\rsocket_cookie\x18 \x01(\x04R\fsocketCookie\x12\x1b\n" +
"\tcgroup_id\x18! \x01(\x04R\bcgroupId\x12\x1e\n" +
"\aSummary\x18\xa0\x8d\x06 \x01(\tB\x02\x18\x01R\aSummary\x126\n" +
"\n" +
"extensions\x18\xf0\x93\t \x01(\v2\x14.google.protobuf.AnyR\n" +
"extensions\x12:\n" +
"\x11egress_allowed_by\x18\x89\xa4\x01 \x03(\v2\f.flow.PolicyR\x0fegressAllowedBy\x12<\n" +
"\x12ingress_allowed_by\x18\x8a\xa4\x01 \x03(\v2\f.flow.PolicyR\x10ingressAllowedBy\x128\n" +
"\x10egress_denied_by\x18\x8c\xa4\x01 \x03(\v2\f.flow.PolicyR\x0eegressDeniedBy\x12:\n" +
"\x11ingress_denied_by\x18\x8d\xa4\x01 \x03(\v2\f.flow.PolicyR\x0fingressDeniedBy\x12\x1f\n" +
"\n" +
"policy_log\x18\x8e\xa4\x01 \x03(\tR\tpolicyLogJ\x04\b\a\x10\bJ\x04\b\f\x10\rJ\x04\b\x11\x10\x12J\x04\b\x12\x10\x13\"2\n" +
"\bFileInfo\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" +
"\x04line\x18\x02 \x01(\rR\x04line\"\xc4\x01\n" +
"\x06Layer4\x12\x1d\n" +
"\x03TCP\x18\x01 \x01(\v2\t.flow.TCPH\x00R\x03TCP\x12\x1d\n" +
"\x03UDP\x18\x02 \x01(\v2\t.flow.UDPH\x00R\x03UDP\x12&\n" +
"\x06ICMPv4\x18\x03 \x01(\v2\f.flow.ICMPv4H\x00R\x06ICMPv4\x12&\n" +
"\x06ICMPv6\x18\x04 \x01(\v2\f.flow.ICMPv6H\x00R\x06ICMPv6\x12 \n" +
"\x04SCTP\x18\x05 \x01(\v2\n" +
".flow.SCTPH\x00R\x04SCTPB\n" +
"\n" +
"\bprotocol\"\xbd\x01\n" +
"\x06Layer7\x12$\n" +
"\x04type\x18\x01 \x01(\x0e2\x10.flow.L7FlowTypeR\x04type\x12\x1d\n" +
"\n" +
"latency_ns\x18\x02 \x01(\x04R\tlatencyNs\x12\x1d\n" +
"\x03dns\x18d \x01(\v2\t.flow.DNSH\x00R\x03dns\x12 \n" +
"\x04http\x18e \x01(\v2\n" +
".flow.HTTPH\x00R\x04http\x12#\n" +
"\x05kafka\x18f \x01(\v2\v.flow.KafkaH\x00R\x05kafkaB\b\n" +
"\x06record\"9\n" +
"\fTraceContext\x12)\n" +
"\x06parent\x18\x01 \x01(\v2\x11.flow.TraceParentR\x06parent\"(\n" +
"\vTraceParent\x12\x19\n" +
"\btrace_id\x18\x01 \x01(\tR\atraceId\"\xd8\x01\n" +
"\bEndpoint\x12\x0e\n" +
"\x02ID\x18\x01 \x01(\rR\x02ID\x12\x1a\n" +
"\bidentity\x18\x02 \x01(\rR\bidentity\x12!\n" +
"\fcluster_name\x18\a \x01(\tR\vclusterName\x12\x1c\n" +
"\tnamespace\x18\x03 \x01(\tR\tnamespace\x12\x16\n" +
"\x06labels\x18\x04 \x03(\tR\x06labels\x12\x19\n" +
"\bpod_name\x18\x05 \x01(\tR\apodName\x12,\n" +
"\tworkloads\x18\x06 \x03(\v2\x0e.flow.WorkloadR\tworkloads\"2\n" +
"\bWorkload\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" +
"\x04kind\x18\x02 \x01(\tR\x04kind\"w\n" +
"\x03TCP\x12\x1f\n" +
"\vsource_port\x18\x01 \x01(\rR\n" +
"sourcePort\x12)\n" +
"\x10destination_port\x18\x02 \x01(\rR\x0fdestinationPort\x12$\n" +
"\x05flags\x18\x03 \x01(\v2\x0e.flow.TCPFlagsR\x05flags\"\xb0\x01\n" +
"\x02IP\x12\x16\n" +
"\x06source\x18\x01 \x01(\tR\x06source\x12#\n" +
"\rsource_xlated\x18\x05 \x01(\tR\fsourceXlated\x12 \n" +
"\vdestination\x18\x02 \x01(\tR\vdestination\x12-\n" +
"\tipVersion\x18\x03 \x01(\x0e2\x0f.flow.IPVersionR\tipVersion\x12\x1c\n" +
"\tencrypted\x18\x04 \x01(\bR\tencrypted\"D\n" +
"\bEthernet\x12\x16\n" +
"\x06source\x18\x01 \x01(\tR\x06source\x12 \n" +
"\vdestination\x18\x02 \x01(\tR\vdestination\"\xaa\x01\n" +
"\bTCPFlags\x12\x10\n" +
"\x03FIN\x18\x01 \x01(\bR\x03FIN\x12\x10\n" +
"\x03SYN\x18\x02 \x01(\bR\x03SYN\x12\x10\n" +
"\x03RST\x18\x03 \x01(\bR\x03RST\x12\x10\n" +
"\x03PSH\x18\x04 \x01(\bR\x03PSH\x12\x10\n" +
"\x03ACK\x18\x05 \x01(\bR\x03ACK\x12\x10\n" +
"\x03URG\x18\x06 \x01(\bR\x03URG\x12\x10\n" +
"\x03ECE\x18\a \x01(\bR\x03ECE\x12\x10\n" +
"\x03CWR\x18\b \x01(\bR\x03CWR\x12\x0e\n" +
"\x02NS\x18\t \x01(\bR\x02NS\"Q\n" +
"\x03UDP\x12\x1f\n" +
"\vsource_port\x18\x01 \x01(\rR\n" +
"sourcePort\x12)\n" +
"\x10destination_port\x18\x02 \x01(\rR\x0fdestinationPort\"R\n" +
"\x04SCTP\x12\x1f\n" +
"\vsource_port\x18\x01 \x01(\rR\n" +
"sourcePort\x12)\n" +
"\x10destination_port\x18\x02 \x01(\rR\x0fdestinationPort\"0\n" +
"\x06ICMPv4\x12\x12\n" +
"\x04type\x18\x01 \x01(\rR\x04type\x12\x12\n" +
"\x04code\x18\x02 \x01(\rR\x04code\"0\n" +
"\x06ICMPv6\x12\x12\n" +
"\x04type\x18\x01 \x01(\rR\x04type\x12\x12\n" +
"\x04code\x18\x02 \x01(\rR\x04code\"\xa3\x01\n" +
"\x06Tunnel\x121\n" +
"\bprotocol\x18\x01 \x01(\x0e2\x15.flow.Tunnel.ProtocolR\bprotocol\x12\x18\n" +
"\x02IP\x18\x02 \x01(\v2\b.flow.IPR\x02IP\x12\x1c\n" +
"\x02l4\x18\x03 \x01(\v2\f.flow.Layer4R\x02l4\".\n" +
"\bProtocol\x12\v\n" +
"\aUNKNOWN\x10\x00\x12\t\n" +
"\x05VXLAN\x10\x01\x12\n" +
"\n" +
"\x06GENEVE\x10\x02\"\x82\x01\n" +
"\x06Policy\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x1c\n" +
"\tnamespace\x18\x02 \x01(\tR\tnamespace\x12\x16\n" +
"\x06labels\x18\x03 \x03(\tR\x06labels\x12\x1a\n" +
"\brevision\x18\x04 \x01(\x04R\brevision\x12\x12\n" +
"\x04kind\x18\x05 \x01(\tR\x04kind\"f\n" +
"\x0fEventTypeFilter\x12\x12\n" +
"\x04type\x18\x01 \x01(\x05R\x04type\x12$\n" +
"\x0ematch_sub_type\x18\x02 \x01(\bR\fmatchSubType\x12\x19\n" +
"\bsub_type\x18\x03 \x01(\x05R\asubType\"@\n" +
"\x0fCiliumEventType\x12\x12\n" +
"\x04type\x18\x01 \x01(\x05R\x04type\x12\x19\n" +
"\bsub_type\x18\x02 \x01(\x05R\asubType\"\xa4\r\n" +
"\n" +
"FlowFilter\x12\x12\n" +
"\x04uuid\x18\x1d \x03(\tR\x04uuid\x12\x1b\n" +
"\tsource_ip\x18\x01 \x03(\tR\bsourceIp\x12(\n" +
"\x10source_ip_xlated\x18\" \x03(\tR\x0esourceIpXlated\x12\x1d\n" +
"\n" +
"source_pod\x18\x02 \x03(\tR\tsourcePod\x12\x1f\n" +
"\vsource_fqdn\x18\a \x03(\tR\n" +
"sourceFqdn\x12!\n" +
"\fsource_label\x18\n" +
" \x03(\tR\vsourceLabel\x12%\n" +
"\x0esource_service\x18\x10 \x03(\tR\rsourceService\x127\n" +
"\x0fsource_workload\x18\x1a \x03(\v2\x0e.flow.WorkloadR\x0esourceWorkload\x12.\n" +
"\x13source_cluster_name\x18% \x03(\tR\x11sourceClusterName\x12%\n" +
"\x0edestination_ip\x18\x03 \x03(\tR\rdestinationIp\x12'\n" +
"\x0fdestination_pod\x18\x04 \x03(\tR\x0edestinationPod\x12)\n" +
"\x10destination_fqdn\x18\b \x03(\tR\x0fdestinationFqdn\x12+\n" +
"\x11destination_label\x18\v \x03(\tR\x10destinationLabel\x12/\n" +
"\x13destination_service\x18\x11 \x03(\tR\x12destinationService\x12A\n" +
"\x14destination_workload\x18\x1b \x03(\v2\x0e.flow.WorkloadR\x13destinationWorkload\x128\n" +
"\x18destination_cluster_name\x18& \x03(\tR\x16destinationClusterName\x12C\n" +
"\x11traffic_direction\x18\x1e \x03(\x0e2\x16.flow.TrafficDirectionR\x10trafficDirection\x12'\n" +
"\averdict\x18\x05 \x03(\x0e2\r.flow.VerdictR\averdict\x12:\n" +
"\x10drop_reason_desc\x18! \x03(\x0e2\x10.flow.DropReasonR\x0edropReasonDesc\x124\n" +
"\tinterface\x18# \x03(\v2\x16.flow.NetworkInterfaceR\tinterface\x124\n" +
"\n" +
"event_type\x18\x06 \x03(\v2\x15.flow.EventTypeFilterR\teventType\x12(\n" +
"\x10http_status_code\x18\t \x03(\tR\x0ehttpStatusCode\x12\x1a\n" +
"\bprotocol\x18\f \x03(\tR\bprotocol\x12\x1f\n" +
"\vsource_port\x18\r \x03(\tR\n" +
"sourcePort\x12)\n" +
"\x10destination_port\x18\x0e \x03(\tR\x0fdestinationPort\x12\x14\n" +
"\x05reply\x18\x0f \x03(\bR\x05reply\x12\x1b\n" +
"\tdns_query\x18\x12 \x03(\tR\bdnsQuery\x12'\n" +
"\x0fsource_identity\x18\x13 \x03(\rR\x0esourceIdentity\x121\n" +
"\x14destination_identity\x18\x14 \x03(\rR\x13destinationIdentity\x12\x1f\n" +
"\vhttp_method\x18\x15 \x03(\tR\n" +
"httpMethod\x12\x1b\n" +
"\thttp_path\x18\x16 \x03(\tR\bhttpPath\x12\x19\n" +
"\bhttp_url\x18\x1f \x03(\tR\ahttpUrl\x121\n" +
"\vhttp_header\x18 \x03(\v2\x10.flow.HTTPHeaderR\n" +
"httpHeader\x12+\n" +
"\ttcp_flags\x18\x17 \x03(\v2\x0e.flow.TCPFlagsR\btcpFlags\x12\x1b\n" +
"\tnode_name\x18\x18 \x03(\tR\bnodeName\x12\x1f\n" +
"\vnode_labels\x18$ \x03(\tR\n" +
"nodeLabels\x12.\n" +
"\n" +
"ip_version\x18\x19 \x03(\x0e2\x0f.flow.IPVersionR\tipVersion\x12\x19\n" +
"\btrace_id\x18\x1c \x03(\tR\atraceId\x12B\n" +
"\fexperimental\x18\xe7\a \x01(\v2\x1d.flow.FlowFilter.ExperimentalR\fexperimental\x1a5\n" +
"\fExperimental\x12%\n" +
"\x0ecel_expression\x18\x01 \x03(\tR\rcelExpression\"\xce\x01\n" +
"\x03DNS\x12\x14\n" +
"\x05query\x18\x01 \x01(\tR\x05query\x12\x10\n" +
"\x03ips\x18\x02 \x03(\tR\x03ips\x12\x10\n" +
"\x03ttl\x18\x03 \x01(\rR\x03ttl\x12\x16\n" +
"\x06cnames\x18\x04 \x03(\tR\x06cnames\x12-\n" +
"\x12observation_source\x18\x05 \x01(\tR\x11observationSource\x12\x14\n" +
"\x05rcode\x18\x06 \x01(\rR\x05rcode\x12\x16\n" +
"\x06qtypes\x18\a \x03(\tR\x06qtypes\x12\x18\n" +
"\arrtypes\x18\b \x03(\tR\arrtypes\"4\n" +
"\n" +
"HTTPHeader\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value\"\x8c\x01\n" +
"\x04HTTP\x12\x12\n" +
"\x04code\x18\x01 \x01(\rR\x04code\x12\x16\n" +
"\x06method\x18\x02 \x01(\tR\x06method\x12\x10\n" +
"\x03url\x18\x03 \x01(\tR\x03url\x12\x1a\n" +
"\bprotocol\x18\x04 \x01(\tR\bprotocol\x12*\n" +
"\aheaders\x18\x05 \x03(\v2\x10.flow.HTTPHeaderR\aheaders\"\x9d\x01\n" +
"\x05Kafka\x12\x1d\n" +
"\n" +
"error_code\x18\x01 \x01(\x05R\terrorCode\x12\x1f\n" +
"\vapi_version\x18\x02 \x01(\x05R\n" +
"apiVersion\x12\x17\n" +
"\aapi_key\x18\x03 \x01(\tR\x06apiKey\x12%\n" +
"\x0ecorrelation_id\x18\x04 \x01(\x05R\rcorrelationId\x12\x14\n" +
"\x05topic\x18\x05 \x01(\tR\x05topic\";\n" +
"\aService\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x1c\n" +
"\tnamespace\x18\x02 \x01(\tR\tnamespace\"\x91\x01\n" +
"\tLostEvent\x12-\n" +
"\x06source\x18\x01 \x01(\x0e2\x15.flow.LostEventSourceR\x06source\x12&\n" +
"\x0fnum_events_lost\x18\x02 \x01(\x04R\rnumEventsLost\x12-\n" +
"\x03cpu\x18\x03 \x01(\v2\x1b.google.protobuf.Int32ValueR\x03cpu\"\xfe\x04\n" +
"\n" +
"AgentEvent\x12(\n" +
"\x04type\x18\x01 \x01(\x0e2\x14.flow.AgentEventTypeR\x04type\x123\n" +
"\aunknown\x18d \x01(\v2\x17.flow.AgentEventUnknownH\x00R\aunknown\x129\n" +
"\vagent_start\x18e \x01(\v2\x16.flow.TimeNotificationH\x00R\n" +
"agentStart\x12E\n" +
"\rpolicy_update\x18f \x01(\v2\x1e.flow.PolicyUpdateNotificationH\x00R\fpolicyUpdate\x12R\n" +
"\x13endpoint_regenerate\x18g \x01(\v2\x1f.flow.EndpointRegenNotificationH\x00R\x12endpointRegenerate\x12K\n" +
"\x0fendpoint_update\x18h \x01(\v2 .flow.EndpointUpdateNotificationH\x00R\x0eendpointUpdate\x12B\n" +
"\x0eipcache_update\x18i \x01(\v2\x19.flow.IPCacheNotificationH\x00R\ripcacheUpdate\x12L\n" +
"\x0eservice_upsert\x18j \x01(\v2\x1f.flow.ServiceUpsertNotificationB\x02\x18\x01H\x00R\rserviceUpsert\x12L\n" +
"\x0eservice_delete\x18k \x01(\v2\x1f.flow.ServiceDeleteNotificationB\x02\x18\x01H\x00R\rserviceDeleteB\x0e\n" +
"\fnotification\"K\n" +
"\x11AgentEventUnknown\x12\x12\n" +
"\x04type\x18\x01 \x01(\tR\x04type\x12\"\n" +
"\fnotification\x18\x02 \x01(\tR\fnotification\"B\n" +
"\x10TimeNotification\x12.\n" +
"\x04time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\x04time\"m\n" +
"\x18PolicyUpdateNotification\x12\x16\n" +
"\x06labels\x18\x01 \x03(\tR\x06labels\x12\x1a\n" +
"\brevision\x18\x02 \x01(\x04R\brevision\x12\x1d\n" +
"\n" +
"rule_count\x18\x03 \x01(\x03R\truleCount\"Y\n" +
"\x19EndpointRegenNotification\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x16\n" +
"\x06labels\x18\x02 \x03(\tR\x06labels\x12\x14\n" +
"\x05error\x18\x03 \x01(\tR\x05error\"\x93\x01\n" +
"\x1aEndpointUpdateNotification\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x16\n" +
"\x06labels\x18\x02 \x03(\tR\x06labels\x12\x14\n" +
"\x05error\x18\x03 \x01(\tR\x05error\x12\x19\n" +
"\bpod_name\x18\x04 \x01(\tR\apodName\x12\x1c\n" +
"\tnamespace\x18\x05 \x01(\tR\tnamespace\"\x99\x02\n" +
"\x13IPCacheNotification\x12\x12\n" +
"\x04cidr\x18\x01 \x01(\tR\x04cidr\x12\x1a\n" +
"\bidentity\x18\x02 \x01(\rR\bidentity\x12?\n" +
"\fold_identity\x18\x03 \x01(\v2\x1c.google.protobuf.UInt32ValueR\voldIdentity\x12\x17\n" +
"\ahost_ip\x18\x04 \x01(\tR\x06hostIp\x12\x1e\n" +
"\vold_host_ip\x18\x05 \x01(\tR\toldHostIp\x12\x1f\n" +
"\vencrypt_key\x18\x06 \x01(\rR\n" +
"encryptKey\x12\x1c\n" +
"\tnamespace\x18\a \x01(\tR\tnamespace\x12\x19\n" +
"\bpod_name\x18\b \x01(\tR\apodName\"G\n" +
"\x1dServiceUpsertNotificationAddr\x12\x0e\n" +
"\x02ip\x18\x01 \x01(\tR\x02ip\x12\x12\n" +
"\x04port\x18\x02 \x01(\rR\x04port:\x02\x18\x01\"\x9e\x03\n" +
"\x19ServiceUpsertNotification\x12\x0e\n" +
"\x02id\x18\x01 \x01(\rR\x02id\x12N\n" +
"\x10frontend_address\x18\x02 \x01(\v2#.flow.ServiceUpsertNotificationAddrR\x0ffrontendAddress\x12P\n" +
"\x11backend_addresses\x18\x03 \x03(\v2#.flow.ServiceUpsertNotificationAddrR\x10backendAddresses\x12\x12\n" +
"\x04type\x18\x04 \x01(\tR\x04type\x12)\n" +
"\x0etraffic_policy\x18\x05 \x01(\tB\x02\x18\x01R\rtrafficPolicy\x12\x12\n" +
"\x04name\x18\x06 \x01(\tR\x04name\x12\x1c\n" +
"\tnamespace\x18\a \x01(\tR\tnamespace\x12,\n" +
"\x12ext_traffic_policy\x18\b \x01(\tR\x10extTrafficPolicy\x12,\n" +
"\x12int_traffic_policy\x18\t \x01(\tR\x10intTrafficPolicy:\x02\x18\x01\"/\n" +
"\x19ServiceDeleteNotification\x12\x0e\n" +
"\x02id\x18\x01 \x01(\rR\x02id:\x02\x18\x01\"<\n" +
"\x10NetworkInterface\x12\x14\n" +
"\x05index\x18\x01 \x01(\rR\x05index\x12\x12\n" +
"\x04name\x18\x02 \x01(\tR\x04name\"\xef\x02\n" +
"\n" +
"DebugEvent\x12(\n" +
"\x04type\x18\x01 \x01(\x0e2\x14.flow.DebugEventTypeR\x04type\x12&\n" +
"\x06source\x18\x02 \x01(\v2\x0e.flow.EndpointR\x06source\x120\n" +
"\x04hash\x18\x03 \x01(\v2\x1c.google.protobuf.UInt32ValueR\x04hash\x120\n" +
"\x04arg1\x18\x04 \x01(\v2\x1c.google.protobuf.UInt32ValueR\x04arg1\x120\n" +
"\x04arg2\x18\x05 \x01(\v2\x1c.google.protobuf.UInt32ValueR\x04arg2\x120\n" +
"\x04arg3\x18\x06 \x01(\v2\x1c.google.protobuf.UInt32ValueR\x04arg3\x12\x18\n" +
"\amessage\x18\a \x01(\tR\amessage\x12-\n" +
"\x03cpu\x18\b \x01(\v2\x1b.google.protobuf.Int32ValueR\x03cpu*9\n" +
"\bFlowType\x12\x10\n" +
"\fUNKNOWN_TYPE\x10\x00\x12\t\n" +
"\x05L3_L4\x10\x01\x12\x06\n" +
"\x02L7\x10\x02\x12\b\n" +
"\x04SOCK\x10\x03*9\n" +
"\bAuthType\x12\f\n" +
"\bDISABLED\x10\x00\x12\t\n" +
"\x05SPIRE\x10\x01\x12\x14\n" +
"\x10TEST_ALWAYS_FAIL\x10\x02*\x8a\x02\n" +
"\x15TraceObservationPoint\x12\x11\n" +
"\rUNKNOWN_POINT\x10\x00\x12\f\n" +
"\bTO_PROXY\x10\x01\x12\v\n" +
"\aTO_HOST\x10\x02\x12\f\n" +
"\bTO_STACK\x10\x03\x12\x0e\n" +
"\n" +
"TO_OVERLAY\x10\x04\x12\x0f\n" +
"\vTO_ENDPOINT\x10e\x12\x11\n" +
"\rFROM_ENDPOINT\x10\x05\x12\x0e\n" +
"\n" +
"FROM_PROXY\x10\x06\x12\r\n" +
"\tFROM_HOST\x10\a\x12\x0e\n" +
"\n" +
"FROM_STACK\x10\b\x12\x10\n" +
"\fFROM_OVERLAY\x10\t\x12\x10\n" +
"\fFROM_NETWORK\x10\n" +
"\x12\x0e\n" +
"\n" +
"TO_NETWORK\x10\v\x12\x0f\n" +
"\vFROM_CRYPTO\x10\f\x12\r\n" +
"\tTO_CRYPTO\x10\r*\xa0\x01\n" +
"\vTraceReason\x12\x18\n" +
"\x14TRACE_REASON_UNKNOWN\x10\x00\x12\a\n" +
"\x03NEW\x10\x01\x12\x0f\n" +
"\vESTABLISHED\x10\x02\x12\t\n" +
"\x05REPLY\x10\x03\x12\v\n" +
"\aRELATED\x10\x04\x12\x10\n" +
"\bREOPENED\x10\x05\x1a\x02\b\x01\x12\x0e\n" +
"\n" +
"SRV6_ENCAP\x10\x06\x12\x0e\n" +
"\n" +
"SRV6_DECAP\x10\a\x12\x13\n" +
"\x0fENCRYPT_OVERLAY\x10\b*H\n" +
"\n" +
"L7FlowType\x12\x13\n" +
"\x0fUNKNOWN_L7_TYPE\x10\x00\x12\v\n" +
"\aREQUEST\x10\x01\x12\f\n" +
"\bRESPONSE\x10\x02\x12\n" +
"\n" +
"\x06SAMPLE\x10\x03*0\n" +
"\tIPVersion\x12\x0f\n" +
"\vIP_NOT_USED\x10\x00\x12\b\n" +
"\x04IPv4\x10\x01\x12\b\n" +
"\x04IPv6\x10\x02*|\n" +
"\aVerdict\x12\x13\n" +
"\x0fVERDICT_UNKNOWN\x10\x00\x12\r\n" +
"\tFORWARDED\x10\x01\x12\v\n" +
"\aDROPPED\x10\x02\x12\t\n" +
"\x05ERROR\x10\x03\x12\t\n" +
"\x05AUDIT\x10\x04\x12\x0e\n" +
"\n" +
"REDIRECTED\x10\x05\x12\n" +
"\n" +
"\x06TRACED\x10\x06\x12\x0e\n" +
"\n" +
"TRANSLATED\x10\a*\xc5\x11\n" +
"\n" +
"DropReason\x12\x17\n" +
"\x13DROP_REASON_UNKNOWN\x10\x00\x12\x1b\n" +
"\x12INVALID_SOURCE_MAC\x10\x82\x01\x1a\x02\b\x01\x12 \n" +
"\x17INVALID_DESTINATION_MAC\x10\x83\x01\x1a\x02\b\x01\x12\x16\n" +
"\x11INVALID_SOURCE_IP\x10\x84\x01\x12\x12\n" +
"\rPOLICY_DENIED\x10\x85\x01\x12\x1b\n" +
"\x16INVALID_PACKET_DROPPED\x10\x86\x01\x12#\n" +
"\x1eCT_TRUNCATED_OR_INVALID_HEADER\x10\x87\x01\x12\x1c\n" +
"\x17CT_MISSING_TCP_ACK_FLAG\x10\x88\x01\x12\x1b\n" +
"\x16CT_UNKNOWN_L4_PROTOCOL\x10\x89\x01\x12+\n" +
"\"CT_CANNOT_CREATE_ENTRY_FROM_PACKET\x10\x8a\x01\x1a\x02\b\x01\x12\x1c\n" +
"\x17UNSUPPORTED_L3_PROTOCOL\x10\x8b\x01\x12\x15\n" +
"\x10MISSED_TAIL_CALL\x10\x8c\x01\x12\x1c\n" +
"\x17ERROR_WRITING_TO_PACKET\x10\x8d\x01\x12\x18\n" +
"\x13UNKNOWN_L4_PROTOCOL\x10\x8e\x01\x12\x18\n" +
"\x13UNKNOWN_ICMPV4_CODE\x10\x8f\x01\x12\x18\n" +
"\x13UNKNOWN_ICMPV4_TYPE\x10\x90\x01\x12\x18\n" +
"\x13UNKNOWN_ICMPV6_CODE\x10\x91\x01\x12\x18\n" +
"\x13UNKNOWN_ICMPV6_TYPE\x10\x92\x01\x12 \n" +
"\x1bERROR_RETRIEVING_TUNNEL_KEY\x10\x93\x01\x12(\n" +
"\x1fERROR_RETRIEVING_TUNNEL_OPTIONS\x10\x94\x01\x1a\x02\b\x01\x12\x1e\n" +
"\x15INVALID_GENEVE_OPTION\x10\x95\x01\x1a\x02\b\x01\x12\x1e\n" +
"\x19UNKNOWN_L3_TARGET_ADDRESS\x10\x96\x01\x12\x1b\n" +
"\x16STALE_OR_UNROUTABLE_IP\x10\x97\x01\x12*\n" +
"!NO_MATCHING_LOCAL_CONTAINER_FOUND\x10\x98\x01\x1a\x02\b\x01\x12'\n" +
"\"ERROR_WHILE_CORRECTING_L3_CHECKSUM\x10\x99\x01\x12'\n" +
"\"ERROR_WHILE_CORRECTING_L4_CHECKSUM\x10\x9a\x01\x12\x1c\n" +
"\x17CT_MAP_INSERTION_FAILED\x10\x9b\x01\x12\"\n" +
"\x1dINVALID_IPV6_EXTENSION_HEADER\x10\x9c\x01\x12#\n" +
"\x1eIP_FRAGMENTATION_NOT_SUPPORTED\x10\x9d\x01\x12\x1e\n" +
"\x19SERVICE_BACKEND_NOT_FOUND\x10\x9e\x01\x12(\n" +
"#NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT\x10\xa0\x01\x12#\n" +
"\x1eFAILED_TO_INSERT_INTO_PROXYMAP\x10\xa1\x01\x12+\n" +
"&REACHED_EDT_RATE_LIMITING_DROP_HORIZON\x10\xa2\x01\x12&\n" +
"!UNKNOWN_CONNECTION_TRACKING_STATE\x10\xa3\x01\x12\x1e\n" +
"\x19LOCAL_HOST_IS_UNREACHABLE\x10\xa4\x01\x12:\n" +
"5NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION\x10\xa5\x01\x12\x1c\n" +
"\x17UNSUPPORTED_L2_PROTOCOL\x10\xa6\x01\x12\"\n" +
"\x1dNO_MAPPING_FOR_NAT_MASQUERADE\x10\xa7\x01\x12,\n" +
"'UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE\x10\xa8\x01\x12\x16\n" +
"\x11FIB_LOOKUP_FAILED\x10\xa9\x01\x12(\n" +
"#ENCAPSULATION_TRAFFIC_IS_PROHIBITED\x10\xaa\x01\x12\x15\n" +
"\x10INVALID_IDENTITY\x10\xab\x01\x12\x13\n" +
"\x0eUNKNOWN_SENDER\x10\xac\x01\x12\x13\n" +
"\x0eNAT_NOT_NEEDED\x10\xad\x01\x12\x13\n" +
"\x0eIS_A_CLUSTERIP\x10\xae\x01\x12.\n" +
")FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND\x10\xaf\x01\x12\x1d\n" +
"\x18FORBIDDEN_ICMPV6_MESSAGE\x10\xb0\x01\x12!\n" +
"\x1cDENIED_BY_LB_SRC_RANGE_CHECK\x10\xb1\x01\x12\x19\n" +
"\x14SOCKET_LOOKUP_FAILED\x10\xb2\x01\x12\x19\n" +
"\x14SOCKET_ASSIGN_FAILED\x10\xb3\x01\x121\n" +
",PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL\x10\xb4\x01\x12\x10\n" +
"\vPOLICY_DENY\x10\xb5\x01\x12\x12\n" +
"\rVLAN_FILTERED\x10\xb6\x01\x12\x10\n" +
"\vINVALID_VNI\x10\xb7\x01\x12\x16\n" +
"\x11INVALID_TC_BUFFER\x10\xb8\x01\x12\v\n" +
"\x06NO_SID\x10\xb9\x01\x12\x1b\n" +
"\x12MISSING_SRV6_STATE\x10\xba\x01\x1a\x02\b\x01\x12\n" +
"\n" +
"\x05NAT46\x10\xbb\x01\x12\n" +
"\n" +
"\x05NAT64\x10\xbc\x01\x12\x12\n" +
"\rAUTH_REQUIRED\x10\xbd\x01\x12\x14\n" +
"\x0fCT_NO_MAP_FOUND\x10\xbe\x01\x12\x16\n" +
"\x11SNAT_NO_MAP_FOUND\x10\xbf\x01\x12\x17\n" +
"\x12INVALID_CLUSTER_ID\x10\xc0\x01\x12'\n" +
"\"UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP\x10\xc1\x01\x12\x16\n" +
"\x11NO_EGRESS_GATEWAY\x10\xc2\x01\x12\x18\n" +
"\x13UNENCRYPTED_TRAFFIC\x10\xc3\x01\x12\x11\n" +
"\fTTL_EXCEEDED\x10\xc4\x01\x12\x0f\n" +
"\n" +
"NO_NODE_ID\x10\xc5\x01\x12\x16\n" +
"\x11DROP_RATE_LIMITED\x10\xc6\x01\x12\x11\n" +
"\fIGMP_HANDLED\x10\xc7\x01\x12\x14\n" +
"\x0fIGMP_SUBSCRIBED\x10\xc8\x01\x12\x16\n" +
"\x11MULTICAST_HANDLED\x10\xc9\x01\x12\x18\n" +
"\x13DROP_HOST_NOT_READY\x10\xca\x01\x12\x16\n" +
"\x11DROP_EP_NOT_READY\x10\xcb\x01\x12\x16\n" +
"\x11DROP_NO_EGRESS_IP\x10\xcc\x01\x12\x14\n" +
"\x0fDROP_PUNT_PROXY\x10\xcd\x01*J\n" +
"\x10TrafficDirection\x12\x1d\n" +
"\x19TRAFFIC_DIRECTION_UNKNOWN\x10\x00\x12\v\n" +
"\aINGRESS\x10\x01\x12\n" +
"\n" +
"\x06EGRESS\x10\x02*\x8d\x02\n" +
"\x11DebugCapturePoint\x12\x1d\n" +
"\x19DBG_CAPTURE_POINT_UNKNOWN\x10\x00\x12\x18\n" +
"\x14DBG_CAPTURE_DELIVERY\x10\x04\x12\x17\n" +
"\x13DBG_CAPTURE_FROM_LB\x10\x05\x12\x19\n" +
"\x15DBG_CAPTURE_AFTER_V46\x10\x06\x12\x19\n" +
"\x15DBG_CAPTURE_AFTER_V64\x10\a\x12\x19\n" +
"\x15DBG_CAPTURE_PROXY_PRE\x10\b\x12\x1a\n" +
"\x16DBG_CAPTURE_PROXY_POST\x10\t\x12\x18\n" +
"\x14DBG_CAPTURE_SNAT_PRE\x10\n" +
"\x12\x19\n" +
"\x15DBG_CAPTURE_SNAT_POST\x10\v\"\x04\b\x01\x10\x03*9\n" +
"\tEventType\x12\v\n" +
"\aUNKNOWN\x10\x00\x12\x0f\n" +
"\vEventSample\x10\t\x12\x0e\n" +
"\n" +
"RecordLost\x10\x02*\x7f\n" +
"\x0fLostEventSource\x12\x1d\n" +
"\x19UNKNOWN_LOST_EVENT_SOURCE\x10\x00\x12\x1a\n" +
"\x16PERF_EVENT_RING_BUFFER\x10\x01\x12\x19\n" +
"\x15OBSERVER_EVENTS_QUEUE\x10\x02\x12\x16\n" +
"\x12HUBBLE_RING_BUFFER\x10\x03*\xb6\x02\n" +
"\x0eAgentEventType\x12\x17\n" +
"\x13AGENT_EVENT_UNKNOWN\x10\x00\x12\x11\n" +
"\rAGENT_STARTED\x10\x02\x12\x12\n" +
"\x0ePOLICY_UPDATED\x10\x03\x12\x12\n" +
"\x0ePOLICY_DELETED\x10\x04\x12\x1f\n" +
"\x1bENDPOINT_REGENERATE_SUCCESS\x10\x05\x12\x1f\n" +
"\x1bENDPOINT_REGENERATE_FAILURE\x10\x06\x12\x14\n" +
"\x10ENDPOINT_CREATED\x10\a\x12\x14\n" +
"\x10ENDPOINT_DELETED\x10\b\x12\x14\n" +
"\x10IPCACHE_UPSERTED\x10\t\x12\x13\n" +
"\x0fIPCACHE_DELETED\x10\n" +
"\x12\x18\n" +
"\x10SERVICE_UPSERTED\x10\v\x1a\x02\b\x01\x12\x17\n" +
"\x0fSERVICE_DELETED\x10\f\x1a\x02\b\x01\"\x04\b\x01\x10\x01*\xd8\x01\n" +
"\x16SocketTranslationPoint\x12\x1c\n" +
"\x18SOCK_XLATE_POINT_UNKNOWN\x10\x00\x12&\n" +
"\"SOCK_XLATE_POINT_PRE_DIRECTION_FWD\x10\x01\x12'\n" +
"#SOCK_XLATE_POINT_POST_DIRECTION_FWD\x10\x02\x12&\n" +
"\"SOCK_XLATE_POINT_PRE_DIRECTION_REV\x10\x03\x12'\n" +
"#SOCK_XLATE_POINT_POST_DIRECTION_REV\x10\x04*\x81\r\n" +
"\x0eDebugEventType\x12\x15\n" +
"\x11DBG_EVENT_UNKNOWN\x10\x00\x12\x0f\n" +
"\vDBG_GENERIC\x10\x01\x12\x16\n" +
"\x12DBG_LOCAL_DELIVERY\x10\x02\x12\r\n" +
"\tDBG_ENCAP\x10\x03\x12\x11\n" +
"\rDBG_LXC_FOUND\x10\x04\x12\x15\n" +
"\x11DBG_POLICY_DENIED\x10\x05\x12\x11\n" +
"\rDBG_CT_LOOKUP\x10\x06\x12\x15\n" +
"\x11DBG_CT_LOOKUP_REV\x10\a\x12\x10\n" +
"\fDBG_CT_MATCH\x10\b\x12\x12\n" +
"\x0eDBG_CT_CREATED\x10\t\x12\x13\n" +
"\x0fDBG_CT_CREATED2\x10\n" +
"\x12\x14\n" +
"\x10DBG_ICMP6_HANDLE\x10\v\x12\x15\n" +
"\x11DBG_ICMP6_REQUEST\x10\f\x12\x10\n" +
"\fDBG_ICMP6_NS\x10\r\x12\x1b\n" +
"\x17DBG_ICMP6_TIME_EXCEEDED\x10\x0e\x12\x12\n" +
"\x0eDBG_CT_VERDICT\x10\x0f\x12\r\n" +
"\tDBG_DECAP\x10\x10\x12\x10\n" +
"\fDBG_PORT_MAP\x10\x11\x12\x11\n" +
"\rDBG_ERROR_RET\x10\x12\x12\x0f\n" +
"\vDBG_TO_HOST\x10\x13\x12\x10\n" +
"\fDBG_TO_STACK\x10\x14\x12\x10\n" +
"\fDBG_PKT_HASH\x10\x15\x12\x1b\n" +
"\x17DBG_LB6_LOOKUP_FRONTEND\x10\x16\x12 \n" +
"\x1cDBG_LB6_LOOKUP_FRONTEND_FAIL\x10\x17\x12\x1f\n" +
"\x1bDBG_LB6_LOOKUP_BACKEND_SLOT\x10\x18\x12'\n" +
"#DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS\x10\x19\x12'\n" +
"#DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL\x10\x1a\x12\x1f\n" +
"\x1bDBG_LB6_LOOKUP_BACKEND_FAIL\x10\x1b\x12\x1e\n" +
"\x1aDBG_LB6_REVERSE_NAT_LOOKUP\x10\x1c\x12\x17\n" +
"\x13DBG_LB6_REVERSE_NAT\x10\x1d\x12\x1b\n" +
"\x17DBG_LB4_LOOKUP_FRONTEND\x10\x1e\x12 \n" +
"\x1cDBG_LB4_LOOKUP_FRONTEND_FAIL\x10\x1f\x12\x1f\n" +
"\x1bDBG_LB4_LOOKUP_BACKEND_SLOT\x10 \x12'\n" +
"#DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS\x10!\x12'\n" +
"#DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL\x10\"\x12\x1f\n" +
"\x1bDBG_LB4_LOOKUP_BACKEND_FAIL\x10#\x12\x1e\n" +
"\x1aDBG_LB4_REVERSE_NAT_LOOKUP\x10$\x12\x17\n" +
"\x13DBG_LB4_REVERSE_NAT\x10%\x12\x19\n" +
"\x15DBG_LB4_LOOPBACK_SNAT\x10&\x12\x1d\n" +
"\x19DBG_LB4_LOOPBACK_SNAT_REV\x10'\x12\x12\n" +
"\x0eDBG_CT_LOOKUP4\x10(\x12\x1b\n" +
"\x17DBG_RR_BACKEND_SLOT_SEL\x10)\x12\x18\n" +
"\x14DBG_REV_PROXY_LOOKUP\x10*\x12\x17\n" +
"\x13DBG_REV_PROXY_FOUND\x10+\x12\x18\n" +
"\x14DBG_REV_PROXY_UPDATE\x10,\x12\x11\n" +
"\rDBG_L4_POLICY\x10-\x12\x19\n" +
"\x15DBG_NETDEV_IN_CLUSTER\x10.\x12\x15\n" +
"\x11DBG_NETDEV_ENCAP4\x10/\x12\x14\n" +
"\x10DBG_CT_LOOKUP4_1\x100\x12\x14\n" +
"\x10DBG_CT_LOOKUP4_2\x101\x12\x13\n" +
"\x0fDBG_CT_CREATED4\x102\x12\x14\n" +
"\x10DBG_CT_LOOKUP6_1\x103\x12\x14\n" +
"\x10DBG_CT_LOOKUP6_2\x104\x12\x13\n" +
"\x0fDBG_CT_CREATED6\x105\x12\x12\n" +
"\x0eDBG_SKIP_PROXY\x106\x12\x11\n" +
"\rDBG_L4_CREATE\x107\x12\x19\n" +
"\x15DBG_IP_ID_MAP_FAILED4\x108\x12\x19\n" +
"\x15DBG_IP_ID_MAP_FAILED6\x109\x12\x1a\n" +
"\x16DBG_IP_ID_MAP_SUCCEED4\x10:\x12\x1a\n" +
"\x16DBG_IP_ID_MAP_SUCCEED6\x10;\x12\x13\n" +
"\x0fDBG_LB_STALE_CT\x10<\x12\x18\n" +
"\x14DBG_INHERIT_IDENTITY\x10=\x12\x12\n" +
"\x0eDBG_SK_LOOKUP4\x10>\x12\x12\n" +
"\x0eDBG_SK_LOOKUP6\x10?\x12\x11\n" +
"\rDBG_SK_ASSIGN\x10@\x12\r\n" +
"\tDBG_L7_LB\x10A\x12\x13\n" +
"\x0fDBG_SKIP_POLICY\x10BB&Z$github.com/cilium/cilium/api/v1/flowb\x06proto3"
// Lazily-computed gzipped form of the raw descriptor; populated exactly once
// by file_flow_flow_proto_rawDescGZIP below.
var (
	file_flow_flow_proto_rawDescOnce sync.Once
	file_flow_flow_proto_rawDescData []byte
)
// file_flow_flow_proto_rawDescGZIP returns the gzip-compressed raw descriptor
// bytes for flow/flow.proto, compressing the descriptor string exactly once.
func file_flow_flow_proto_rawDescGZIP() []byte {
	file_flow_flow_proto_rawDescOnce.Do(func() {
		// View the descriptor string as a byte slice without copying, then
		// cache its compressed form for all subsequent callers.
		raw := unsafe.Slice(unsafe.StringData(file_flow_flow_proto_rawDesc), len(file_flow_flow_proto_rawDesc))
		file_flow_flow_proto_rawDescData = protoimpl.X.CompressGZIP(raw)
	})
	return file_flow_flow_proto_rawDescData
}
// Reflection backing storage for the 16 enums and 40 messages declared in
// flow/flow.proto; filled in by the TypeBuilder in file_flow_flow_proto_init.
var file_flow_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 16)
var file_flow_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 40)
// file_flow_flow_proto_goTypes maps descriptor type indices to Go types:
// indices 0-15 are the file's enums, 16-55 its messages, and 56-60 the
// imported well-known types. file_flow_flow_proto_depIdxs refers to entries
// in this table by index.
var file_flow_flow_proto_goTypes = []any{
	(FlowType)(0),                     // 0: flow.FlowType
	(AuthType)(0),                     // 1: flow.AuthType
	(TraceObservationPoint)(0),        // 2: flow.TraceObservationPoint
	(TraceReason)(0),                  // 3: flow.TraceReason
	(L7FlowType)(0),                   // 4: flow.L7FlowType
	(IPVersion)(0),                    // 5: flow.IPVersion
	(Verdict)(0),                      // 6: flow.Verdict
	(DropReason)(0),                   // 7: flow.DropReason
	(TrafficDirection)(0),             // 8: flow.TrafficDirection
	(DebugCapturePoint)(0),            // 9: flow.DebugCapturePoint
	(EventType)(0),                    // 10: flow.EventType
	(LostEventSource)(0),              // 11: flow.LostEventSource
	(AgentEventType)(0),               // 12: flow.AgentEventType
	(SocketTranslationPoint)(0),       // 13: flow.SocketTranslationPoint
	(DebugEventType)(0),               // 14: flow.DebugEventType
	(Tunnel_Protocol)(0),              // 15: flow.Tunnel.Protocol
	(*Flow)(nil),                      // 16: flow.Flow
	(*FileInfo)(nil),                  // 17: flow.FileInfo
	(*Layer4)(nil),                    // 18: flow.Layer4
	(*Layer7)(nil),                    // 19: flow.Layer7
	(*TraceContext)(nil),              // 20: flow.TraceContext
	(*TraceParent)(nil),               // 21: flow.TraceParent
	(*Endpoint)(nil),                  // 22: flow.Endpoint
	(*Workload)(nil),                  // 23: flow.Workload
	(*TCP)(nil),                       // 24: flow.TCP
	(*IP)(nil),                        // 25: flow.IP
	(*Ethernet)(nil),                  // 26: flow.Ethernet
	(*TCPFlags)(nil),                  // 27: flow.TCPFlags
	(*UDP)(nil),                       // 28: flow.UDP
	(*SCTP)(nil),                      // 29: flow.SCTP
	(*ICMPv4)(nil),                    // 30: flow.ICMPv4
	(*ICMPv6)(nil),                    // 31: flow.ICMPv6
	(*Tunnel)(nil),                    // 32: flow.Tunnel
	(*Policy)(nil),                    // 33: flow.Policy
	(*EventTypeFilter)(nil),           // 34: flow.EventTypeFilter
	(*CiliumEventType)(nil),           // 35: flow.CiliumEventType
	(*FlowFilter)(nil),                // 36: flow.FlowFilter
	(*DNS)(nil),                       // 37: flow.DNS
	(*HTTPHeader)(nil),                // 38: flow.HTTPHeader
	(*HTTP)(nil),                      // 39: flow.HTTP
	(*Kafka)(nil),                     // 40: flow.Kafka
	(*Service)(nil),                   // 41: flow.Service
	(*LostEvent)(nil),                 // 42: flow.LostEvent
	(*AgentEvent)(nil),                // 43: flow.AgentEvent
	(*AgentEventUnknown)(nil),         // 44: flow.AgentEventUnknown
	(*TimeNotification)(nil),          // 45: flow.TimeNotification
	(*PolicyUpdateNotification)(nil),  // 46: flow.PolicyUpdateNotification
	(*EndpointRegenNotification)(nil), // 47: flow.EndpointRegenNotification
	(*EndpointUpdateNotification)(nil), // 48: flow.EndpointUpdateNotification
	(*IPCacheNotification)(nil),       // 49: flow.IPCacheNotification
	(*ServiceUpsertNotificationAddr)(nil), // 50: flow.ServiceUpsertNotificationAddr
	(*ServiceUpsertNotification)(nil), // 51: flow.ServiceUpsertNotification
	(*ServiceDeleteNotification)(nil), // 52: flow.ServiceDeleteNotification
	(*NetworkInterface)(nil),          // 53: flow.NetworkInterface
	(*DebugEvent)(nil),                // 54: flow.DebugEvent
	(*FlowFilter_Experimental)(nil),   // 55: flow.FlowFilter.Experimental
	(*timestamppb.Timestamp)(nil),     // 56: google.protobuf.Timestamp
	(*wrapperspb.BoolValue)(nil),      // 57: google.protobuf.BoolValue
	(*anypb.Any)(nil),                 // 58: google.protobuf.Any
	(*wrapperspb.Int32Value)(nil),     // 59: google.protobuf.Int32Value
	(*wrapperspb.UInt32Value)(nil),    // 60: google.protobuf.UInt32Value
}
// file_flow_flow_proto_depIdxs records, for every field whose type is a
// message or enum, the index of that type within file_flow_flow_proto_goTypes.
// The trailing five entries delimit the method/extension sub-lists (all empty
// here: [79:79]) and the field type_name sub-list ([0:79]).
var file_flow_flow_proto_depIdxs = []int32{
	56, // 0: flow.Flow.time:type_name -> google.protobuf.Timestamp
	6,  // 1: flow.Flow.verdict:type_name -> flow.Verdict
	1,  // 2: flow.Flow.auth_type:type_name -> flow.AuthType
	26, // 3: flow.Flow.ethernet:type_name -> flow.Ethernet
	25, // 4: flow.Flow.IP:type_name -> flow.IP
	18, // 5: flow.Flow.l4:type_name -> flow.Layer4
	32, // 6: flow.Flow.tunnel:type_name -> flow.Tunnel
	22, // 7: flow.Flow.source:type_name -> flow.Endpoint
	22, // 8: flow.Flow.destination:type_name -> flow.Endpoint
	0,  // 9: flow.Flow.Type:type_name -> flow.FlowType
	19, // 10: flow.Flow.l7:type_name -> flow.Layer7
	35, // 11: flow.Flow.event_type:type_name -> flow.CiliumEventType
	41, // 12: flow.Flow.source_service:type_name -> flow.Service
	41, // 13: flow.Flow.destination_service:type_name -> flow.Service
	8,  // 14: flow.Flow.traffic_direction:type_name -> flow.TrafficDirection
	2,  // 15: flow.Flow.trace_observation_point:type_name -> flow.TraceObservationPoint
	3,  // 16: flow.Flow.trace_reason:type_name -> flow.TraceReason
	17, // 17: flow.Flow.file:type_name -> flow.FileInfo
	7,  // 18: flow.Flow.drop_reason_desc:type_name -> flow.DropReason
	57, // 19: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue
	9,  // 20: flow.Flow.debug_capture_point:type_name -> flow.DebugCapturePoint
	53, // 21: flow.Flow.interface:type_name -> flow.NetworkInterface
	20, // 22: flow.Flow.trace_context:type_name -> flow.TraceContext
	13, // 23: flow.Flow.sock_xlate_point:type_name -> flow.SocketTranslationPoint
	58, // 24: flow.Flow.extensions:type_name -> google.protobuf.Any
	33, // 25: flow.Flow.egress_allowed_by:type_name -> flow.Policy
	33, // 26: flow.Flow.ingress_allowed_by:type_name -> flow.Policy
	33, // 27: flow.Flow.egress_denied_by:type_name -> flow.Policy
	33, // 28: flow.Flow.ingress_denied_by:type_name -> flow.Policy
	24, // 29: flow.Layer4.TCP:type_name -> flow.TCP
	28, // 30: flow.Layer4.UDP:type_name -> flow.UDP
	30, // 31: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4
	31, // 32: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6
	29, // 33: flow.Layer4.SCTP:type_name -> flow.SCTP
	4,  // 34: flow.Layer7.type:type_name -> flow.L7FlowType
	37, // 35: flow.Layer7.dns:type_name -> flow.DNS
	39, // 36: flow.Layer7.http:type_name -> flow.HTTP
	40, // 37: flow.Layer7.kafka:type_name -> flow.Kafka
	21, // 38: flow.TraceContext.parent:type_name -> flow.TraceParent
	23, // 39: flow.Endpoint.workloads:type_name -> flow.Workload
	27, // 40: flow.TCP.flags:type_name -> flow.TCPFlags
	5,  // 41: flow.IP.ipVersion:type_name -> flow.IPVersion
	15, // 42: flow.Tunnel.protocol:type_name -> flow.Tunnel.Protocol
	25, // 43: flow.Tunnel.IP:type_name -> flow.IP
	18, // 44: flow.Tunnel.l4:type_name -> flow.Layer4
	23, // 45: flow.FlowFilter.source_workload:type_name -> flow.Workload
	23, // 46: flow.FlowFilter.destination_workload:type_name -> flow.Workload
	8,  // 47: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection
	6,  // 48: flow.FlowFilter.verdict:type_name -> flow.Verdict
	7,  // 49: flow.FlowFilter.drop_reason_desc:type_name -> flow.DropReason
	53, // 50: flow.FlowFilter.interface:type_name -> flow.NetworkInterface
	34, // 51: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter
	38, // 52: flow.FlowFilter.http_header:type_name -> flow.HTTPHeader
	27, // 53: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags
	5,  // 54: flow.FlowFilter.ip_version:type_name -> flow.IPVersion
	55, // 55: flow.FlowFilter.experimental:type_name -> flow.FlowFilter.Experimental
	38, // 56: flow.HTTP.headers:type_name -> flow.HTTPHeader
	11, // 57: flow.LostEvent.source:type_name -> flow.LostEventSource
	59, // 58: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value
	12, // 59: flow.AgentEvent.type:type_name -> flow.AgentEventType
	44, // 60: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown
	45, // 61: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification
	46, // 62: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification
	47, // 63: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification
	48, // 64: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification
	49, // 65: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification
	51, // 66: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification
	52, // 67: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification
	56, // 68: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp
	60, // 69: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value
	50, // 70: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr
	50, // 71: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr
	14, // 72: flow.DebugEvent.type:type_name -> flow.DebugEventType
	22, // 73: flow.DebugEvent.source:type_name -> flow.Endpoint
	60, // 74: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value
	60, // 75: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value
	60, // 76: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value
	60, // 77: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value
	59, // 78: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value
	79, // [79:79] is the sub-list for method output_type
	79, // [79:79] is the sub-list for method input_type
	79, // [79:79] is the sub-list for extension type_name
	79, // [79:79] is the sub-list for extension extendee
	0,  // [0:79] is the sub-list for field type_name
}
func init() { file_flow_flow_proto_init() }

// file_flow_flow_proto_init builds and registers the flow/flow.proto file
// descriptor and its enum/message types with the protobuf runtime. It is
// idempotent: the File_flow_flow_proto guard makes repeat calls no-ops.
func file_flow_flow_proto_init() {
	if File_flow_flow_proto != nil {
		return
	}
	// Register the oneof wrapper types for Layer4.protocol (msgTypes[2]).
	file_flow_flow_proto_msgTypes[2].OneofWrappers = []any{
		(*Layer4_TCP)(nil),
		(*Layer4_UDP)(nil),
		(*Layer4_ICMPv4)(nil),
		(*Layer4_ICMPv6)(nil),
		(*Layer4_SCTP)(nil),
	}
	// Register the oneof wrapper types for Layer7.record (msgTypes[3]).
	file_flow_flow_proto_msgTypes[3].OneofWrappers = []any{
		(*Layer7_Dns)(nil),
		(*Layer7_Http)(nil),
		(*Layer7_Kafka)(nil),
	}
	// Register the oneof wrapper types for AgentEvent.notification (msgTypes[27]).
	file_flow_flow_proto_msgTypes[27].OneofWrappers = []any{
		(*AgentEvent_Unknown)(nil),
		(*AgentEvent_AgentStart)(nil),
		(*AgentEvent_PolicyUpdate)(nil),
		(*AgentEvent_EndpointRegenerate)(nil),
		(*AgentEvent_EndpointUpdate)(nil),
		(*AgentEvent_IpcacheUpdate)(nil),
		(*AgentEvent_ServiceUpsert)(nil),
		(*AgentEvent_ServiceDelete)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_flow_flow_proto_rawDesc), len(file_flow_flow_proto_rawDesc)),
			NumEnums:      16,
			NumMessages:   40,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_flow_flow_proto_goTypes,
		DependencyIndexes: file_flow_flow_proto_depIdxs,
		EnumInfos:         file_flow_flow_proto_enumTypes,
		MessageInfos:      file_flow_flow_proto_msgTypes,
	}.Build()
	File_flow_flow_proto = out.File
	// Release the construction-only tables so they can be garbage collected.
	file_flow_flow_proto_goTypes = nil
	file_flow_flow_proto_depIdxs = nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go-json. DO NOT EDIT.
// source: flow/flow.proto
package flow
import (
"google.golang.org/protobuf/encoding/protojson"
)
// MarshalJSON implements json.Marshaler; Flow is rendered by protojson with
// the original proto field names preserved.
func (msg *Flow) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Flow via protojson defaults.
func (msg *Flow) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for FileInfo using proto field names.
func (msg *FileInfo) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for FileInfo via protojson defaults.
func (msg *FileInfo) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for Layer4 using proto field names.
func (msg *Layer4) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Layer4 via protojson defaults.
func (msg *Layer4) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for Layer7 using proto field names.
func (msg *Layer7) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Layer7 via protojson defaults.
func (msg *Layer7) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for TraceContext using proto field names.
func (msg *TraceContext) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for TraceContext via protojson defaults.
func (msg *TraceContext) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for TraceParent using proto field names.
func (msg *TraceParent) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for TraceParent via protojson defaults.
func (msg *TraceParent) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for Endpoint using proto field names.
func (msg *Endpoint) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Endpoint via protojson defaults.
func (msg *Endpoint) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for Workload using proto field names.
func (msg *Workload) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Workload via protojson defaults.
func (msg *Workload) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for TCP using proto field names.
func (msg *TCP) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for TCP via protojson defaults.
func (msg *TCP) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for IP using proto field names.
func (msg *IP) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for IP via protojson defaults.
func (msg *IP) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler for Ethernet using proto field names.
func (msg *Ethernet) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Ethernet via protojson defaults.
func (msg *Ethernet) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for TCPFlags using proto field names.
func (msg *TCPFlags) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for TCPFlags via protojson defaults.
func (msg *TCPFlags) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for UDP using proto field names.
func (msg *UDP) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for UDP via protojson defaults.
func (msg *UDP) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for SCTP using proto field names.
func (msg *SCTP) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for SCTP via protojson defaults.
func (msg *SCTP) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for ICMPv4 using proto field names.
func (msg *ICMPv4) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for ICMPv4 via protojson defaults.
func (msg *ICMPv4) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for ICMPv6 using proto field names.
func (msg *ICMPv6) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for ICMPv6 via protojson defaults.
func (msg *ICMPv6) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for Tunnel using proto field names.
func (msg *Tunnel) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Tunnel via protojson defaults.
func (msg *Tunnel) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for Policy using proto field names.
func (msg *Policy) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Policy via protojson defaults.
func (msg *Policy) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for EventTypeFilter using proto field names.
func (msg *EventTypeFilter) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for EventTypeFilter via protojson defaults.
func (msg *EventTypeFilter) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for CiliumEventType using proto field names.
func (msg *CiliumEventType) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for CiliumEventType via protojson defaults.
func (msg *CiliumEventType) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler for FlowFilter using proto field names.
func (msg *FlowFilter) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for FlowFilter via protojson defaults.
func (msg *FlowFilter) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for FlowFilter_Experimental using proto field names.
func (msg *FlowFilter_Experimental) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for FlowFilter_Experimental via protojson defaults.
func (msg *FlowFilter_Experimental) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for DNS using proto field names.
func (msg *DNS) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for DNS via protojson defaults.
func (msg *DNS) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for HTTPHeader using proto field names.
func (msg *HTTPHeader) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for HTTPHeader via protojson defaults.
func (msg *HTTPHeader) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for HTTP using proto field names.
func (msg *HTTP) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for HTTP via protojson defaults.
func (msg *HTTP) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for Kafka using proto field names.
func (msg *Kafka) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Kafka via protojson defaults.
func (msg *Kafka) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for Service using proto field names.
func (msg *Service) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for Service via protojson defaults.
func (msg *Service) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for LostEvent using proto field names.
func (msg *LostEvent) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for LostEvent via protojson defaults.
func (msg *LostEvent) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for AgentEvent using proto field names.
func (msg *AgentEvent) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for AgentEvent via protojson defaults.
func (msg *AgentEvent) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for AgentEventUnknown using proto field names.
func (msg *AgentEventUnknown) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for AgentEventUnknown via protojson defaults.
func (msg *AgentEventUnknown) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler for TimeNotification using proto field names.
func (msg *TimeNotification) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for TimeNotification via protojson defaults.
func (msg *TimeNotification) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for PolicyUpdateNotification using proto field names.
func (msg *PolicyUpdateNotification) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for PolicyUpdateNotification via protojson defaults.
func (msg *PolicyUpdateNotification) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for EndpointRegenNotification using proto field names.
func (msg *EndpointRegenNotification) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for EndpointRegenNotification via protojson defaults.
func (msg *EndpointRegenNotification) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for EndpointUpdateNotification using proto field names.
func (msg *EndpointUpdateNotification) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for EndpointUpdateNotification via protojson defaults.
func (msg *EndpointUpdateNotification) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for IPCacheNotification using proto field names.
func (msg *IPCacheNotification) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for IPCacheNotification via protojson defaults.
func (msg *IPCacheNotification) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for ServiceUpsertNotificationAddr using proto field names.
func (msg *ServiceUpsertNotificationAddr) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for ServiceUpsertNotificationAddr via protojson defaults.
func (msg *ServiceUpsertNotificationAddr) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for ServiceUpsertNotification using proto field names.
func (msg *ServiceUpsertNotification) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for ServiceUpsertNotification via protojson defaults.
func (msg *ServiceUpsertNotification) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for ServiceDeleteNotification using proto field names.
func (msg *ServiceDeleteNotification) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for ServiceDeleteNotification via protojson defaults.
func (msg *ServiceDeleteNotification) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for NetworkInterface using proto field names.
func (msg *NetworkInterface) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for NetworkInterface via protojson defaults.
func (msg *NetworkInterface) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler for DebugEvent using proto field names.
func (msg *DebugEvent) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler for DebugEvent via protojson defaults.
func (msg *DebugEvent) UnmarshalJSON(b []byte) error {
	opts := protojson.UnmarshalOptions{}
	return opts.Unmarshal(b, msg)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package client
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/health/client/connectivity"
"github.com/cilium/cilium/api/v1/health/client/restapi"
)
// Default is a ready-to-use cilium health API HTTP client wired to the
// spec defaults below (http://localhost/v1beta).
var Default = NewHTTPClient(nil)

const (
	// DefaultHost is the default Host
	// found in Meta (info) section of spec file
	DefaultHost string = "localhost"
	// DefaultBasePath is the default BasePath
	// found in Meta (info) section of spec file
	DefaultBasePath string = "/v1beta"
)

// DefaultSchemes are the default schemes found in Meta (info) section of spec file
var DefaultSchemes = []string{"http"}
// NewHTTPClient creates a new cilium health API HTTP client using the default
// transport configuration (DefaultHost/DefaultBasePath/DefaultSchemes).
// A nil formats registry falls back to strfmt.Default inside New.
func NewHTTPClient(formats strfmt.Registry) *CiliumHealthAPI {
	return NewHTTPClientWithConfig(formats, nil)
}
// NewHTTPClientWithConfig creates a new cilium health API HTTP client,
// using a customizable transport config. A nil cfg selects the defaults
// from the spec's meta section.
func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *CiliumHealthAPI {
	// Fall back to the spec defaults when no config was supplied.
	if cfg == nil {
		cfg = DefaultTransportConfig()
	}
	// Build the runtime transport from the config and hand it to New.
	return New(httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes), formats)
}
// New creates a new cilium health API client from a transport and a strfmt
// registry, wiring up the per-tag sub-clients. A nil registry defaults to
// strfmt.Default.
func New(transport runtime.ClientTransport, formats strfmt.Registry) *CiliumHealthAPI {
	// ensure nullable parameters have default
	if formats == nil {
		formats = strfmt.Default
	}
	return &CiliumHealthAPI{
		Transport:    transport,
		Connectivity: connectivity.New(transport, formats),
		Restapi:      restapi.New(transport, formats),
	}
}
// DefaultTransportConfig creates a TransportConfig with the
// default settings taken from the meta section of the spec file.
func DefaultTransportConfig() *TransportConfig {
	cfg := new(TransportConfig)
	cfg.Host = DefaultHost
	cfg.BasePath = DefaultBasePath
	cfg.Schemes = DefaultSchemes
	return cfg
}
// TransportConfig contains the transport related info,
// found in the meta section of the spec file.
type TransportConfig struct {
	// Host is the API host, e.g. "localhost".
	Host string
	// BasePath is the path prefix for every endpoint, e.g. "/v1beta".
	BasePath string
	// Schemes lists the URL schemes to try, e.g. []string{"http"}.
	Schemes []string
}
// WithHost overrides the default host,
// provided by the meta section of the spec file.
// It returns cfg to allow call chaining.
func (cfg *TransportConfig) WithHost(host string) *TransportConfig {
	cfg.Host = host
	return cfg
}

// WithBasePath overrides the default basePath,
// provided by the meta section of the spec file.
// It returns cfg to allow call chaining.
func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig {
	cfg.BasePath = basePath
	return cfg
}

// WithSchemes overrides the default schemes,
// provided by the meta section of the spec file.
// It returns cfg to allow call chaining.
func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig {
	cfg.Schemes = schemes
	return cfg
}
// CiliumHealthAPI is a client for cilium health API
type CiliumHealthAPI struct {
	// Connectivity groups the /status and /status/probe operations.
	Connectivity connectivity.ClientService
	// Restapi groups the remaining (untagged) operations.
	Restapi restapi.ClientService
	// Transport submits every operation of the sub-clients.
	Transport runtime.ClientTransport
}

// SetTransport changes the transport on the client and all its subresources
func (c *CiliumHealthAPI) SetTransport(transport runtime.ClientTransport) {
	c.Transport = transport
	c.Connectivity.SetTransport(transport)
	c.Restapi.SetTransport(transport)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package connectivity
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new connectivity API client backed by the given transport
// and strfmt registry.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	return &Client{transport: transport, formats: formats}
}
// NewClientWithBasicAuth creates a new connectivity API client with basic auth credentials.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - user: user for basic authentication header.
// - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: transport, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new connectivity API client with a bearer token for authentication.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: transport, formats: strfmt.Default}
}
/*
Client for connectivity API
*/
type Client struct {
	// transport submits the operations built by the methods below.
	transport runtime.ClientTransport
	// formats validates/serializes strfmt types in responses.
	formats strfmt.Registry
}

// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods
type ClientService interface {
	// GetStatus returns the cached connectivity status of the cluster.
	GetStatus(params *GetStatusParams, opts ...ClientOption) (*GetStatusOK, error)
	// PutStatusProbe runs a synchronous connectivity probe.
	PutStatusProbe(params *PutStatusProbeParams, opts ...ClientOption) (*PutStatusProbeOK, error)
	// SetTransport changes the transport used by subsequent calls.
	SetTransport(transport runtime.ClientTransport)
}
/*
GetStatus gets connectivity status of the cilium cluster

Returns the connectivity status to all other cilium-health instances
using interval-based probing.
*/
func (a *Client) GetStatus(params *GetStatusParams, opts ...ClientOption) (*GetStatusOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewGetStatusParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetStatus",
		Method:             "GET",
		PathPattern:        "/status",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetStatusReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(op)
	}
	res, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, isOK := res.(*GetStatusOK); isOK {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	msg := fmt.Sprintf("unexpected success response for GetStatus: API contract not enforced by server. Client expected to get an error, but got: %T", res)
	panic(msg)
}
/*
PutStatusProbe runs synchronous connectivity probe to determine status of the cilium cluster

Runs a synchronous probe to all other cilium-health instances and
returns the connectivity status.
*/
func (a *Client) PutStatusProbe(params *PutStatusProbeParams, opts ...ClientOption) (*PutStatusProbeOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		params = NewPutStatusProbeParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "PutStatusProbe",
		Method:             "PUT",
		PathPattern:        "/status/probe",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &PutStatusProbeReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, customize := range opts {
		customize(op)
	}
	res, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	if success, isOK := res.(*PutStatusProbeOK); isOK {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	msg := fmt.Sprintf("unexpected success response for PutStatusProbe: API contract not enforced by server. Client expected to get an error, but got: %T", res)
	panic(msg)
}
// SetTransport changes the transport on the client.
// Subsequent operations are submitted through the new transport.
func (a *Client) SetTransport(transport runtime.ClientTransport) {
	a.transport = transport
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package connectivity
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetStatusParams creates a new GetStatusParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetStatusParams() *GetStatusParams {
	params := GetStatusParams{}
	params.timeout = cr.DefaultTimeout
	return &params
}

// NewGetStatusParamsWithTimeout creates a new GetStatusParams object
// with the ability to set a timeout on a request.
func NewGetStatusParamsWithTimeout(timeout time.Duration) *GetStatusParams {
	params := GetStatusParams{}
	params.timeout = timeout
	return &params
}

// NewGetStatusParamsWithContext creates a new GetStatusParams object
// with the ability to set a context for a request.
func NewGetStatusParamsWithContext(ctx context.Context) *GetStatusParams {
	params := GetStatusParams{}
	params.Context = ctx
	return &params
}

// NewGetStatusParamsWithHTTPClient creates a new GetStatusParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetStatusParamsWithHTTPClient(client *http.Client) *GetStatusParams {
	params := GetStatusParams{}
	params.HTTPClient = client
	return &params
}

/*
GetStatusParams contains all the parameters to send to the API endpoint
for the get status operation.

Typically these are written to a http.Request.
*/
type GetStatusParams struct {
	// timeout bounds the round trip of a single request.
	timeout time.Duration
	// Context carries cancellation and deadlines for the request.
	Context context.Context
	// HTTPClient, when non-nil, overrides the client used to issue the request.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get status params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetStatusParams) WithDefaults() *GetStatusParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get status params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetStatusParams) SetDefaults() {
	// This operation declares no parameters with defaults.
}

// WithTimeout adds the timeout to the get status params
func (o *GetStatusParams) WithTimeout(timeout time.Duration) *GetStatusParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get status params
func (o *GetStatusParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get status params
func (o *GetStatusParams) WithContext(ctx context.Context) *GetStatusParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get status params
func (o *GetStatusParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get status params
func (o *GetStatusParams) WithHTTPClient(client *http.Client) *GetStatusParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get status params
func (o *GetStatusParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request
func (o *GetStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	err := r.SetTimeout(o.timeout)
	if err != nil {
		return err
	}
	// No operation-specific parameters exist; the collection below is kept
	// for structural parity with other generated WriteToRequest methods.
	res := []error{}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package connectivity
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/health/models"
)
// GetStatusReader is a Reader for the GetStatus structure.
type GetStatusReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *GetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	code := response.Code()
	if code == 200 {
		result := NewGetStatusOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	}
	// Any other status code is unexpected for this operation.
	return nil, runtime.NewAPIError("[GET /status] GetStatus", response, code)
}
// NewGetStatusOK creates a GetStatusOK with default headers values
func NewGetStatusOK() *GetStatusOK {
	response := GetStatusOK{}
	return &response
}

/*
GetStatusOK describes a response with status code 200, with default header values.

Success
*/
type GetStatusOK struct {
	Payload *models.HealthStatusResponse
}

// IsSuccess returns true when this get status o k response has a 2xx status code
func (o *GetStatusOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get status o k response has a 3xx status code
func (o *GetStatusOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get status o k response has a 4xx status code
func (o *GetStatusOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get status o k response has a 5xx status code
func (o *GetStatusOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get status o k response a status code equal to that given
func (o *GetStatusOK) IsCode(code int) bool {
	return o.Code() == code
}

// Code gets the status code for the get status o k response
func (o *GetStatusOK) Code() int {
	return 200
}

func (o *GetStatusOK) Error() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /status][%d] getStatusOK %s", o.Code(), body)
}

func (o *GetStatusOK) String() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /status][%d] getStatusOK %s", o.Code(), body)
}

func (o *GetStatusOK) GetPayload() *models.HealthStatusResponse {
	return o.Payload
}

func (o *GetStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	payload := new(models.HealthStatusResponse)
	o.Payload = payload
	// Decode the response body; io.EOF signals an empty body and is tolerated.
	err := consumer.Consume(response.Body(), payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package connectivity
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewPutStatusProbeParams creates a new PutStatusProbeParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewPutStatusProbeParams() *PutStatusProbeParams {
	params := PutStatusProbeParams{}
	params.timeout = cr.DefaultTimeout
	return &params
}

// NewPutStatusProbeParamsWithTimeout creates a new PutStatusProbeParams object
// with the ability to set a timeout on a request.
func NewPutStatusProbeParamsWithTimeout(timeout time.Duration) *PutStatusProbeParams {
	params := PutStatusProbeParams{}
	params.timeout = timeout
	return &params
}

// NewPutStatusProbeParamsWithContext creates a new PutStatusProbeParams object
// with the ability to set a context for a request.
func NewPutStatusProbeParamsWithContext(ctx context.Context) *PutStatusProbeParams {
	params := PutStatusProbeParams{}
	params.Context = ctx
	return &params
}

// NewPutStatusProbeParamsWithHTTPClient creates a new PutStatusProbeParams object
// with the ability to set a custom HTTPClient for a request.
func NewPutStatusProbeParamsWithHTTPClient(client *http.Client) *PutStatusProbeParams {
	params := PutStatusProbeParams{}
	params.HTTPClient = client
	return &params
}

/*
PutStatusProbeParams contains all the parameters to send to the API endpoint
for the put status probe operation.

Typically these are written to a http.Request.
*/
type PutStatusProbeParams struct {
	// timeout bounds the round trip of a single request.
	timeout time.Duration
	// Context carries cancellation and deadlines for the request.
	Context context.Context
	// HTTPClient, when non-nil, overrides the client used to issue the request.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the put status probe params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PutStatusProbeParams) WithDefaults() *PutStatusProbeParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the put status probe params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *PutStatusProbeParams) SetDefaults() {
	// This operation declares no parameters with defaults.
}

// WithTimeout adds the timeout to the put status probe params
func (o *PutStatusProbeParams) WithTimeout(timeout time.Duration) *PutStatusProbeParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the put status probe params
func (o *PutStatusProbeParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the put status probe params
func (o *PutStatusProbeParams) WithContext(ctx context.Context) *PutStatusProbeParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the put status probe params
func (o *PutStatusProbeParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the put status probe params
func (o *PutStatusProbeParams) WithHTTPClient(client *http.Client) *PutStatusProbeParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the put status probe params
func (o *PutStatusProbeParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request
func (o *PutStatusProbeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	err := r.SetTimeout(o.timeout)
	if err != nil {
		return err
	}
	// No operation-specific parameters exist; the collection below is kept
	// for structural parity with other generated WriteToRequest methods.
	res := []error{}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package connectivity
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/health/models"
)
// PutStatusProbeReader is a Reader for the PutStatusProbe structure.
type PutStatusProbeReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *PutStatusProbeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	code := response.Code()
	if code == 200 {
		result := NewPutStatusProbeOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	}
	if code == 403 {
		result := NewPutStatusProbeForbidden()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		// Non-2xx responses are surfaced as errors.
		return nil, result
	}
	if code == 500 {
		result := NewPutStatusProbeFailed()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	}
	// Any other status code is unexpected for this operation.
	return nil, runtime.NewAPIError("[PUT /status/probe] PutStatusProbe", response, code)
}
// NewPutStatusProbeOK creates a PutStatusProbeOK with default headers values
func NewPutStatusProbeOK() *PutStatusProbeOK {
	response := PutStatusProbeOK{}
	return &response
}

/*
PutStatusProbeOK describes a response with status code 200, with default header values.

Success
*/
type PutStatusProbeOK struct {
	Payload *models.HealthStatusResponse
}

// IsSuccess returns true when this put status probe o k response has a 2xx status code
func (o *PutStatusProbeOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this put status probe o k response has a 3xx status code
func (o *PutStatusProbeOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put status probe o k response has a 4xx status code
func (o *PutStatusProbeOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this put status probe o k response has a 5xx status code
func (o *PutStatusProbeOK) IsServerError() bool {
	return false
}

// IsCode returns true when this put status probe o k response a status code equal to that given
func (o *PutStatusProbeOK) IsCode(code int) bool {
	return o.Code() == code
}

// Code gets the status code for the put status probe o k response
func (o *PutStatusProbeOK) Code() int {
	return 200
}

func (o *PutStatusProbeOK) Error() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %s", o.Code(), body)
}

func (o *PutStatusProbeOK) String() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %s", o.Code(), body)
}

func (o *PutStatusProbeOK) GetPayload() *models.HealthStatusResponse {
	return o.Payload
}

func (o *PutStatusProbeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	payload := new(models.HealthStatusResponse)
	o.Payload = payload
	// Decode the response body; io.EOF signals an empty body and is tolerated.
	err := consumer.Consume(response.Body(), payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewPutStatusProbeForbidden creates a PutStatusProbeForbidden with default headers values
func NewPutStatusProbeForbidden() *PutStatusProbeForbidden {
	response := PutStatusProbeForbidden{}
	return &response
}

/*
PutStatusProbeForbidden describes a response with status code 403, with default header values.

Forbidden
*/
type PutStatusProbeForbidden struct {
}

// IsSuccess returns true when this put status probe forbidden response has a 2xx status code
func (o *PutStatusProbeForbidden) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put status probe forbidden response has a 3xx status code
func (o *PutStatusProbeForbidden) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put status probe forbidden response has a 4xx status code
func (o *PutStatusProbeForbidden) IsClientError() bool {
	return true
}

// IsServerError returns true when this put status probe forbidden response has a 5xx status code
func (o *PutStatusProbeForbidden) IsServerError() bool {
	return false
}

// IsCode returns true when this put status probe forbidden response a status code equal to that given
func (o *PutStatusProbeForbidden) IsCode(code int) bool {
	return o.Code() == code
}

// Code gets the status code for the put status probe forbidden response
func (o *PutStatusProbeForbidden) Code() int {
	return 403
}

func (o *PutStatusProbeForbidden) Error() string {
	return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeForbidden", o.Code())
}

func (o *PutStatusProbeForbidden) String() string {
	return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeForbidden", o.Code())
}

func (o *PutStatusProbeForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// A 403 carries no body to decode.
	return nil
}
// NewPutStatusProbeFailed creates a PutStatusProbeFailed with default headers values
func NewPutStatusProbeFailed() *PutStatusProbeFailed {
	response := PutStatusProbeFailed{}
	return &response
}

/*
PutStatusProbeFailed describes a response with status code 500, with default header values.

Internal error occurred while conducting connectivity probe
*/
type PutStatusProbeFailed struct {
	Payload models.Error
}

// IsSuccess returns true when this put status probe failed response has a 2xx status code
func (o *PutStatusProbeFailed) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this put status probe failed response has a 3xx status code
func (o *PutStatusProbeFailed) IsRedirect() bool {
	return false
}

// IsClientError returns true when this put status probe failed response has a 4xx status code
func (o *PutStatusProbeFailed) IsClientError() bool {
	return false
}

// IsServerError returns true when this put status probe failed response has a 5xx status code
func (o *PutStatusProbeFailed) IsServerError() bool {
	return true
}

// IsCode returns true when this put status probe failed response a status code equal to that given
func (o *PutStatusProbeFailed) IsCode(code int) bool {
	return o.Code() == code
}

// Code gets the status code for the put status probe failed response
func (o *PutStatusProbeFailed) Code() int {
	return 500
}

func (o *PutStatusProbeFailed) Error() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %s", o.Code(), body)
}

func (o *PutStatusProbeFailed) String() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %s", o.Code(), body)
}

func (o *PutStatusProbeFailed) GetPayload() models.Error {
	return o.Payload
}

func (o *PutStatusProbeFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// Decode the error payload in place; io.EOF signals an empty body and is tolerated.
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package restapi
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// NewGetHealthzParams creates a new GetHealthzParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewGetHealthzParams() *GetHealthzParams {
	params := GetHealthzParams{}
	params.timeout = cr.DefaultTimeout
	return &params
}

// NewGetHealthzParamsWithTimeout creates a new GetHealthzParams object
// with the ability to set a timeout on a request.
func NewGetHealthzParamsWithTimeout(timeout time.Duration) *GetHealthzParams {
	params := GetHealthzParams{}
	params.timeout = timeout
	return &params
}

// NewGetHealthzParamsWithContext creates a new GetHealthzParams object
// with the ability to set a context for a request.
func NewGetHealthzParamsWithContext(ctx context.Context) *GetHealthzParams {
	params := GetHealthzParams{}
	params.Context = ctx
	return &params
}

// NewGetHealthzParamsWithHTTPClient creates a new GetHealthzParams object
// with the ability to set a custom HTTPClient for a request.
func NewGetHealthzParamsWithHTTPClient(client *http.Client) *GetHealthzParams {
	params := GetHealthzParams{}
	params.HTTPClient = client
	return &params
}

/*
GetHealthzParams contains all the parameters to send to the API endpoint
for the get healthz operation.

Typically these are written to a http.Request.
*/
type GetHealthzParams struct {
	// timeout bounds the round trip of a single request.
	timeout time.Duration
	// Context carries cancellation and deadlines for the request.
	Context context.Context
	// HTTPClient, when non-nil, overrides the client used to issue the request.
	HTTPClient *http.Client
}

// WithDefaults hydrates default values in the get healthz params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetHealthzParams) WithDefaults() *GetHealthzParams {
	o.SetDefaults()
	return o
}

// SetDefaults hydrates default values in the get healthz params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *GetHealthzParams) SetDefaults() {
	// This operation declares no parameters with defaults.
}

// WithTimeout adds the timeout to the get healthz params
func (o *GetHealthzParams) WithTimeout(timeout time.Duration) *GetHealthzParams {
	o.timeout = timeout
	return o
}

// SetTimeout adds the timeout to the get healthz params
func (o *GetHealthzParams) SetTimeout(timeout time.Duration) {
	o.timeout = timeout
}

// WithContext adds the context to the get healthz params
func (o *GetHealthzParams) WithContext(ctx context.Context) *GetHealthzParams {
	o.Context = ctx
	return o
}

// SetContext adds the context to the get healthz params
func (o *GetHealthzParams) SetContext(ctx context.Context) {
	o.Context = ctx
}

// WithHTTPClient adds the HTTPClient to the get healthz params
func (o *GetHealthzParams) WithHTTPClient(client *http.Client) *GetHealthzParams {
	o.HTTPClient = client
	return o
}

// SetHTTPClient adds the HTTPClient to the get healthz params
func (o *GetHealthzParams) SetHTTPClient(client *http.Client) {
	o.HTTPClient = client
}

// WriteToRequest writes these params to a swagger request
func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	err := r.SetTimeout(o.timeout)
	if err != nil {
		return err
	}
	// No operation-specific parameters exist; the collection below is kept
	// for structural parity with other generated WriteToRequest methods.
	res := []error{}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package restapi
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/cilium/cilium/api/v1/health/models"
)
// GetHealthzReader is a Reader for the GetHealthz structure.
type GetHealthzReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	code := response.Code()
	if code == 200 {
		result := NewGetHealthzOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	}
	if code == 500 {
		result := NewGetHealthzFailed()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		// Non-2xx responses are surfaced as errors.
		return nil, result
	}
	// Any other status code is unexpected for this operation.
	return nil, runtime.NewAPIError("[GET /healthz] GetHealthz", response, code)
}
// NewGetHealthzOK creates a GetHealthzOK with default headers values
func NewGetHealthzOK() *GetHealthzOK {
	response := GetHealthzOK{}
	return &response
}

/*
GetHealthzOK describes a response with status code 200, with default header values.

Success
*/
type GetHealthzOK struct {
	Payload *models.HealthResponse
}

// IsSuccess returns true when this get healthz o k response has a 2xx status code
func (o *GetHealthzOK) IsSuccess() bool {
	return true
}

// IsRedirect returns true when this get healthz o k response has a 3xx status code
func (o *GetHealthzOK) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get healthz o k response has a 4xx status code
func (o *GetHealthzOK) IsClientError() bool {
	return false
}

// IsServerError returns true when this get healthz o k response has a 5xx status code
func (o *GetHealthzOK) IsServerError() bool {
	return false
}

// IsCode returns true when this get healthz o k response a status code equal to that given
func (o *GetHealthzOK) IsCode(code int) bool {
	return o.Code() == code
}

// Code gets the status code for the get healthz o k response
func (o *GetHealthzOK) Code() int {
	return 200
}

func (o *GetHealthzOK) Error() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %s", o.Code(), body)
}

func (o *GetHealthzOK) String() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %s", o.Code(), body)
}

func (o *GetHealthzOK) GetPayload() *models.HealthResponse {
	return o.Payload
}

func (o *GetHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	payload := new(models.HealthResponse)
	o.Payload = payload
	// Decode the response body; io.EOF signals an empty body and is tolerated.
	err := consumer.Consume(response.Body(), payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewGetHealthzFailed creates a GetHealthzFailed with default headers values
func NewGetHealthzFailed() *GetHealthzFailed {
	response := GetHealthzFailed{}
	return &response
}

/*
GetHealthzFailed describes a response with status code 500, with default header values.

Failed to contact local Cilium daemon
*/
type GetHealthzFailed struct {
	Payload models.Error
}

// IsSuccess returns true when this get healthz failed response has a 2xx status code
func (o *GetHealthzFailed) IsSuccess() bool {
	return false
}

// IsRedirect returns true when this get healthz failed response has a 3xx status code
func (o *GetHealthzFailed) IsRedirect() bool {
	return false
}

// IsClientError returns true when this get healthz failed response has a 4xx status code
func (o *GetHealthzFailed) IsClientError() bool {
	return false
}

// IsServerError returns true when this get healthz failed response has a 5xx status code
func (o *GetHealthzFailed) IsServerError() bool {
	return true
}

// IsCode returns true when this get healthz failed response a status code equal to that given
func (o *GetHealthzFailed) IsCode(code int) bool {
	return o.Code() == code
}

// Code gets the status code for the get healthz failed response
func (o *GetHealthzFailed) Code() int {
	return 500
}

func (o *GetHealthzFailed) Error() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %s", o.Code(), body)
}

func (o *GetHealthzFailed) String() string {
	body, _ := json.Marshal(o.Payload)
	return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %s", o.Code(), body)
}

func (o *GetHealthzFailed) GetPayload() models.Error {
	return o.Payload
}

func (o *GetHealthzFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	// Decode the error payload in place; io.EOF signals an empty body and is tolerated.
	err := consumer.Consume(response.Body(), &o.Payload)
	if err != nil && err != io.EOF {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package restapi
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
// New creates a new restapi API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
	return &Client{transport: transport, formats: formats}
}

// NewClientWithBasicAuth creates a new restapi API client with basic auth credentials.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - user: user for basic authentication header.
// - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
	return &Client{transport: transport, formats: strfmt.Default}
}

// NewClientWithBearerToken creates a new restapi API client with a bearer token for authentication.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
	transport := httptransport.New(host, basePath, []string{scheme})
	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
	return &Client{transport: transport, formats: strfmt.Default}
}

/*
Client for restapi API
*/
type Client struct {
	// transport submits the built operations to the server.
	transport runtime.ClientTransport
	// formats holds the string-format registry used when reading responses.
	formats strfmt.Registry
}

// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)

// ClientService is the interface for Client methods
type ClientService interface {
	GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error)

	SetTransport(transport runtime.ClientTransport)
}

/*
GetHealthz gets health of cilium node

Returns health and status information of the local node including
load and uptime, as well as the status of related components including
the Cilium daemon.
*/
func (a *Client) GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) {
	// TODO: Validate the params before sending
	if params == nil {
		// nil params fall back to the client defaults.
		params = NewGetHealthzParams()
	}
	op := &runtime.ClientOperation{
		ID:                 "GetHealthz",
		Method:             "GET",
		PathPattern:        "/healthz",
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params,
		Reader:             &GetHealthzReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	}
	// Apply caller-supplied customizations before submitting.
	for _, opt := range opts {
		opt(op)
	}
	result, err := a.transport.Submit(op)
	if err != nil {
		return nil, err
	}
	success, ok := result.(*GetHealthzOK)
	if ok {
		return success, nil
	}
	// unexpected success response
	// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
	msg := fmt.Sprintf("unexpected success response for GetHealthz: API contract not enforced by server. Client expected to get an error, but got: %T", result)
	panic(msg)
}

// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
	a.transport = transport
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ConnectivityStatus Connectivity status of a path
//
// swagger:model ConnectivityStatus
type ConnectivityStatus struct {

	// Timestamp of last probe completion
	LastProbed string `json:"lastProbed,omitempty"`

	// Round trip time to node in nanoseconds
	Latency int64 `json:"latency,omitempty"`

	// Human readable status/error/warning message
	Status string `json:"status,omitempty"`
}

// Validate validates this connectivity status
func (m *ConnectivityStatus) Validate(formats strfmt.Registry) error {
	// No validation rules are declared for this model.
	return nil
}

// ContextValidate validates this connectivity status based on context it is used
func (m *ConnectivityStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	// No context-sensitive validation rules are declared for this model.
	return nil
}

// MarshalBinary interface implementation
func (m *ConnectivityStatus) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary interface implementation
func (m *ConnectivityStatus) UnmarshalBinary(b []byte) error {
	decoded := ConnectivityStatus{}
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointStatus Connectivity status to host cilium-health endpoints via different paths
//
// swagger:model EndpointStatus
type EndpointStatus struct {

	// primary address
	PrimaryAddress *PathStatus `json:"primary-address,omitempty"`

	// secondary addresses
	SecondaryAddresses []*PathStatus `json:"secondary-addresses"`
}

// Validate validates this endpoint status
func (m *EndpointStatus) Validate(formats strfmt.Registry) error {
	var errs []error

	if err := m.validatePrimaryAddress(formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.validateSecondaryAddresses(formats); err != nil {
		errs = append(errs, err)
	}

	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}

func (m *EndpointStatus) validatePrimaryAddress(formats strfmt.Registry) error {
	if swag.IsZero(m.PrimaryAddress) { // not required
		return nil
	}
	if m.PrimaryAddress == nil {
		return nil
	}
	if err := m.PrimaryAddress.Validate(formats); err != nil {
		// Re-scope the nested validation error under this field's name.
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("primary-address")
		case *errors.CompositeError:
			return typed.ValidateName("primary-address")
		}
		return err
	}
	return nil
}

func (m *EndpointStatus) validateSecondaryAddresses(formats strfmt.Registry) error {
	if swag.IsZero(m.SecondaryAddresses) { // not required
		return nil
	}
	for i := range m.SecondaryAddresses {
		addr := m.SecondaryAddresses[i]
		if swag.IsZero(addr) { // not required
			continue
		}
		if addr == nil {
			continue
		}
		if err := addr.Validate(formats); err != nil {
			// Re-scope the nested validation error under the indexed field name.
			name := "secondary-addresses" + "." + strconv.Itoa(i)
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName(name)
			case *errors.CompositeError:
				return typed.ValidateName(name)
			}
			return err
		}
	}
	return nil
}

// ContextValidate validate this endpoint status based on the context it is used
func (m *EndpointStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error

	if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil {
		errs = append(errs, err)
	}

	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
func (m *EndpointStatus) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error {
if m.PrimaryAddress != nil {
if swag.IsZero(m.PrimaryAddress) { // not required
return nil
}
if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("primary-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("primary-address")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.SecondaryAddresses); i++ {
if m.SecondaryAddresses[i] != nil {
if swag.IsZero(m.SecondaryAddresses[i]) { // not required
return nil
}
if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointStatus) UnmarshalBinary(b []byte) error {
var res EndpointStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Error error
//
// swagger:model error
type Error string

// Validate checks this error value against its swagger spec; a plain string
// carries no constraints, so validation always succeeds.
func (m Error) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate checks context-dependent constraints; none apply to a
// plain string model.
func (m Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
ciliumModels "github.com/cilium/cilium/api/v1/models"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// HealthResponse Health and status information of local node
//
// swagger:model HealthResponse
type HealthResponse struct {

	// Status of Cilium daemon
	Cilium ciliumModels.StatusResponse `json:"cilium,omitempty"`

	// System load on node
	SystemLoad *LoadResponse `json:"system-load,omitempty"`

	// Uptime of cilium-health instance
	Uptime string `json:"uptime,omitempty"`
}

// Validate validates this health response, aggregating all per-field errors
// into a single composite error.
func (m *HealthResponse) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateCilium(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateSystemLoad(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateCilium validates the optional embedded Cilium status; errors are
// renamed so they are reported under the "cilium" field.
func (m *HealthResponse) validateCilium(formats strfmt.Registry) error {
	// Optional field: a zero value passes without delegation.
	if swag.IsZero(m.Cilium) { // not required
		return nil
	}

	if err := m.Cilium.Validate(formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("cilium")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("cilium")
		}
		return err
	}

	return nil
}

// validateSystemLoad validates the optional system-load sub-object; errors
// are renamed so they are reported under the "system-load" field.
func (m *HealthResponse) validateSystemLoad(formats strfmt.Registry) error {
	if swag.IsZero(m.SystemLoad) { // not required
		return nil
	}

	if m.SystemLoad != nil {
		if err := m.SystemLoad.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("system-load")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("system-load")
			}
			return err
		}
	}

	return nil
}

// ContextValidate validates this health response based on the context it is
// used in, delegating to the per-field context validators below.
func (m *HealthResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateCilium(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateSystemLoad(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateCilium context-validates the embedded Cilium status,
// skipping it when zero-valued.
func (m *HealthResponse) contextValidateCilium(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.Cilium) { // not required
		return nil
	}

	if err := m.Cilium.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("cilium")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("cilium")
		}
		return err
	}

	return nil
}

// contextValidateSystemLoad context-validates the optional system-load
// sub-object, skipping it when nil or zero-valued.
func (m *HealthResponse) contextValidateSystemLoad(ctx context.Context, formats strfmt.Registry) error {
	if m.SystemLoad != nil {

		if swag.IsZero(m.SystemLoad) { // not required
			return nil
		}

		if err := m.SystemLoad.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("system-load")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("system-load")
			}
			return err
		}
	}

	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler using the JSON encoding.
func (m *HealthResponse) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler using the JSON encoding.
func (m *HealthResponse) UnmarshalBinary(b []byte) error {
	var res HealthResponse
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// HealthStatusResponse Connectivity status to other daemons
//
// swagger:model HealthStatusResponse
type HealthStatusResponse struct {

	// Description of the local node
	Local *SelfStatus `json:"local,omitempty"`

	// Connectivity status to each other node
	Nodes []*NodeStatus `json:"nodes"`

	// Interval in seconds between probes
	ProbeInterval string `json:"probeInterval,omitempty"`

	// timestamp
	Timestamp string `json:"timestamp,omitempty"`
}

// Validate validates this health status response, aggregating all per-field
// errors into a single composite error.
func (m *HealthStatusResponse) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateLocal(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateNodes(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateLocal validates the optional local node description; errors are
// renamed so they are reported under the "local" field.
func (m *HealthStatusResponse) validateLocal(formats strfmt.Registry) error {
	if swag.IsZero(m.Local) { // not required
		return nil
	}

	if m.Local != nil {
		if err := m.Local.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("local")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("local")
			}
			return err
		}
	}

	return nil
}

// validateNodes validates each non-zero element of the optional nodes slice,
// naming errors "nodes.<index>".
func (m *HealthStatusResponse) validateNodes(formats strfmt.Registry) error {
	if swag.IsZero(m.Nodes) { // not required
		return nil
	}

	for i := 0; i < len(m.Nodes); i++ {
		if swag.IsZero(m.Nodes[i]) { // not required
			continue
		}

		if m.Nodes[i] != nil {
			if err := m.Nodes[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validates this health status response based on the context
// it is used in, delegating to the per-field context validators below.
func (m *HealthStatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateLocal(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateNodes(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateLocal context-validates the optional local node description,
// skipping it when nil or zero-valued.
func (m *HealthStatusResponse) contextValidateLocal(ctx context.Context, formats strfmt.Registry) error {
	if m.Local != nil {

		if swag.IsZero(m.Local) { // not required
			return nil
		}

		if err := m.Local.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("local")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("local")
			}
			return err
		}
	}

	return nil
}

// contextValidateNodes context-validates every non-zero element of the nodes
// slice.
func (m *HealthStatusResponse) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Nodes); i++ {
		if m.Nodes[i] != nil {

			// BUGFIX: this was "return nil", which silently skipped context
			// validation of every remaining node as soon as one zero-valued
			// entry was encountered. Skip only this entry, mirroring
			// validateNodes above.
			if swag.IsZero(m.Nodes[i]) { // not required
				continue
			}

			if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler using the JSON encoding.
func (m *HealthStatusResponse) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler using the JSON encoding.
func (m *HealthStatusResponse) UnmarshalBinary(b []byte) error {
	var res HealthStatusResponse
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// HostStatus Connectivity status to host cilium-health instance via different paths,
// probing via all known IP addresses
//
// swagger:model HostStatus
type HostStatus struct {

	// primary address
	PrimaryAddress *PathStatus `json:"primary-address,omitempty"`

	// secondary addresses
	SecondaryAddresses []*PathStatus `json:"secondary-addresses"`
}

// Validate validates this host status, aggregating all per-field errors into
// a single composite error.
func (m *HostStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validatePrimaryAddress(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateSecondaryAddresses(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validatePrimaryAddress validates the optional primary-address sub-object,
// renaming any resulting error so it is reported under "primary-address".
func (m *HostStatus) validatePrimaryAddress(formats strfmt.Registry) error {
	if swag.IsZero(m.PrimaryAddress) { // not required
		return nil
	}

	if m.PrimaryAddress != nil {
		if err := m.PrimaryAddress.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("primary-address")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("primary-address")
			}
			return err
		}
	}

	return nil
}

// validateSecondaryAddresses validates each non-zero element of the optional
// secondary-addresses slice, naming errors "secondary-addresses.<index>".
func (m *HostStatus) validateSecondaryAddresses(formats strfmt.Registry) error {
	if swag.IsZero(m.SecondaryAddresses) { // not required
		return nil
	}

	for i := 0; i < len(m.SecondaryAddresses); i++ {
		if swag.IsZero(m.SecondaryAddresses[i]) { // not required
			continue
		}

		if m.SecondaryAddresses[i] != nil {
			if err := m.SecondaryAddresses[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validates this host status based on the context it is used
// in, delegating to the per-field context validators below.
func (m *HostStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidatePrimaryAddress context-validates the optional
// primary-address sub-object, skipping it when nil or zero-valued.
func (m *HostStatus) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error {
	if m.PrimaryAddress != nil {

		if swag.IsZero(m.PrimaryAddress) { // not required
			return nil
		}

		if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("primary-address")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("primary-address")
			}
			return err
		}
	}

	return nil
}

// contextValidateSecondaryAddresses context-validates every non-zero element
// of the secondary-addresses slice.
func (m *HostStatus) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.SecondaryAddresses); i++ {
		if m.SecondaryAddresses[i] != nil {

			// BUGFIX: this was "return nil", which silently skipped context
			// validation of every remaining secondary address as soon as one
			// zero-valued entry was encountered. Skip only this entry,
			// mirroring validateSecondaryAddresses above.
			if swag.IsZero(m.SecondaryAddresses[i]) { // not required
				continue
			}

			if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler using the JSON encoding.
func (m *HostStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler using the JSON encoding.
func (m *HostStatus) UnmarshalBinary(b []byte) error {
	var res HostStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LoadResponse System load on node
//
// swagger:model LoadResponse
type LoadResponse struct {

	// Load average over the past 15 minutes
	Last15min string `json:"last15min,omitempty"`

	// Load average over the past minute
	Last1min string `json:"last1min,omitempty"`

	// Load average over the past 5 minutes
	Last5min string `json:"last5min,omitempty"`
}

// Validate checks this load response against its swagger spec; the model has
// no constrained fields, so validation always succeeds.
func (m *LoadResponse) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate checks context-dependent constraints; none apply to this
// model.
func (m *LoadResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler using the JSON encoding.
// A nil receiver marshals to a nil payload.
func (m *LoadResponse) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler using the JSON
// encoding. The receiver is only overwritten on a successful decode.
func (m *LoadResponse) UnmarshalBinary(b []byte) error {
	var decoded LoadResponse
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeStatus Connectivity status of a remote cilium-health instance
//
// swagger:model NodeStatus
type NodeStatus struct {

	// DEPRECATED: Please use the health-endpoint field instead, which
	// supports reporting the status of different addresses for the endpoint
	//
	Endpoint *PathStatus `json:"endpoint,omitempty"`

	// Connectivity status to simulated endpoint on the node
	HealthEndpoint *EndpointStatus `json:"health-endpoint,omitempty"`

	// Connectivity status to cilium-health instance on node IP
	Host *HostStatus `json:"host,omitempty"`

	// Identifying name for the node
	Name string `json:"name,omitempty"`
}

// Validate validates this node status, aggregating all per-field errors into
// a single composite error.
func (m *NodeStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateEndpoint(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateHealthEndpoint(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateHost(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateEndpoint validates the optional (deprecated) endpoint sub-object;
// errors are renamed so they are reported under the "endpoint" field.
func (m *NodeStatus) validateEndpoint(formats strfmt.Registry) error {
	if swag.IsZero(m.Endpoint) { // not required
		return nil
	}

	if m.Endpoint != nil {
		if err := m.Endpoint.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("endpoint")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("endpoint")
			}
			return err
		}
	}

	return nil
}

// validateHealthEndpoint validates the optional health-endpoint sub-object;
// errors are renamed so they are reported under the "health-endpoint" field.
func (m *NodeStatus) validateHealthEndpoint(formats strfmt.Registry) error {
	if swag.IsZero(m.HealthEndpoint) { // not required
		return nil
	}

	if m.HealthEndpoint != nil {
		if err := m.HealthEndpoint.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("health-endpoint")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("health-endpoint")
			}
			return err
		}
	}

	return nil
}

// validateHost validates the optional host sub-object; errors are renamed so
// they are reported under the "host" field.
func (m *NodeStatus) validateHost(formats strfmt.Registry) error {
	if swag.IsZero(m.Host) { // not required
		return nil
	}

	if m.Host != nil {
		if err := m.Host.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("host")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("host")
			}
			return err
		}
	}

	return nil
}

// ContextValidate validates this node status based on the context it is used
// in, delegating to the per-field context validators below.
func (m *NodeStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateEndpoint(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateHealthEndpoint(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateHost(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateEndpoint context-validates the optional (deprecated)
// endpoint sub-object, skipping it when nil or zero-valued.
func (m *NodeStatus) contextValidateEndpoint(ctx context.Context, formats strfmt.Registry) error {
	if m.Endpoint != nil {

		if swag.IsZero(m.Endpoint) { // not required
			return nil
		}

		if err := m.Endpoint.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("endpoint")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("endpoint")
			}
			return err
		}
	}

	return nil
}

// contextValidateHealthEndpoint context-validates the optional
// health-endpoint sub-object, skipping it when nil or zero-valued.
func (m *NodeStatus) contextValidateHealthEndpoint(ctx context.Context, formats strfmt.Registry) error {
	if m.HealthEndpoint != nil {

		if swag.IsZero(m.HealthEndpoint) { // not required
			return nil
		}

		if err := m.HealthEndpoint.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("health-endpoint")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("health-endpoint")
			}
			return err
		}
	}

	return nil
}

// contextValidateHost context-validates the optional host sub-object,
// skipping it when nil or zero-valued.
func (m *NodeStatus) contextValidateHost(ctx context.Context, formats strfmt.Registry) error {
	if m.Host != nil {

		if swag.IsZero(m.Host) { // not required
			return nil
		}

		if err := m.Host.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("host")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("host")
			}
			return err
		}
	}

	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler using the JSON encoding.
func (m *NodeStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler using the JSON encoding.
func (m *NodeStatus) UnmarshalBinary(b []byte) error {
	var res NodeStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PathStatus Connectivity status via different paths, for example using different
// policies or service redirection
//
// swagger:model PathStatus
type PathStatus struct {

	// Connectivity status without policy applied
	HTTP *ConnectivityStatus `json:"http,omitempty"`

	// Basic ping connectivity status to node IP
	Icmp *ConnectivityStatus `json:"icmp,omitempty"`

	// IP address queried for the connectivity status
	IP string `json:"ip,omitempty"`
}

// Validate validates this path status, aggregating all per-field errors into
// a single composite error.
func (m *PathStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateHTTP(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateIcmp(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateHTTP validates the optional http sub-object; errors are renamed so
// they are reported under the "http" field.
func (m *PathStatus) validateHTTP(formats strfmt.Registry) error {
	if swag.IsZero(m.HTTP) { // not required
		return nil
	}

	if m.HTTP != nil {
		if err := m.HTTP.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("http")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("http")
			}
			return err
		}
	}

	return nil
}

// validateIcmp validates the optional icmp sub-object; errors are renamed so
// they are reported under the "icmp" field.
func (m *PathStatus) validateIcmp(formats strfmt.Registry) error {
	if swag.IsZero(m.Icmp) { // not required
		return nil
	}

	if m.Icmp != nil {
		if err := m.Icmp.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("icmp")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("icmp")
			}
			return err
		}
	}

	return nil
}

// ContextValidate validates this path status based on the context it is used
// in, delegating to the per-field context validators below.
func (m *PathStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateHTTP(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateIcmp(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateHTTP context-validates the optional http sub-object,
// skipping it when nil or zero-valued.
func (m *PathStatus) contextValidateHTTP(ctx context.Context, formats strfmt.Registry) error {
	if m.HTTP != nil {

		if swag.IsZero(m.HTTP) { // not required
			return nil
		}

		if err := m.HTTP.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("http")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("http")
			}
			return err
		}
	}

	return nil
}

// contextValidateIcmp context-validates the optional icmp sub-object,
// skipping it when nil or zero-valued.
func (m *PathStatus) contextValidateIcmp(ctx context.Context, formats strfmt.Registry) error {
	if m.Icmp != nil {

		if swag.IsZero(m.Icmp) { // not required
			return nil
		}

		if err := m.Icmp.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("icmp")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("icmp")
			}
			return err
		}
	}

	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler using the JSON encoding.
func (m *PathStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler using the JSON encoding.
func (m *PathStatus) UnmarshalBinary(b []byte) error {
	var res PathStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelfStatus Description of the cilium-health node
//
// swagger:model SelfStatus
type SelfStatus struct {

	// Name associated with this node
	Name string `json:"name,omitempty"`
}

// Validate checks this self status against its swagger spec; the model has
// no constrained fields, so validation always succeeds.
func (m *SelfStatus) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate checks context-dependent constraints; none apply to this
// model.
func (m *SelfStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler using the JSON encoding.
// A nil receiver marshals to a nil payload.
func (m *SelfStatus) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler using the JSON
// encoding. The receiver is only overwritten on a successful decode.
func (m *SelfStatus) UnmarshalBinary(b []byte) error {
	var decoded SelfStatus
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Address IP address
//
// swagger:model Address
type Address string

// Validate checks this address against its swagger spec; a plain string
// carries no constraints, so validation always succeeds.
func (m Address) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate checks context-dependent constraints; none apply to a
// plain string model.
func (m Address) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// AddressPair Addressing information of an endpoint
//
// swagger:model AddressPair
type AddressPair struct {

	// IPv4 address
	IPV4 string `json:"ipv4,omitempty"`

	// UUID of IPv4 expiration timer
	IPV4ExpirationUUID string `json:"ipv4-expiration-uuid,omitempty"`

	// IPAM pool from which this IPv4 address was allocated
	IPV4PoolName string `json:"ipv4-pool-name,omitempty"`

	// IPv6 address
	IPV6 string `json:"ipv6,omitempty"`

	// UUID of IPv6 expiration timer
	IPV6ExpirationUUID string `json:"ipv6-expiration-uuid,omitempty"`

	// IPAM pool from which this IPv6 address was allocated
	IPV6PoolName string `json:"ipv6-pool-name,omitempty"`
}

// Validate checks this address pair against its swagger spec; the model has
// no constrained fields, so validation always succeeds.
func (m *AddressPair) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate checks context-dependent constraints; none apply to this
// model.
func (m *AddressPair) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler using the JSON encoding.
// A nil receiver marshals to a nil payload.
func (m *AddressPair) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler using the JSON
// encoding. The receiver is only overwritten on a successful decode.
func (m *AddressPair) UnmarshalBinary(b []byte) error {
	var decoded AddressPair
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// AllocationMap Map of allocated IPs
//
// swagger:model AllocationMap
type AllocationMap map[string]string

// Validate checks this allocation map against its swagger spec; the map
// carries no constraints, so validation always succeeds.
func (m AllocationMap) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate checks context-dependent constraints; none apply to this
// model.
func (m AllocationMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// AttachMode Core datapath attachment mode
//
// swagger:model AttachMode
type AttachMode string

// NewAttachMode returns a pointer to a copy of the given attach mode value.
func NewAttachMode(value AttachMode) *AttachMode {
	return &value
}

// Pointer returns a pointer to a freshly-allocated AttachMode.
func (m AttachMode) Pointer() *AttachMode {
	return &m
}

const (

	// AttachModeTc captures enum value "tc"
	AttachModeTc AttachMode = "tc"

	// AttachModeTcx captures enum value "tcx"
	AttachModeTcx AttachMode = "tcx"
)

// attachModeEnum holds the allowed enum values for schema validation,
// populated once at package load time by init below.
var attachModeEnum []interface{}

// init decodes the spec's enum literal into attachModeEnum; a failure here
// indicates a corrupted generator template and panics at load time.
func init() {
	var res []AttachMode
	if err := json.Unmarshal([]byte(`["tc","tcx"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		attachModeEnum = append(attachModeEnum, v)
	}
}

// validateAttachModeEnum reports an error unless value is one of the allowed
// enum values ("tc", "tcx"); the comparison is case-sensitive (EnumCase).
func (m AttachMode) validateAttachModeEnum(path, location string, value AttachMode) error {
	if err := validate.EnumCase(path, location, value, attachModeEnum, true); err != nil {
		return err
	}
	return nil
}

// Validate validates this attach mode against the enum.
// NOTE(review): res is never appended to, so the CompositeValidationError
// branch is dead code; it is kept as emitted by the generator, and it is
// also the sole use of the errors import in this generated file.
func (m AttachMode) Validate(formats strfmt.Registry) error {
	var res []error

	// value enum
	if err := m.validateAttachModeEnum("", "body", m); err != nil {
		return err
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// ContextValidate validates this attach mode based on context it is used in;
// no context-sensitive rules apply.
func (m AttachMode) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMap BPF map definition and content
//
// swagger:model BPFMap
type BPFMap struct {
// Contents of cache
Cache []*BPFMapEntry `json:"cache"`
// Path to BPF map
Path string `json:"path,omitempty"`
}
// Validate validates this b p f map
func (m *BPFMap) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCache(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateCache validates every non-zero cache entry, qualifying any
// resulting error with the entry's index (e.g. "cache.3").
func (m *BPFMap) validateCache(formats strfmt.Registry) error {
	if swag.IsZero(m.Cache) { // not required
		return nil
	}
	for i, entry := range m.Cache {
		if swag.IsZero(entry) { // not required
			continue
		}
		if entry == nil {
			continue
		}
		if err := entry.Validate(formats); err != nil {
			name := "cache" + "." + strconv.Itoa(i)
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName(name)
			}
			if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName(name)
			}
			return err
		}
	}
	return nil
}
// ContextValidate validate this b p f map based on the context it is used,
// delegating to the per-field context validators.
func (m *BPFMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateCache(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateCache runs context validation on each cache entry,
// qualifying errors with the entry's index.
//
// Fix: the generated body executed `return nil` when a zero-valued entry
// was encountered inside the loop, which would abort validation of all
// remaining entries instead of skipping just that one; the per-element
// skip is now `continue`, matching the element-skip semantics used by
// validateCache.
func (m *BPFMap) contextValidateCache(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Cache); i++ {
		if m.Cache[i] != nil {
			if swag.IsZero(m.Cache[i]) { // not required
				continue
			}
			if err := m.Cache[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("cache" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("cache" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BPFMap) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BPFMap) UnmarshalBinary(b []byte) error {
	var decoded BPFMap
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BPFMapEntry BPF map cache entry
//
// swagger:model BPFMapEntry
type BPFMapEntry struct {
// Desired action to be performed
// (validated against the enum below; see validateDesiredAction)
// Enum: ["ok","insert","delete"]
DesiredAction string `json:"desired-action,omitempty"`
// Key of map entry
Key string `json:"key,omitempty"`
// Last error seen while performing desired action
LastError string `json:"last-error,omitempty"`
// Value of map entry
Value string `json:"value,omitempty"`
}
// Validate validates this b p f map entry, accumulating all field errors
// into a single composite error.
func (m *BPFMapEntry) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateDesiredAction(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// bPFMapEntryTypeDesiredActionPropEnum holds the allowed desired-action
// values; it is populated by the init function below from the spec's JSON
// enum list and consumed by validate.EnumCase.
var bPFMapEntryTypeDesiredActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ok","insert","delete"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bPFMapEntryTypeDesiredActionPropEnum = append(bPFMapEntryTypeDesiredActionPropEnum, v)
}
}
// Allowed desired-action enum values (must match the JSON list above).
const (
// BPFMapEntryDesiredActionOk captures enum value "ok"
BPFMapEntryDesiredActionOk string = "ok"
// BPFMapEntryDesiredActionInsert captures enum value "insert"
BPFMapEntryDesiredActionInsert string = "insert"
// BPFMapEntryDesiredActionDelete captures enum value "delete"
BPFMapEntryDesiredActionDelete string = "delete"
)
// validateDesiredActionEnum reports an error when value is not one of the
// declared desired-action enum values (case-sensitive comparison).
func (m *BPFMapEntry) validateDesiredActionEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, bPFMapEntryTypeDesiredActionPropEnum, true)
}
// validateDesiredAction validates the optional desired-action property
// against its enum; the zero value is accepted because the field is optional.
func (m *BPFMapEntry) validateDesiredAction(formats strfmt.Registry) error {
	if swag.IsZero(m.DesiredAction) { // not required
		return nil
	}
	return m.validateDesiredActionEnum("desired-action", "body", m.DesiredAction)
}
// ContextValidate validates this b p f map entry based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BPFMapEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BPFMapEntry) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BPFMapEntry) UnmarshalBinary(b []byte) error {
	var decoded BPFMapEntry
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapList List of BPF Maps
//
// swagger:model BPFMapList
type BPFMapList struct {
// Array of open BPF map lists
// (each non-zero entry is validated individually; see validateMaps)
Maps []*BPFMap `json:"maps"`
}
// Validate validates this b p f map list, accumulating all field errors
// into a single composite error.
func (m *BPFMapList) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateMaps(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateMaps validates every non-zero element of the maps slice,
// qualifying any resulting error with the element's index (e.g. "maps.3").
func (m *BPFMapList) validateMaps(formats strfmt.Registry) error {
	if swag.IsZero(m.Maps) { // not required
		return nil
	}
	for i, entry := range m.Maps {
		if swag.IsZero(entry) { // not required
			continue
		}
		if entry == nil {
			continue
		}
		if err := entry.Validate(formats); err != nil {
			name := "maps" + "." + strconv.Itoa(i)
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName(name)
			}
			if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName(name)
			}
			return err
		}
	}
	return nil
}
// ContextValidate validate this b p f map list based on the context it is
// used, delegating to the per-field context validators.
func (m *BPFMapList) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateMaps(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateMaps runs context validation on each maps element,
// qualifying errors with the element's index.
//
// Fix: the generated body executed `return nil` when a zero-valued element
// was encountered inside the loop, which would abort validation of all
// remaining elements instead of skipping just that one; the per-element
// skip is now `continue`, matching validateMaps.
func (m *BPFMapList) contextValidateMaps(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Maps); i++ {
		if m.Maps[i] != nil {
			if swag.IsZero(m.Maps[i]) { // not required
				continue
			}
			if err := m.Maps[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("maps" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("maps" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BPFMapList) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BPFMapList) UnmarshalBinary(b []byte) error {
	var decoded BPFMapList
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapProperties BPF map properties
//
// swagger:model BPFMapProperties
type BPFMapProperties struct {
// Name of the BPF map
Name string `json:"name,omitempty"`
// Size of the BPF map
Size int64 `json:"size,omitempty"`
}
// Validate validates this b p f map properties.
// No validation rules are generated for this model, so it always succeeds.
func (m *BPFMapProperties) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this b p f map properties based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BPFMapProperties) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BPFMapProperties) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BPFMapProperties) UnmarshalBinary(b []byte) error {
	var decoded BPFMapProperties
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapStatus BPF map status
//
// +k8s:deepcopy-gen=true
//
// swagger:model BPFMapStatus
type BPFMapStatus struct {
// Ratio of total system memory to use for dynamic sizing of BPF maps
DynamicSizeRatio float64 `json:"dynamic-size-ratio,omitempty"`
// BPF maps
// (each non-zero entry is validated individually; see validateMaps)
Maps []*BPFMapProperties `json:"maps"`
}
// Validate validates this b p f map status, accumulating all field errors
// into a single composite error.
func (m *BPFMapStatus) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateMaps(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateMaps validates every non-zero element of the maps slice,
// qualifying any resulting error with the element's index (e.g. "maps.3").
func (m *BPFMapStatus) validateMaps(formats strfmt.Registry) error {
	if swag.IsZero(m.Maps) { // not required
		return nil
	}
	for i, entry := range m.Maps {
		if swag.IsZero(entry) { // not required
			continue
		}
		if entry == nil {
			continue
		}
		if err := entry.Validate(formats); err != nil {
			name := "maps" + "." + strconv.Itoa(i)
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName(name)
			}
			if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName(name)
			}
			return err
		}
	}
	return nil
}
// ContextValidate validate this b p f map status based on the context it is
// used, delegating to the per-field context validators.
func (m *BPFMapStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateMaps(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateMaps runs context validation on each maps element,
// qualifying errors with the element's index.
//
// Fix: the generated body executed `return nil` when a zero-valued element
// was encountered inside the loop, which would abort validation of all
// remaining elements instead of skipping just that one; the per-element
// skip is now `continue`, matching validateMaps.
func (m *BPFMapStatus) contextValidateMaps(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Maps); i++ {
		if m.Maps[i] != nil {
			if swag.IsZero(m.Maps[i]) { // not required
				continue
			}
			if err := m.Maps[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("maps" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("maps" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BPFMapStatus) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BPFMapStatus) UnmarshalBinary(b []byte) error {
	var decoded BPFMapStatus
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BackendAddress Service backend address
//
// swagger:model BackendAddress
type BackendAddress struct {
// Layer 3 address
// Required: true
IP *string `json:"ip"`
// Optional name of the node on which this backend runs
NodeName string `json:"nodeName,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Indicator if this backend is preferred in the context of clustermesh service affinity. The value is set based
// on related annotation of global service. Applicable for active state only.
Preferred bool `json:"preferred,omitempty"`
// Layer 4 protocol (TCP, UDP, etc)
Protocol string `json:"protocol,omitempty"`
// State of the backend for load-balancing service traffic
// (validated against the enum below; see validateState)
// Enum: ["active","terminating","terminating-not-serving","quarantined","maintenance"]
State string `json:"state,omitempty"`
// Backend weight
// (pointer so an unset weight can be distinguished from an explicit zero)
Weight *uint16 `json:"weight,omitempty"`
// Optional name of the zone in which this backend runs
Zone string `json:"zone,omitempty"`
}
// Validate validates this backend address, accumulating all field errors
// into a single composite error.
func (m *BackendAddress) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateIP,
		m.validateState,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateIP enforces that the required ip field is present (non-nil).
func (m *BackendAddress) validateIP(formats strfmt.Registry) error {
	return validate.Required("ip", "body", m.IP)
}
// backendAddressTypeStatePropEnum holds the allowed state values; it is
// populated by the init function below from the spec's JSON enum list and
// consumed by validate.EnumCase.
var backendAddressTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["active","terminating","terminating-not-serving","quarantined","maintenance"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
backendAddressTypeStatePropEnum = append(backendAddressTypeStatePropEnum, v)
}
}
// Allowed state enum values (must match the JSON list above).
const (
// BackendAddressStateActive captures enum value "active"
BackendAddressStateActive string = "active"
// BackendAddressStateTerminating captures enum value "terminating"
BackendAddressStateTerminating string = "terminating"
// BackendAddressStateTerminatingDashNotDashServing captures enum value "terminating-not-serving"
BackendAddressStateTerminatingDashNotDashServing string = "terminating-not-serving"
// BackendAddressStateQuarantined captures enum value "quarantined"
BackendAddressStateQuarantined string = "quarantined"
// BackendAddressStateMaintenance captures enum value "maintenance"
BackendAddressStateMaintenance string = "maintenance"
)
// validateStateEnum reports an error when value is not one of the declared
// state enum values (case-sensitive comparison).
func (m *BackendAddress) validateStateEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, backendAddressTypeStatePropEnum, true)
}
// validateState validates the optional state property against its enum;
// the zero value is accepted because the field is optional.
func (m *BackendAddress) validateState(formats strfmt.Registry) error {
	if swag.IsZero(m.State) { // not required
		return nil
	}
	return m.validateStateEnum("state", "body", m.State)
}
// ContextValidate validates this backend address based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BackendAddress) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BackendAddress) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BackendAddress) UnmarshalBinary(b []byte) error {
	var decoded BackendAddress
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BandwidthManager Status of bandwidth manager
//
// +k8s:deepcopy-gen=true
//
// swagger:model BandwidthManager
type BandwidthManager struct {
// congestion control
// (validated against the enum below; see validateCongestionControl)
// Enum: ["cubic","bbr"]
CongestionControl string `json:"congestionControl,omitempty"`
// devices
Devices []string `json:"devices"`
// Is bandwidth manager enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this bandwidth manager, accumulating all field errors
// into a single composite error.
func (m *BandwidthManager) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateCongestionControl(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// bandwidthManagerTypeCongestionControlPropEnum holds the allowed
// congestionControl values; it is populated by the init function below from
// the spec's JSON enum list and consumed by validate.EnumCase.
var bandwidthManagerTypeCongestionControlPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["cubic","bbr"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bandwidthManagerTypeCongestionControlPropEnum = append(bandwidthManagerTypeCongestionControlPropEnum, v)
}
}
// Allowed congestionControl enum values (must match the JSON list above).
const (
// BandwidthManagerCongestionControlCubic captures enum value "cubic"
BandwidthManagerCongestionControlCubic string = "cubic"
// BandwidthManagerCongestionControlBbr captures enum value "bbr"
BandwidthManagerCongestionControlBbr string = "bbr"
)
// validateCongestionControlEnum reports an error when value is not one of
// the declared congestionControl enum values (case-sensitive comparison).
func (m *BandwidthManager) validateCongestionControlEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, bandwidthManagerTypeCongestionControlPropEnum, true)
}
// validateCongestionControl validates the optional congestionControl
// property against its enum; the zero value is accepted because the field
// is optional.
func (m *BandwidthManager) validateCongestionControl(formats strfmt.Registry) error {
	if swag.IsZero(m.CongestionControl) { // not required
		return nil
	}
	return m.validateCongestionControlEnum("congestionControl", "body", m.CongestionControl)
}
// ContextValidate validates this bandwidth manager based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BandwidthManager) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BandwidthManager) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BandwidthManager) UnmarshalBinary(b []byte) error {
	var decoded BandwidthManager
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpCapabilities Represents the BGP capabilities.
//
// swagger:model BgpCapabilities
type BgpCapabilities struct {
// Base64-encoded BGP capabilities details
Capabilities string `json:"capabilities,omitempty"`
}
// Validate validates this bgp capabilities.
// No validation rules are generated for this model, so it always succeeds.
func (m *BgpCapabilities) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp capabilities based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BgpCapabilities) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BgpCapabilities) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BgpCapabilities) UnmarshalBinary(b []byte) error {
	var decoded BgpCapabilities
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpFamily Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path
//
// swagger:model BgpFamily
type BgpFamily struct {
// Address Family Indicator (AFI) of the path
Afi string `json:"afi,omitempty"`
// Subsequent Address Family Indicator (SAFI) of the path
Safi string `json:"safi,omitempty"`
}
// Validate validates this bgp family.
// No validation rules are generated for this model, so it always succeeds.
func (m *BgpFamily) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp family based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BgpFamily) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BgpFamily) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BgpFamily) UnmarshalBinary(b []byte) error {
	var decoded BgpFamily
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpGracefulRestart BGP graceful restart parameters negotiated with the peer.
//
// swagger:model BgpGracefulRestart
type BgpGracefulRestart struct {
// When set, graceful restart capability is negotiated for all AFI/SAFIs of
// this peer.
Enabled bool `json:"enabled,omitempty"`
// This is the time advertised to peer for the BGP session to be re-established
// after a restart. After this period, peer will remove stale routes.
// (RFC 4724 section 4.2)
RestartTimeSeconds int64 `json:"restart-time-seconds,omitempty"`
}
// Validate validates this bgp graceful restart.
// No validation rules are generated for this model, so it always succeeds.
func (m *BgpGracefulRestart) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp graceful restart based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BgpGracefulRestart) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BgpGracefulRestart) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BgpGracefulRestart) UnmarshalBinary(b []byte) error {
	var decoded BgpGracefulRestart
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpNlri Network Layer Reachability Information (NLRI) of the path
//
// swagger:model BgpNlri
type BgpNlri struct {
// Base64-encoded NLRI in the BGP UPDATE message format
Base64 string `json:"base64,omitempty"`
}
// Validate validates this bgp nlri.
// No validation rules are generated for this model, so it always succeeds.
func (m *BgpNlri) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp nlri based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BgpNlri) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BgpNlri) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BgpNlri) UnmarshalBinary(b []byte) error {
	var decoded BgpNlri
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPath Single BGP routing Path containing BGP Network Layer Reachability Information (NLRI) and path attributes
//
// swagger:model BgpPath
type BgpPath struct {
// Age of the path (time since its creation) in nanoseconds
AgeNanoseconds int64 `json:"age-nanoseconds,omitempty"`
// True value flags the best path towards the destination prefix
Best bool `json:"best,omitempty"`
// Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path
// (optional sub-object; validated by validateFamily when non-zero)
Family *BgpFamily `json:"family,omitempty"`
// Network Layer Reachability Information of the path
// (optional sub-object; validated by validateNlri when non-zero)
Nlri *BgpNlri `json:"nlri,omitempty"`
// List of BGP path attributes specific for the path
// (each non-zero entry is validated individually; see validatePathAttributes)
PathAttributes []*BgpPathAttribute `json:"path-attributes"`
// True value marks the path as stale
Stale bool `json:"stale,omitempty"`
}
// Validate validates this bgp path, accumulating all field errors into a
// single composite error.
func (m *BgpPath) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateFamily,
		m.validateNlri,
		m.validatePathAttributes,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateFamily validates the optional family sub-object, qualifying any
// resulting error with the "family" field name.
func (m *BgpPath) validateFamily(formats strfmt.Registry) error {
	if swag.IsZero(m.Family) { // not required
		return nil
	}
	if m.Family == nil {
		return nil
	}
	if err := m.Family.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("family")
		case *errors.CompositeError:
			return typed.ValidateName("family")
		}
		return err
	}
	return nil
}
// validateNlri validates the optional nlri sub-object, qualifying any
// resulting error with the "nlri" field name.
func (m *BgpPath) validateNlri(formats strfmt.Registry) error {
	if swag.IsZero(m.Nlri) { // not required
		return nil
	}
	if m.Nlri == nil {
		return nil
	}
	if err := m.Nlri.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("nlri")
		case *errors.CompositeError:
			return typed.ValidateName("nlri")
		}
		return err
	}
	return nil
}
// validatePathAttributes validates every non-zero element of the
// path-attributes slice, qualifying any resulting error with the element's
// index (e.g. "path-attributes.3").
func (m *BgpPath) validatePathAttributes(formats strfmt.Registry) error {
	if swag.IsZero(m.PathAttributes) { // not required
		return nil
	}
	for i, attr := range m.PathAttributes {
		if swag.IsZero(attr) { // not required
			continue
		}
		if attr == nil {
			continue
		}
		if err := attr.Validate(formats); err != nil {
			name := "path-attributes" + "." + strconv.Itoa(i)
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName(name)
			}
			if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName(name)
			}
			return err
		}
	}
	return nil
}
// ContextValidate validate this bgp path based on the context it is used,
// delegating to the per-field context validators.
func (m *BgpPath) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateFamily,
		m.contextValidateNlri,
		m.contextValidatePathAttributes,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateFamily runs context validation on the optional family
// sub-object, qualifying any resulting error with the "family" field name.
func (m *BgpPath) contextValidateFamily(ctx context.Context, formats strfmt.Registry) error {
	if m.Family == nil {
		return nil
	}
	if swag.IsZero(m.Family) { // not required
		return nil
	}
	if err := m.Family.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("family")
		case *errors.CompositeError:
			return typed.ValidateName("family")
		}
		return err
	}
	return nil
}
// contextValidateNlri runs context validation on the optional nlri
// sub-object, qualifying any resulting error with the "nlri" field name.
func (m *BgpPath) contextValidateNlri(ctx context.Context, formats strfmt.Registry) error {
	if m.Nlri == nil {
		return nil
	}
	if swag.IsZero(m.Nlri) { // not required
		return nil
	}
	if err := m.Nlri.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("nlri")
		case *errors.CompositeError:
			return typed.ValidateName("nlri")
		}
		return err
	}
	return nil
}
// contextValidatePathAttributes runs context validation on each
// path-attributes element, qualifying errors with the element's index.
//
// Fix: the generated body executed `return nil` when a zero-valued element
// was encountered inside the loop, which would abort validation of all
// remaining elements instead of skipping just that one; the per-element
// skip is now `continue`, matching validatePathAttributes.
func (m *BgpPath) contextValidatePathAttributes(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.PathAttributes); i++ {
		if m.PathAttributes[i] != nil {
			if swag.IsZero(m.PathAttributes[i]) { // not required
				continue
			}
			if err := m.PathAttributes[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BgpPath) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BgpPath) UnmarshalBinary(b []byte) error {
	var decoded BgpPath
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPathAttribute Single BGP path attribute specific for the path
//
// swagger:model BgpPathAttribute
type BgpPathAttribute struct {
// Base64-encoded BGP path attribute in the BGP UPDATE message format
Base64 string `json:"base64,omitempty"`
}
// Validate validates this bgp path attribute.
// No validation rules are generated for this model, so it always succeeds.
func (m *BgpPathAttribute) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp path attribute based on context it is used.
// No context-sensitive rules are generated for this model, so it always succeeds.
func (m *BgpPathAttribute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization;
// a nil receiver yields a nil byte slice.
func (m *BgpPathAttribute) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON
// deserialization; the receiver is overwritten only on success.
func (m *BgpPathAttribute) UnmarshalBinary(b []byte) error {
	var decoded BgpPathAttribute
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpPeer State of a BGP Peer
//
// +k8s:deepcopy-gen=true
//
// swagger:model BgpPeer
type BgpPeer struct {
// Applied initial value for the BGP HoldTimer (RFC 4271, Section 4.2) in seconds.
// The applied value holds the value that is in effect on the current BGP session.
//
AppliedHoldTimeSeconds int64 `json:"applied-hold-time-seconds,omitempty"`
// Applied initial value for the BGP KeepaliveTimer (RFC 4271, Section 8) in seconds.
// The applied value holds the value that is in effect on the current BGP session.
//
AppliedKeepAliveTimeSeconds int64 `json:"applied-keep-alive-time-seconds,omitempty"`
// Configured initial value for the BGP HoldTimer (RFC 4271, Section 4.2) in seconds.
// The configured value will be used for negotiation with the peer during the BGP session establishment.
//
ConfiguredHoldTimeSeconds int64 `json:"configured-hold-time-seconds,omitempty"`
// Configured initial value for the BGP KeepaliveTimer (RFC 4271, Section 8) in seconds.
// The applied value may be different than the configured value, as it depends on the negotiated hold time interval.
//
ConfiguredKeepAliveTimeSeconds int64 `json:"configured-keep-alive-time-seconds,omitempty"`
// Initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8) in seconds
ConnectRetryTimeSeconds int64 `json:"connect-retry-time-seconds,omitempty"`
// Time To Live (TTL) value used in BGP packets sent to the eBGP neighbor.
// 1 implies that eBGP multi-hop feature is disabled (only a single hop is allowed).
//
EbgpMultihopTTL int64 `json:"ebgp-multihop-ttl,omitempty"`
// BGP peer address family state
// (each non-zero entry is validated individually; see validateFamilies)
Families []*BgpPeerFamilies `json:"families"`
// Graceful restart capability
GracefulRestart *BgpGracefulRestart `json:"graceful-restart,omitempty"`
// Local AS Number
LocalAsn int64 `json:"local-asn,omitempty"`
// Capabilities announced by the local peer
LocalCapabilities []*BgpCapabilities `json:"local-capabilities"`
// IP Address of peer
PeerAddress string `json:"peer-address,omitempty"`
// Peer AS Number
PeerAsn int64 `json:"peer-asn,omitempty"`
// TCP port number of peer
// (range enforced by validatePeerPort)
// Maximum: 65535
// Minimum: 1
PeerPort int64 `json:"peer-port,omitempty"`
// Capabilities announced by the remote peer
RemoteCapabilities []*BgpCapabilities `json:"remote-capabilities"`
// BGP peer operational state as described here
// https://www.rfc-editor.org/rfc/rfc4271#section-8.2.2
//
SessionState string `json:"session-state,omitempty"`
// Set when a TCP password is configured for communications with this peer
TCPPasswordEnabled bool `json:"tcp-password-enabled,omitempty"`
// BGP peer connection uptime in nano seconds.
UptimeNanoseconds int64 `json:"uptime-nanoseconds,omitempty"`
}
// Validate validates this bgp peer, collecting the results of every
// per-field check into a single composite error.
func (m *BgpPeer) Validate(formats strfmt.Registry) error {
	var errs []error

	for _, check := range []func(strfmt.Registry) error{
		m.validateFamilies,
		m.validateGracefulRestart,
		m.validateLocalCapabilities,
		m.validatePeerPort,
		m.validateRemoteCapabilities,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateFamilies runs Validate on every present element of Families and
// qualifies any resulting validation error with the element's index.
func (m *BgpPeer) validateFamilies(formats strfmt.Registry) error {
	if swag.IsZero(m.Families) { // optional field
		return nil
	}

	for idx := range m.Families {
		elem := m.Families[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("families" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("families" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// validateGracefulRestart validates the optional GracefulRestart sub-object,
// qualifying any validation error with the field name.
func (m *BgpPeer) validateGracefulRestart(formats strfmt.Registry) error {
	if m.GracefulRestart == nil || swag.IsZero(m.GracefulRestart) { // optional field
		return nil
	}

	if err := m.GracefulRestart.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("graceful-restart")
		case *errors.CompositeError:
			return typed.ValidateName("graceful-restart")
		}
		return err
	}

	return nil
}
// validateLocalCapabilities runs Validate on every present element of
// LocalCapabilities, qualifying any error with the element's index.
func (m *BgpPeer) validateLocalCapabilities(formats strfmt.Registry) error {
	if swag.IsZero(m.LocalCapabilities) { // optional field
		return nil
	}

	for idx := range m.LocalCapabilities {
		elem := m.LocalCapabilities[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("local-capabilities" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("local-capabilities" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// validatePeerPort enforces the swagger bounds on the optional PeerPort
// field: 1 <= peer-port <= 65535 (valid TCP port range).
func (m *BgpPeer) validatePeerPort(formats strfmt.Registry) error {
	if swag.IsZero(m.PeerPort) { // optional field
		return nil
	}

	if err := validate.MinimumInt("peer-port", "body", m.PeerPort, 1, false); err != nil {
		return err
	}

	return validate.MaximumInt("peer-port", "body", m.PeerPort, 65535, false)
}
// validateRemoteCapabilities runs Validate on every present element of
// RemoteCapabilities, qualifying any error with the element's index.
func (m *BgpPeer) validateRemoteCapabilities(formats strfmt.Registry) error {
	if swag.IsZero(m.RemoteCapabilities) { // optional field
		return nil
	}

	for idx := range m.RemoteCapabilities {
		elem := m.RemoteCapabilities[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("remote-capabilities" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("remote-capabilities" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// ContextValidate validates this bgp peer based on the context it is used,
// collecting all per-field context-validation failures into one composite error.
func (m *BgpPeer) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error

	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateFamilies,
		m.contextValidateGracefulRestart,
		m.contextValidateLocalCapabilities,
		m.contextValidateRemoteCapabilities,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateFamilies context-validates each element of Families.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *BgpPeer) contextValidateFamilies(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.Families); i++ {

		if m.Families[i] != nil {

			if swag.IsZero(m.Families[i]) { // not required
				continue
			}

			if err := m.Families[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("families" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("families" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// contextValidateGracefulRestart context-validates the optional
// GracefulRestart sub-object, qualifying any error with the field name.
func (m *BgpPeer) contextValidateGracefulRestart(ctx context.Context, formats strfmt.Registry) error {
	if m.GracefulRestart == nil || swag.IsZero(m.GracefulRestart) { // optional field
		return nil
	}

	if err := m.GracefulRestart.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("graceful-restart")
		case *errors.CompositeError:
			return typed.ValidateName("graceful-restart")
		}
		return err
	}

	return nil
}
// contextValidateLocalCapabilities context-validates each element of
// LocalCapabilities.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *BgpPeer) contextValidateLocalCapabilities(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.LocalCapabilities); i++ {

		if m.LocalCapabilities[i] != nil {

			if swag.IsZero(m.LocalCapabilities[i]) { // not required
				continue
			}

			if err := m.LocalCapabilities[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("local-capabilities" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("local-capabilities" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// contextValidateRemoteCapabilities context-validates each element of
// RemoteCapabilities.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *BgpPeer) contextValidateRemoteCapabilities(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.RemoteCapabilities); i++ {

		if m.RemoteCapabilities[i] != nil {

			if swag.IsZero(m.RemoteCapabilities[i]) { // not required
				continue
			}

			if err := m.RemoteCapabilities[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("remote-capabilities" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("remote-capabilities" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *BgpPeer) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *BgpPeer) UnmarshalBinary(b []byte) error {
	var decoded BgpPeer
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPeerFamilies BGP AFI SAFI state of the peer
//
// swagger:model BgpPeerFamilies
type BgpPeerFamilies struct {

	// Number of routes accepted from the peer of this address family
	Accepted int64 `json:"accepted,omitempty"`

	// Number of routes advertised of this address family to the peer
	Advertised int64 `json:"advertised,omitempty"`

	// BGP address family indicator
	Afi string `json:"afi,omitempty"`

	// Number of routes received from the peer of this address family
	Received int64 `json:"received,omitempty"`

	// BGP subsequent address family indicator
	Safi string `json:"safi,omitempty"`
}

// Validate validates this bgp peer families; the model carries no
// constraints, so validation always succeeds.
func (m *BgpPeerFamilies) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this bgp peer families based on context; no
// context-sensitive rules exist, so it always succeeds.
func (m *BgpPeerFamilies) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *BgpPeerFamilies) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *BgpPeerFamilies) UnmarshalBinary(b []byte) error {
	var decoded BgpPeerFamilies
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpRoute Single BGP route retrieved from the RIB of underlying router
//
// swagger:model BgpRoute
type BgpRoute struct {

	// IP address specifying a BGP neighbor if the source table type is adj-rib-in or adj-rib-out
	Neighbor string `json:"neighbor,omitempty"`

	// List of routing paths leading towards the prefix
	Paths []*BgpPath `json:"paths"`

	// IP prefix of the route
	Prefix string `json:"prefix,omitempty"`

	// Autonomous System Number (ASN) identifying a BGP virtual router instance
	RouterAsn int64 `json:"router-asn,omitempty"`
}
// Validate validates this bgp route, wrapping any per-field failure in a
// composite validation error.
func (m *BgpRoute) Validate(formats strfmt.Registry) error {
	var errs []error

	if err := m.validatePaths(formats); err != nil {
		errs = append(errs, err)
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validatePaths runs Validate on every present element of Paths,
// qualifying any resulting validation error with the element's index.
func (m *BgpRoute) validatePaths(formats strfmt.Registry) error {
	if swag.IsZero(m.Paths) { // optional field
		return nil
	}

	for idx := range m.Paths {
		elem := m.Paths[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("paths" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("paths" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// ContextValidate validate this bgp route based on the context it is used,
// wrapping any failure in a composite validation error.
func (m *BgpRoute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error

	if err := m.contextValidatePaths(ctx, formats); err != nil {
		errs = append(errs, err)
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidatePaths context-validates each element of Paths.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *BgpRoute) contextValidatePaths(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.Paths); i++ {

		if m.Paths[i] != nil {

			if swag.IsZero(m.Paths[i]) { // not required
				continue
			}

			if err := m.Paths[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("paths" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("paths" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *BgpRoute) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *BgpRoute) UnmarshalBinary(b []byte) error {
	var decoded BgpRoute
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpRoutePolicy Single BGP route policy retrieved from the underlying router
//
// swagger:model BgpRoutePolicy
type BgpRoutePolicy struct {

	// Name of the route policy
	Name string `json:"name,omitempty"`

	// Autonomous System Number (ASN) identifying a BGP virtual router instance
	RouterAsn int64 `json:"router-asn,omitempty"`

	// List of the route policy statements
	Statements []*BgpRoutePolicyStatement `json:"statements"`

	// Type of the route policy (see BgpRoutePolicyType* constants)
	// Enum: ["export","import"]
	Type string `json:"type,omitempty"`
}
// Validate validates this bgp route policy, collecting every per-field
// failure into a single composite error.
func (m *BgpRoutePolicy) Validate(formats strfmt.Registry) error {
	var errs []error

	for _, check := range []func(strfmt.Registry) error{
		m.validateStatements,
		m.validateType,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateStatements runs Validate on every present element of Statements,
// qualifying any resulting validation error with the element's index.
func (m *BgpRoutePolicy) validateStatements(formats strfmt.Registry) error {
	if swag.IsZero(m.Statements) { // optional field
		return nil
	}

	for idx := range m.Statements {
		elem := m.Statements[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("statements" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("statements" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// bgpRoutePolicyTypeTypePropEnum holds the allowed values of BgpRoutePolicy.Type.
var bgpRoutePolicyTypeTypePropEnum []interface{}

func init() {
	// The literal is emitted by the generator and must parse; failure is a
	// programming error, hence the panic at package load.
	var parsed []string
	if err := json.Unmarshal([]byte(`["export","import"]`), &parsed); err != nil {
		panic(err)
	}
	bgpRoutePolicyTypeTypePropEnum = make([]interface{}, 0, len(parsed))
	for _, value := range parsed {
		bgpRoutePolicyTypeTypePropEnum = append(bgpRoutePolicyTypeTypePropEnum, value)
	}
}
// Allowed values of BgpRoutePolicy.Type.
const (

	// BgpRoutePolicyTypeExport captures enum value "export"
	BgpRoutePolicyTypeExport string = "export"

	// BgpRoutePolicyTypeImport captures enum value "import"
	BgpRoutePolicyTypeImport string = "import"
)
// validateTypeEnum reports whether value is one of the allowed enum values
// for the type property (case-sensitive comparison).
func (m *BgpRoutePolicy) validateTypeEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, bgpRoutePolicyTypeTypePropEnum, true)
}
// validateType checks the optional Type field against its enum of
// allowed values.
func (m *BgpRoutePolicy) validateType(formats strfmt.Registry) error {
	if swag.IsZero(m.Type) { // optional field
		return nil
	}
	return m.validateTypeEnum("type", "body", m.Type)
}
// ContextValidate validate this bgp route policy based on the context it is
// used, wrapping any failure in a composite validation error.
func (m *BgpRoutePolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error

	if err := m.contextValidateStatements(ctx, formats); err != nil {
		errs = append(errs, err)
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateStatements context-validates each element of Statements.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *BgpRoutePolicy) contextValidateStatements(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.Statements); i++ {

		if m.Statements[i] != nil {

			if swag.IsZero(m.Statements[i]) { // not required
				continue
			}

			if err := m.Statements[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("statements" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("statements" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *BgpRoutePolicy) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *BgpRoutePolicy) UnmarshalBinary(b []byte) error {
	var decoded BgpRoutePolicy
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpRoutePolicyNexthopAction BGP nexthop action
//
// swagger:model BgpRoutePolicyNexthopAction
type BgpRoutePolicyNexthopAction struct {

	// Set nexthop to the IP address of itself
	Self bool `json:"self,omitempty"`

	// Don't change nexthop
	Unchanged bool `json:"unchanged,omitempty"`
}

// Validate validates this bgp route policy nexthop action; the model
// carries no constraints, so validation always succeeds.
func (m *BgpRoutePolicyNexthopAction) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this bgp route policy nexthop action based on
// context; no context-sensitive rules exist, so it always succeeds.
func (m *BgpRoutePolicyNexthopAction) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *BgpRoutePolicyNexthopAction) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *BgpRoutePolicyNexthopAction) UnmarshalBinary(b []byte) error {
	var decoded BgpRoutePolicyNexthopAction
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpRoutePolicyPrefixMatch Matches a CIDR prefix in a BGP route policy
//
// swagger:model BgpRoutePolicyPrefixMatch
type BgpRoutePolicyPrefixMatch struct {

	// CIDR prefix to match with
	Cidr string `json:"cidr,omitempty"`

	// Maximal prefix length that will match if it falls under CIDR
	PrefixLenMax int64 `json:"prefix-len-max,omitempty"`

	// Minimal prefix length that will match if it falls under CIDR
	PrefixLenMin int64 `json:"prefix-len-min,omitempty"`
}

// Validate validates this bgp route policy prefix match; the model carries
// no constraints, so validation always succeeds.
func (m *BgpRoutePolicyPrefixMatch) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this bgp route policy prefix match based on
// context; no context-sensitive rules exist, so it always succeeds.
func (m *BgpRoutePolicyPrefixMatch) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *BgpRoutePolicyPrefixMatch) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *BgpRoutePolicyPrefixMatch) UnmarshalBinary(b []byte) error {
	var decoded BgpRoutePolicyPrefixMatch
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpRoutePolicyStatement Single BGP route policy statement
//
// swagger:model BgpRoutePolicyStatement
type BgpRoutePolicyStatement struct {

	// List of BGP standard community values to be added to the matched route
	AddCommunities []string `json:"add-communities"`

	// List of BGP large community values to be added to the matched route
	AddLargeCommunities []string `json:"add-large-communities"`

	// Matches any of the provided address families. If empty matches all address families.
	MatchFamilies []*BgpFamily `json:"match-families"`

	// Matches any of the provided BGP neighbor IP addresses. If empty matches all neighbors.
	MatchNeighbors []string `json:"match-neighbors"`

	// Matches any of the provided prefixes. If empty matches all prefixes.
	MatchPrefixes []*BgpRoutePolicyPrefixMatch `json:"match-prefixes"`

	// BGP nexthop action
	Nexthop *BgpRoutePolicyNexthopAction `json:"nexthop,omitempty"`

	// RIB processing action taken on the matched route (see BgpRoutePolicyStatementRouteAction* constants)
	// Enum: ["none","accept","reject"]
	RouteAction string `json:"route-action,omitempty"`

	// BGP local preference value to be set on the matched route
	SetLocalPreference int64 `json:"set-local-preference,omitempty"`
}
// Validate validates this bgp route policy statement, collecting every
// per-field failure into a single composite error.
func (m *BgpRoutePolicyStatement) Validate(formats strfmt.Registry) error {
	var errs []error

	for _, check := range []func(strfmt.Registry) error{
		m.validateMatchFamilies,
		m.validateMatchPrefixes,
		m.validateNexthop,
		m.validateRouteAction,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateMatchFamilies runs Validate on every present element of
// MatchFamilies, qualifying any error with the element's index.
func (m *BgpRoutePolicyStatement) validateMatchFamilies(formats strfmt.Registry) error {
	if swag.IsZero(m.MatchFamilies) { // optional field
		return nil
	}

	for idx := range m.MatchFamilies {
		elem := m.MatchFamilies[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("match-families" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("match-families" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// validateMatchPrefixes runs Validate on every present element of
// MatchPrefixes, qualifying any error with the element's index.
func (m *BgpRoutePolicyStatement) validateMatchPrefixes(formats strfmt.Registry) error {
	if swag.IsZero(m.MatchPrefixes) { // optional field
		return nil
	}

	for idx := range m.MatchPrefixes {
		elem := m.MatchPrefixes[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("match-prefixes" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("match-prefixes" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// validateNexthop validates the optional Nexthop sub-object, qualifying
// any validation error with the field name.
func (m *BgpRoutePolicyStatement) validateNexthop(formats strfmt.Registry) error {
	if m.Nexthop == nil || swag.IsZero(m.Nexthop) { // optional field
		return nil
	}

	if err := m.Nexthop.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("nexthop")
		case *errors.CompositeError:
			return typed.ValidateName("nexthop")
		}
		return err
	}

	return nil
}
// bgpRoutePolicyStatementTypeRouteActionPropEnum holds the allowed values
// of BgpRoutePolicyStatement.RouteAction.
var bgpRoutePolicyStatementTypeRouteActionPropEnum []interface{}

func init() {
	// The literal is emitted by the generator and must parse; failure is a
	// programming error, hence the panic at package load.
	var parsed []string
	if err := json.Unmarshal([]byte(`["none","accept","reject"]`), &parsed); err != nil {
		panic(err)
	}
	bgpRoutePolicyStatementTypeRouteActionPropEnum = make([]interface{}, 0, len(parsed))
	for _, value := range parsed {
		bgpRoutePolicyStatementTypeRouteActionPropEnum = append(bgpRoutePolicyStatementTypeRouteActionPropEnum, value)
	}
}
// Allowed values of BgpRoutePolicyStatement.RouteAction.
const (

	// BgpRoutePolicyStatementRouteActionNone captures enum value "none"
	BgpRoutePolicyStatementRouteActionNone string = "none"

	// BgpRoutePolicyStatementRouteActionAccept captures enum value "accept"
	BgpRoutePolicyStatementRouteActionAccept string = "accept"

	// BgpRoutePolicyStatementRouteActionReject captures enum value "reject"
	BgpRoutePolicyStatementRouteActionReject string = "reject"
)
// validateRouteActionEnum reports whether value is one of the allowed enum
// values for the route-action property (case-sensitive comparison).
func (m *BgpRoutePolicyStatement) validateRouteActionEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, bgpRoutePolicyStatementTypeRouteActionPropEnum, true)
}
// validateRouteAction checks the optional RouteAction field against its
// enum of allowed values.
func (m *BgpRoutePolicyStatement) validateRouteAction(formats strfmt.Registry) error {
	if swag.IsZero(m.RouteAction) { // optional field
		return nil
	}
	return m.validateRouteActionEnum("route-action", "body", m.RouteAction)
}
// ContextValidate validate this bgp route policy statement based on the
// context it is used, collecting all failures into one composite error.
func (m *BgpRoutePolicyStatement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error

	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateMatchFamilies,
		m.contextValidateMatchPrefixes,
		m.contextValidateNexthop,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateMatchFamilies context-validates each element of MatchFamilies.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *BgpRoutePolicyStatement) contextValidateMatchFamilies(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.MatchFamilies); i++ {

		if m.MatchFamilies[i] != nil {

			if swag.IsZero(m.MatchFamilies[i]) { // not required
				continue
			}

			if err := m.MatchFamilies[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("match-families" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("match-families" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// contextValidateMatchPrefixes context-validates each element of MatchPrefixes.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *BgpRoutePolicyStatement) contextValidateMatchPrefixes(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.MatchPrefixes); i++ {

		if m.MatchPrefixes[i] != nil {

			if swag.IsZero(m.MatchPrefixes[i]) { // not required
				continue
			}

			if err := m.MatchPrefixes[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// contextValidateNexthop context-validates the optional Nexthop sub-object,
// qualifying any error with the field name.
func (m *BgpRoutePolicyStatement) contextValidateNexthop(ctx context.Context, formats strfmt.Registry) error {
	if m.Nexthop == nil || swag.IsZero(m.Nexthop) { // optional field
		return nil
	}

	if err := m.Nexthop.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("nexthop")
		case *errors.CompositeError:
			return typed.ValidateName("nexthop")
		}
		return err
	}

	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *BgpRoutePolicyStatement) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *BgpRoutePolicyStatement) UnmarshalBinary(b []byte) error {
	var decoded BgpRoutePolicyStatement
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CIDRList List of CIDRs
//
// swagger:model CIDRList
type CIDRList struct {

	// list of CIDR strings
	List []string `json:"list"`

	// revision number of the list
	Revision int64 `json:"revision,omitempty"`
}

// Validate validates this c ID r list; the model carries no constraints,
// so validation always succeeds.
func (m *CIDRList) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this c ID r list based on context; no
// context-sensitive rules exist, so it always succeeds.
func (m *CIDRList) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *CIDRList) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *CIDRList) UnmarshalBinary(b []byte) error {
	var decoded CIDRList
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CIDRPolicy CIDR endpoint policy
//
// swagger:model CIDRPolicy
type CIDRPolicy struct {

	// List of CIDR egress rules
	Egress []*PolicyRule `json:"egress"`

	// List of CIDR ingress rules
	Ingress []*PolicyRule `json:"ingress"`
}
// Validate validates this c ID r policy, collecting every per-field
// failure into a single composite error.
func (m *CIDRPolicy) Validate(formats strfmt.Registry) error {
	var errs []error

	for _, check := range []func(strfmt.Registry) error{
		m.validateEgress,
		m.validateIngress,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateEgress runs Validate on every present element of Egress,
// qualifying any resulting validation error with the element's index.
func (m *CIDRPolicy) validateEgress(formats strfmt.Registry) error {
	if swag.IsZero(m.Egress) { // optional field
		return nil
	}

	for idx := range m.Egress {
		elem := m.Egress[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("egress" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("egress" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// validateIngress runs Validate on every present element of Ingress,
// qualifying any resulting validation error with the element's index.
func (m *CIDRPolicy) validateIngress(formats strfmt.Registry) error {
	if swag.IsZero(m.Ingress) { // optional field
		return nil
	}

	for idx := range m.Ingress {
		elem := m.Ingress[idx]
		if elem == nil || swag.IsZero(elem) { // nothing set for this entry
			continue
		}
		if err := elem.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("ingress" + "." + strconv.Itoa(idx))
			case *errors.CompositeError:
				return typed.ValidateName("ingress" + "." + strconv.Itoa(idx))
			}
			return err
		}
	}

	return nil
}
// ContextValidate validate this c ID r policy based on the context it is
// used, collecting all failures into one composite error.
func (m *CIDRPolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error

	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateEgress,
		m.contextValidateIngress,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateEgress context-validates each element of Egress.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *CIDRPolicy) contextValidateEgress(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.Egress); i++ {

		if m.Egress[i] != nil {

			if swag.IsZero(m.Egress[i]) { // not required
				continue
			}

			if err := m.Egress[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("egress" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("egress" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// contextValidateIngress context-validates each element of Ingress.
//
// Fix over the generated code: a zero-value element now skips to the next
// element (continue) instead of returning early, which silently skipped
// context validation of all remaining elements.
func (m *CIDRPolicy) contextValidateIngress(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.Ingress); i++ {

		if m.Ingress[i] != nil {

			if swag.IsZero(m.Ingress[i]) { // not required
				continue
			}

			if err := m.Ingress[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding;
// a nil receiver marshals to a nil byte slice.
func (m *CIDRPolicy) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding;
// the receiver is only overwritten when decoding succeeds.
func (m *CIDRPolicy) UnmarshalBinary(b []byte) error {
	var decoded CIDRPolicy
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// CNIChainingStatus Status of CNI chaining
//
// swagger:model CNIChainingStatus
type CNIChainingStatus struct {

	// mode
	// Enum: ["none","aws-cni","flannel","generic-veth","portmap"]
	Mode string `json:"mode,omitempty"`
}

// Validate validates this c n i chaining status
func (m *CNIChainingStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateMode(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// cNIChainingStatusTypeModePropEnum holds the allowed values for Mode,
// populated once at package init from the same literal as the spec enum.
var cNIChainingStatusTypeModePropEnum []interface{}

func init() {
	var res []string
	// Unmarshal of a compile-time constant cannot fail in practice; panic
	// here would indicate a corrupted generated literal.
	if err := json.Unmarshal([]byte(`["none","aws-cni","flannel","generic-veth","portmap"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		cNIChainingStatusTypeModePropEnum = append(cNIChainingStatusTypeModePropEnum, v)
	}
}

const (

	// CNIChainingStatusModeNone captures enum value "none"
	CNIChainingStatusModeNone string = "none"

	// CNIChainingStatusModeAwsDashCni captures enum value "aws-cni"
	CNIChainingStatusModeAwsDashCni string = "aws-cni"

	// CNIChainingStatusModeFlannel captures enum value "flannel"
	CNIChainingStatusModeFlannel string = "flannel"

	// CNIChainingStatusModeGenericDashVeth captures enum value "generic-veth"
	CNIChainingStatusModeGenericDashVeth string = "generic-veth"

	// CNIChainingStatusModePortmap captures enum value "portmap"
	CNIChainingStatusModePortmap string = "portmap"
)

// prop value enum
func (m *CNIChainingStatus) validateModeEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, cNIChainingStatusTypeModePropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateMode checks Mode against the enum; an empty value is accepted
// because the field is optional.
func (m *CNIChainingStatus) validateMode(formats strfmt.Registry) error {
	if swag.IsZero(m.Mode) { // not required
		return nil
	}

	// value enum
	if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
		return err
	}

	return nil
}

// ContextValidate validates this c n i chaining status based on context it is used
func (m *CNIChainingStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation
func (m *CNIChainingStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *CNIChainingStatus) UnmarshalBinary(b []byte) error {
	var res CNIChainingStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupContainerMetadata cgroup container metadata
//
// swagger:model CgroupContainerMetadata
type CgroupContainerMetadata struct {

	// cgroup id
	CgroupID uint64 `json:"cgroup-id,omitempty"`

	// cgroup path
	CgroupPath string `json:"cgroup-path,omitempty"`
}

// Validate validates this cgroup container metadata.
// No constrained fields, so there is nothing to check.
func (m *CgroupContainerMetadata) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this cgroup container metadata based on context it is used
func (m *CgroupContainerMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation
func (m *CgroupContainerMetadata) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *CgroupContainerMetadata) UnmarshalBinary(b []byte) error {
	var res CgroupContainerMetadata
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupDumpMetadata cgroup full metadata
//
// swagger:model CgroupDumpMetadata
type CgroupDumpMetadata struct {

	// pod metadatas
	PodMetadatas []*CgroupPodMetadata `json:"pod-metadatas"`
}

// Validate validates this cgroup dump metadata
func (m *CgroupDumpMetadata) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validatePodMetadatas(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validatePodMetadatas validates each non-zero entry of the optional list,
// attributing errors to the "pod-metadatas.<i>" path.
func (m *CgroupDumpMetadata) validatePodMetadatas(formats strfmt.Registry) error {
	if swag.IsZero(m.PodMetadatas) { // not required
		return nil
	}

	for i := 0; i < len(m.PodMetadatas); i++ {
		if swag.IsZero(m.PodMetadatas[i]) { // not required
			continue
		}

		if m.PodMetadatas[i] != nil {
			if err := m.PodMetadatas[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validate this cgroup dump metadata based on the context it is used
func (m *CgroupDumpMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidatePodMetadatas(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidatePodMetadatas context-validates each entry of the
// pod-metadatas list, attributing errors to "pod-metadatas.<i>".
//
// Fix: `return nil` on a zero-value element skipped all later elements;
// replaced with `continue` to match validatePodMetadatas above.
func (m *CgroupDumpMetadata) contextValidatePodMetadatas(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.PodMetadatas); i++ {
		if m.PodMetadatas[i] != nil {
			if swag.IsZero(m.PodMetadatas[i]) { // not required
				continue
			}
			if err := m.PodMetadatas[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary interface implementation.
// A nil receiver marshals to a nil payload rather than panicking.
func (m *CgroupDumpMetadata) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation.
// Decodes into a temporary so *m is left untouched when decoding fails.
func (m *CgroupDumpMetadata) UnmarshalBinary(b []byte) error {
	var res CgroupDumpMetadata
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupPodMetadata cgroup pod metadata
//
// swagger:model CgroupPodMetadata
type CgroupPodMetadata struct {

	// containers
	Containers []*CgroupContainerMetadata `json:"containers"`

	// ips
	Ips []string `json:"ips"`

	// name
	Name string `json:"name,omitempty"`

	// namespace
	Namespace string `json:"namespace,omitempty"`
}

// Validate validates this cgroup pod metadata
func (m *CgroupPodMetadata) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateContainers(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateContainers validates each non-zero entry of the optional list,
// attributing errors to the "containers.<i>" path.
func (m *CgroupPodMetadata) validateContainers(formats strfmt.Registry) error {
	if swag.IsZero(m.Containers) { // not required
		return nil
	}

	for i := 0; i < len(m.Containers); i++ {
		if swag.IsZero(m.Containers[i]) { // not required
			continue
		}

		if m.Containers[i] != nil {
			if err := m.Containers[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("containers" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("containers" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validate this cgroup pod metadata based on the context it is used
func (m *CgroupPodMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateContainers(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidateContainers context-validates each entry of the containers
// list, attributing errors to "containers.<i>".
//
// Fix: `return nil` on a zero-value element skipped all later elements;
// replaced with `continue` to match validateContainers above.
func (m *CgroupPodMetadata) contextValidateContainers(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Containers); i++ {
		if m.Containers[i] != nil {
			if swag.IsZero(m.Containers[i]) { // not required
				continue
			}
			if err := m.Containers[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("containers" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("containers" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary interface implementation.
// A nil receiver marshals to a nil payload rather than panicking.
func (m *CgroupPodMetadata) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation.
// Decodes into a temporary so *m is left untouched when decoding fails.
func (m *CgroupPodMetadata) UnmarshalBinary(b []byte) error {
	var res CgroupPodMetadata
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ClockSource Status of BPF clock source
//
// swagger:model ClockSource
type ClockSource struct {

	// Kernel Hz
	Hertz int64 `json:"hertz,omitempty"`

	// Datapath clock source
	// Enum: ["ktime","jiffies"]
	Mode string `json:"mode,omitempty"`
}

// Validate validates this clock source
func (m *ClockSource) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateMode(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// clockSourceTypeModePropEnum holds the allowed values for Mode, populated
// once at package init from the same literal as the spec enum.
var clockSourceTypeModePropEnum []interface{}

func init() {
	var res []string
	// Unmarshal of a compile-time constant cannot fail in practice.
	if err := json.Unmarshal([]byte(`["ktime","jiffies"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		clockSourceTypeModePropEnum = append(clockSourceTypeModePropEnum, v)
	}
}

const (

	// ClockSourceModeKtime captures enum value "ktime"
	ClockSourceModeKtime string = "ktime"

	// ClockSourceModeJiffies captures enum value "jiffies"
	ClockSourceModeJiffies string = "jiffies"
)

// prop value enum
func (m *ClockSource) validateModeEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, clockSourceTypeModePropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateMode checks Mode against the enum; an empty value is accepted
// because the field is optional.
func (m *ClockSource) validateMode(formats strfmt.Registry) error {
	if swag.IsZero(m.Mode) { // not required
		return nil
	}

	// value enum
	if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
		return err
	}

	return nil
}

// ContextValidate validates this clock source based on context it is used
func (m *ClockSource) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation
func (m *ClockSource) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ClockSource) UnmarshalBinary(b []byte) error {
	var res ClockSource
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterMeshStatus Status of ClusterMesh
//
// +k8s:deepcopy-gen=true
//
// swagger:model ClusterMeshStatus
type ClusterMeshStatus struct {

	// List of remote clusters
	Clusters []*RemoteCluster `json:"clusters"`

	// Number of global services
	NumGlobalServices int64 `json:"num-global-services,omitempty"`
}

// Validate validates this cluster mesh status
func (m *ClusterMeshStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateClusters(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateClusters validates each non-zero entry of the optional list,
// attributing errors to the "clusters.<i>" path.
func (m *ClusterMeshStatus) validateClusters(formats strfmt.Registry) error {
	if swag.IsZero(m.Clusters) { // not required
		return nil
	}

	for i := 0; i < len(m.Clusters); i++ {
		if swag.IsZero(m.Clusters[i]) { // not required
			continue
		}

		if m.Clusters[i] != nil {
			if err := m.Clusters[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("clusters" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("clusters" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validate this cluster mesh status based on the context it is used
func (m *ClusterMeshStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateClusters(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidateClusters context-validates each entry of the clusters list,
// attributing errors to "clusters.<i>".
//
// Fix: `return nil` on a zero-value element skipped all later elements;
// replaced with `continue` to match validateClusters above.
func (m *ClusterMeshStatus) contextValidateClusters(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Clusters); i++ {
		if m.Clusters[i] != nil {
			if swag.IsZero(m.Clusters[i]) { // not required
				continue
			}
			if err := m.Clusters[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("clusters" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("clusters" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary interface implementation.
// A nil receiver marshals to a nil payload rather than panicking.
func (m *ClusterMeshStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation.
// Decodes into a temporary so *m is left untouched when decoding fails.
func (m *ClusterMeshStatus) UnmarshalBinary(b []byte) error {
	var res ClusterMeshStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterNodeStatus Status of cluster
//
// swagger:model ClusterNodeStatus
type ClusterNodeStatus struct {

	// ID that should be used by the client to receive a diff from the previous request
	ClientID int64 `json:"client-id,omitempty"`

	// List of known nodes
	NodesAdded []*NodeElement `json:"nodes-added"`

	// List of known nodes
	NodesRemoved []*NodeElement `json:"nodes-removed"`

	// Name of local node (if available)
	Self string `json:"self,omitempty"`
}

// Validate validates this cluster node status
func (m *ClusterNodeStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateNodesAdded(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateNodesRemoved(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateNodesAdded validates each non-zero entry of the optional list,
// attributing errors to the "nodes-added.<i>" path.
func (m *ClusterNodeStatus) validateNodesAdded(formats strfmt.Registry) error {
	if swag.IsZero(m.NodesAdded) { // not required
		return nil
	}

	for i := 0; i < len(m.NodesAdded); i++ {
		if swag.IsZero(m.NodesAdded[i]) { // not required
			continue
		}

		if m.NodesAdded[i] != nil {
			if err := m.NodesAdded[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes-added" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes-added" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// validateNodesRemoved validates each non-zero entry of the optional list,
// attributing errors to the "nodes-removed.<i>" path.
func (m *ClusterNodeStatus) validateNodesRemoved(formats strfmt.Registry) error {
	if swag.IsZero(m.NodesRemoved) { // not required
		return nil
	}

	for i := 0; i < len(m.NodesRemoved); i++ {
		if swag.IsZero(m.NodesRemoved[i]) { // not required
			continue
		}

		if m.NodesRemoved[i] != nil {
			if err := m.NodesRemoved[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validate this cluster node status based on the context it is used
func (m *ClusterNodeStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateNodesAdded(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateNodesRemoved(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidateNodesAdded context-validates each entry of the nodes-added
// list, attributing errors to "nodes-added.<i>".
//
// Fix: `return nil` on a zero-value element skipped all later elements;
// replaced with `continue` to match validateNodesAdded above.
func (m *ClusterNodeStatus) contextValidateNodesAdded(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.NodesAdded); i++ {
		if m.NodesAdded[i] != nil {
			if swag.IsZero(m.NodesAdded[i]) { // not required
				continue
			}
			if err := m.NodesAdded[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes-added" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes-added" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// contextValidateNodesRemoved context-validates each entry of the
// nodes-removed list, attributing errors to "nodes-removed.<i>".
//
// Fix: `return nil` on a zero-value element skipped all later elements;
// replaced with `continue` to match validateNodesRemoved above.
func (m *ClusterNodeStatus) contextValidateNodesRemoved(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.NodesRemoved); i++ {
		if m.NodesRemoved[i] != nil {
			if swag.IsZero(m.NodesRemoved[i]) { // not required
				continue
			}
			if err := m.NodesRemoved[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary interface implementation.
// A nil receiver marshals to a nil payload rather than panicking.
func (m *ClusterNodeStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation.
// Decodes into a temporary so *m is left untouched when decoding fails.
func (m *ClusterNodeStatus) UnmarshalBinary(b []byte) error {
	var res ClusterNodeStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterNodesResponse cluster nodes response
//
// swagger:model ClusterNodesResponse
type ClusterNodesResponse struct {

	// List of known nodes
	Nodes []*NodeElement `json:"nodes"`

	// Name of local node (if available)
	Self string `json:"self,omitempty"`
}

// Validate validates this cluster nodes response
func (m *ClusterNodesResponse) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateNodes(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateNodes validates each non-zero entry of the optional list,
// attributing errors to the "nodes.<i>" path.
func (m *ClusterNodesResponse) validateNodes(formats strfmt.Registry) error {
	if swag.IsZero(m.Nodes) { // not required
		return nil
	}

	for i := 0; i < len(m.Nodes); i++ {
		if swag.IsZero(m.Nodes[i]) { // not required
			continue
		}

		if m.Nodes[i] != nil {
			if err := m.Nodes[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validate this cluster nodes response based on the context it is used
func (m *ClusterNodesResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateNodes(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidateNodes context-validates each entry of the nodes list,
// attributing errors to "nodes.<i>".
//
// Fix: `return nil` on a zero-value element skipped all later elements;
// replaced with `continue` to match validateNodes above.
func (m *ClusterNodesResponse) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Nodes); i++ {
		if m.Nodes[i] != nil {
			if swag.IsZero(m.Nodes[i]) { // not required
				continue
			}
			if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary interface implementation.
// A nil receiver marshals to a nil payload rather than panicking.
func (m *ClusterNodesResponse) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation.
// Decodes into a temporary so *m is left untouched when decoding fails.
func (m *ClusterNodesResponse) UnmarshalBinary(b []byte) error {
	var res ClusterNodesResponse
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterStatus Status of cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model ClusterStatus
type ClusterStatus struct {

	// Status of local cilium-health daemon
	CiliumHealth *Status `json:"ciliumHealth,omitempty"`

	// List of known nodes
	Nodes []*NodeElement `json:"nodes"`

	// Name of local node (if available)
	Self string `json:"self,omitempty"`
}

// Validate validates this cluster status
func (m *ClusterStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateCiliumHealth(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateNodes(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateCiliumHealth validates the optional ciliumHealth sub-object,
// re-rooting any error under the "ciliumHealth" path.
func (m *ClusterStatus) validateCiliumHealth(formats strfmt.Registry) error {
	if swag.IsZero(m.CiliumHealth) { // not required
		return nil
	}

	if m.CiliumHealth != nil {
		if err := m.CiliumHealth.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ciliumHealth")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ciliumHealth")
			}
			return err
		}
	}

	return nil
}

// validateNodes validates each non-zero entry of the optional list,
// attributing errors to the "nodes.<i>" path.
func (m *ClusterStatus) validateNodes(formats strfmt.Registry) error {
	if swag.IsZero(m.Nodes) { // not required
		return nil
	}

	for i := 0; i < len(m.Nodes); i++ {
		if swag.IsZero(m.Nodes[i]) { // not required
			continue
		}

		if m.Nodes[i] != nil {
			if err := m.Nodes[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validate this cluster status based on the context it is used
func (m *ClusterStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateCiliumHealth(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateNodes(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateCiliumHealth context-validates the optional ciliumHealth
// sub-object; skipping a zero value via early return is fine here because
// there is only a single field (no loop to cut short).
func (m *ClusterStatus) contextValidateCiliumHealth(ctx context.Context, formats strfmt.Registry) error {
	if m.CiliumHealth != nil {

		if swag.IsZero(m.CiliumHealth) { // not required
			return nil
		}

		if err := m.CiliumHealth.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ciliumHealth")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ciliumHealth")
			}
			return err
		}
	}

	return nil
}
// contextValidateNodes context-validates each entry of the nodes list,
// attributing errors to "nodes.<i>".
//
// Fix: `return nil` on a zero-value element skipped all later elements;
// replaced with `continue` to match validateNodes above.
func (m *ClusterStatus) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Nodes); i++ {
		if m.Nodes[i] != nil {
			if swag.IsZero(m.Nodes[i]) { // not required
				continue
			}
			if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary interface implementation.
// A nil receiver marshals to a nil payload rather than panicking.
func (m *ClusterStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation.
// Decodes into a temporary so *m is left untouched when decoding fails.
func (m *ClusterStatus) UnmarshalBinary(b []byte) error {
	var res ClusterStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// ConfigurationMap Map of configuration key/value pairs.
//
// swagger:model ConfigurationMap
type ConfigurationMap map[string]string

// Validate validates this configuration map.
// Plain string-to-string pairs carry no constraints to check.
func (m ConfigurationMap) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this configuration map based on context it is used
func (m ConfigurationMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ControllerStatus Status of a controller
//
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatus
type ControllerStatus struct {

	// configuration
	Configuration *ControllerStatusConfiguration `json:"configuration,omitempty"`

	// Name of controller
	Name string `json:"name,omitempty"`

	// status
	Status *ControllerStatusStatus `json:"status,omitempty"`

	// UUID of controller
	// Format: uuid
	UUID strfmt.UUID `json:"uuid,omitempty"`
}

// Validate validates this controller status
func (m *ControllerStatus) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateConfiguration(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateStatus(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateUUID(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateConfiguration validates the optional configuration sub-object,
// re-rooting any error under the "configuration" path.
func (m *ControllerStatus) validateConfiguration(formats strfmt.Registry) error {
	if swag.IsZero(m.Configuration) { // not required
		return nil
	}

	if m.Configuration != nil {
		if err := m.Configuration.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("configuration")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("configuration")
			}
			return err
		}
	}

	return nil
}

// validateStatus validates the optional status sub-object, re-rooting any
// error under the "status" path.
func (m *ControllerStatus) validateStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.Status) { // not required
		return nil
	}

	if m.Status != nil {
		if err := m.Status.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("status")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("status")
			}
			return err
		}
	}

	return nil
}

// validateUUID checks the optional UUID field against the registered
// "uuid" string format.
func (m *ControllerStatus) validateUUID(formats strfmt.Registry) error {
	if swag.IsZero(m.UUID) { // not required
		return nil
	}

	if err := validate.FormatOf("uuid", "body", "uuid", m.UUID.String(), formats); err != nil {
		return err
	}

	return nil
}

// ContextValidate validate this controller status based on the context it is used
func (m *ControllerStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateConfiguration(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateStatus(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateConfiguration context-validates the optional configuration
// sub-object; early return on a zero value is fine here (single field, no
// loop to cut short).
func (m *ControllerStatus) contextValidateConfiguration(ctx context.Context, formats strfmt.Registry) error {
	if m.Configuration != nil {

		if swag.IsZero(m.Configuration) { // not required
			return nil
		}

		if err := m.Configuration.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("configuration")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("configuration")
			}
			return err
		}
	}

	return nil
}

// contextValidateStatus context-validates the optional status sub-object;
// early return on a zero value is fine here (single field).
func (m *ControllerStatus) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
	if m.Status != nil {

		if swag.IsZero(m.Status) { // not required
			return nil
		}

		if err := m.Status.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("status")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("status")
			}
			return err
		}
	}

	return nil
}

// MarshalBinary interface implementation
func (m *ControllerStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *ControllerStatus) UnmarshalBinary(b []byte) error {
	var res ControllerStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// ControllerStatusConfiguration Configuration of controller
//
// +deepequal-gen=true
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatusConfiguration
type ControllerStatusConfiguration struct {
// Retry on error
ErrorRetry bool `json:"error-retry,omitempty"`
// Base error retry back-off time
// Format: duration
ErrorRetryBase strfmt.Duration `json:"error-retry-base,omitempty"`
// Regular synchronization interval
// Format: duration
Interval strfmt.Duration `json:"interval,omitempty"`
}
// Validate validates this controller status configuration
func (m *ControllerStatusConfiguration) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateErrorRetryBase(formats); err != nil {
res = append(res, err)
}
if err := m.validateInterval(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatusConfiguration) validateErrorRetryBase(formats strfmt.Registry) error {
if swag.IsZero(m.ErrorRetryBase) { // not required
return nil
}
if err := validate.FormatOf("configuration"+"."+"error-retry-base", "body", "duration", m.ErrorRetryBase.String(), formats); err != nil {
return err
}
return nil
}
// validateInterval checks that the optional interval field carries a
// well-formed duration string.
func (m *ControllerStatusConfiguration) validateInterval(formats strfmt.Registry) error {
	if swag.IsZero(m.Interval) { // not required
		return nil
	}
	return validate.FormatOf("configuration.interval", "body", "duration", m.Interval.String(), formats)
}
// ContextValidate validates this controller status configuration based on
// context it is used. The model has no context-scoped rules, so this is a
// no-op kept to satisfy the generated validation interface.
func (m *ControllerStatusConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler using JSON as the wire form.
func (m *ControllerStatusConfiguration) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler; the receiver is only
// overwritten if the JSON payload decodes cleanly.
func (m *ControllerStatusConfiguration) UnmarshalBinary(b []byte) error {
	var decoded ControllerStatusConfiguration
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// ControllerStatusStatus Current status of controller
//
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatusStatus
type ControllerStatusStatus struct {

	// Number of consecutive errors since last success
	ConsecutiveFailureCount int64 `json:"consecutive-failure-count,omitempty"`

	// Total number of failed runs
	FailureCount int64 `json:"failure-count,omitempty"`

	// Error message of last failed run
	LastFailureMsg string `json:"last-failure-msg,omitempty"`

	// Timestamp of last error
	// Format: date-time
	LastFailureTimestamp strfmt.DateTime `json:"last-failure-timestamp,omitempty"`

	// Timestamp of last success
	// Format: date-time
	LastSuccessTimestamp strfmt.DateTime `json:"last-success-timestamp,omitempty"`

	// Total number of successful runs
	SuccessCount int64 `json:"success-count,omitempty"`
}
// Validate validates this controller status status, collecting all
// field-level failures into a single composite error.
func (m *ControllerStatusStatus) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateLastFailureTimestamp,
		m.validateLastSuccessTimestamp,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// validateLastFailureTimestamp checks that the optional last-failure-timestamp
// field carries a well-formed date-time string.
func (m *ControllerStatusStatus) validateLastFailureTimestamp(formats strfmt.Registry) error {
	if swag.IsZero(m.LastFailureTimestamp) { // not required
		return nil
	}
	return validate.FormatOf("status.last-failure-timestamp", "body", "date-time", m.LastFailureTimestamp.String(), formats)
}
// validateLastSuccessTimestamp checks that the optional last-success-timestamp
// field carries a well-formed date-time string.
func (m *ControllerStatusStatus) validateLastSuccessTimestamp(formats strfmt.Registry) error {
	if swag.IsZero(m.LastSuccessTimestamp) { // not required
		return nil
	}
	return validate.FormatOf("status.last-success-timestamp", "body", "date-time", m.LastSuccessTimestamp.String(), formats)
}
// ContextValidate validates this controller status status based on context it
// is used. The model has no context-scoped rules, so this is a no-op kept to
// satisfy the generated validation interface.
func (m *ControllerStatusStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler using JSON as the wire form.
func (m *ControllerStatusStatus) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler; the receiver is only
// overwritten if the JSON payload decodes cleanly.
func (m *ControllerStatusStatus) UnmarshalBinary(b []byte) error {
	var decoded ControllerStatusStatus
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ControllerStatuses Collection of controller statuses
//
// swagger:model ControllerStatuses
type ControllerStatuses []*ControllerStatus
// Validate validates this controller statuses collection. Nil or zero-valued
// elements are optional and skipped; the first failing element aborts with an
// error named after its index.
func (m ControllerStatuses) Validate(formats strfmt.Registry) error {
	for i, status := range m {
		if status == nil || swag.IsZero(status) { // element not required
			continue
		}
		if err := status.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName(strconv.Itoa(i))
			case *errors.CompositeError:
				return typed.ValidateName(strconv.Itoa(i))
			}
			return err
		}
	}
	return nil
}
// ContextValidate validate this controller statuses based on the context it
// is used.
//
// Fix: the previous code did `return nil` as soon as it met a zero-valued
// element, which silently skipped context validation of every remaining
// element in the slice. It now skips only that element (`continue`),
// matching the behavior of Validate above. The never-populated `res`
// accumulator has also been removed.
func (m ControllerStatuses) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m); i++ {
		if m[i] == nil || swag.IsZero(m[i]) { // this element is optional
			continue
		}
		if err := m[i].ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName(strconv.Itoa(i))
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName(strconv.Itoa(i))
			}
			return err
		}
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DaemonConfiguration Response to a daemon configuration request.
//
// swagger:model DaemonConfiguration
type DaemonConfiguration struct {

	// Changeable configuration
	Spec *DaemonConfigurationSpec `json:"spec,omitempty"`

	// Current daemon configuration related status.Contains the addressing
	// information, k8s, node monitor and immutable and mutable
	// configuration settings.
	//
	Status *DaemonConfigurationStatus `json:"status,omitempty"`
}
// Validate validates this daemon configuration, collecting all field-level
// failures into a single composite error.
func (m *DaemonConfiguration) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateSpec,
		m.validateStatus,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// validateSpec validates the optional spec sub-object when it is present.
func (m *DaemonConfiguration) validateSpec(formats strfmt.Registry) error {
	if m.Spec == nil || swag.IsZero(m.Spec) { // not required
		return nil
	}
	if err := m.Spec.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("spec")
		case *errors.CompositeError:
			return typed.ValidateName("spec")
		}
		return err
	}
	return nil
}
// validateStatus validates the optional status sub-object when it is present.
func (m *DaemonConfiguration) validateStatus(formats strfmt.Registry) error {
	if m.Status == nil || swag.IsZero(m.Status) { // not required
		return nil
	}
	if err := m.Status.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("status")
		case *errors.CompositeError:
			return typed.ValidateName("status")
		}
		return err
	}
	return nil
}
// ContextValidate validate this daemon configuration based on the context it
// is used, collecting all sub-object failures into one composite error.
func (m *DaemonConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateSpec,
		m.contextValidateStatus,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// contextValidateSpec runs context-based validation of the optional spec
// sub-object; nil or zero values are treated as absent and skipped.
func (m *DaemonConfiguration) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
	if m.Spec == nil || swag.IsZero(m.Spec) { // not required
		return nil
	}
	if err := m.Spec.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("spec")
		case *errors.CompositeError:
			return typed.ValidateName("spec")
		}
		return err
	}
	return nil
}
// contextValidateStatus runs context-based validation of the optional status
// sub-object; nil or zero values are treated as absent and skipped.
func (m *DaemonConfiguration) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
	if m.Status == nil || swag.IsZero(m.Status) { // not required
		return nil
	}
	if err := m.Status.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("status")
		case *errors.CompositeError:
			return typed.ValidateName("status")
		}
		return err
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler using JSON as the wire form.
func (m *DaemonConfiguration) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler; the receiver is only
// overwritten if the JSON payload decodes cleanly.
func (m *DaemonConfiguration) UnmarshalBinary(b []byte) error {
	var decoded DaemonConfiguration
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// DaemonConfigurationSpec The controllable configuration of the daemon.
//
// swagger:model DaemonConfigurationSpec
type DaemonConfigurationSpec struct {

	// Changeable configuration
	Options ConfigurationMap `json:"options,omitempty"`

	// The policy-enforcement mode
	// Enum: ["default","always","never"]
	PolicyEnforcement string `json:"policy-enforcement,omitempty"`
}
// Validate validates this daemon configuration spec, collecting all
// field-level failures into a single composite error.
func (m *DaemonConfigurationSpec) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateOptions,
		m.validatePolicyEnforcement,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// validateOptions validates the optional options map when it is present.
func (m *DaemonConfigurationSpec) validateOptions(formats strfmt.Registry) error {
	if m.Options == nil || swag.IsZero(m.Options) { // not required
		return nil
	}
	if err := m.Options.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("options")
		case *errors.CompositeError:
			return typed.ValidateName("options")
		}
		return err
	}
	return nil
}
// daemonConfigurationSpecTypePolicyEnforcementPropEnum holds the allowed
// values of the policy-enforcement property, populated once at startup from
// the swagger enum literal.
var daemonConfigurationSpecTypePolicyEnforcementPropEnum []interface{}

func init() {
	var values []string
	if err := json.Unmarshal([]byte(`["default","always","never"]`), &values); err != nil {
		panic(err)
	}
	for _, v := range values {
		daemonConfigurationSpecTypePolicyEnforcementPropEnum = append(daemonConfigurationSpecTypePolicyEnforcementPropEnum, v)
	}
}
// Enum values accepted by DaemonConfigurationSpec.PolicyEnforcement.
const (

	// DaemonConfigurationSpecPolicyEnforcementDefault captures enum value "default"
	DaemonConfigurationSpecPolicyEnforcementDefault string = "default"

	// DaemonConfigurationSpecPolicyEnforcementAlways captures enum value "always"
	DaemonConfigurationSpecPolicyEnforcementAlways string = "always"

	// DaemonConfigurationSpecPolicyEnforcementNever captures enum value "never"
	DaemonConfigurationSpecPolicyEnforcementNever string = "never"
)
// validatePolicyEnforcementEnum reports an error when value is not one of the
// declared policy-enforcement enum values (case-sensitive).
func (m *DaemonConfigurationSpec) validatePolicyEnforcementEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, daemonConfigurationSpecTypePolicyEnforcementPropEnum, true)
}
// validatePolicyEnforcement checks that the optional policy-enforcement field
// holds one of the allowed enum values.
func (m *DaemonConfigurationSpec) validatePolicyEnforcement(formats strfmt.Registry) error {
	if swag.IsZero(m.PolicyEnforcement) { // not required
		return nil
	}
	return m.validatePolicyEnforcementEnum("policy-enforcement", "body", m.PolicyEnforcement)
}
// ContextValidate validate this daemon configuration spec based on the
// context it is used; only the options map carries context-scoped rules.
func (m *DaemonConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateOptions(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// contextValidateOptions runs context-based validation of the optional
// options map; zero values are treated as absent and skipped.
func (m *DaemonConfigurationSpec) contextValidateOptions(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.Options) { // not required
		return nil
	}
	if err := m.Options.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("options")
		case *errors.CompositeError:
			return typed.ValidateName("options")
		}
		return err
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler using JSON as the wire form.
func (m *DaemonConfigurationSpec) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler; the receiver is only
// overwritten if the JSON payload decodes cleanly.
func (m *DaemonConfigurationSpec) UnmarshalBinary(b []byte) error {
	var decoded DaemonConfigurationSpec
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DaemonConfigurationStatus Response to a daemon configuration request. Contains the addressing
// information, k8s, node monitor and immutable and mutable configuration
// settings.
//
// swagger:model DaemonConfigurationStatus
type DaemonConfigurationStatus struct {

	// Maximum IPv4 GRO size on workload facing devices
	GROIPV4MaxSize int64 `json:"GROIPv4MaxSize,omitempty"`

	// Maximum IPv6 GRO size on workload facing devices
	GROMaxSize int64 `json:"GROMaxSize,omitempty"`

	// Maximum IPv4 GSO size on workload facing devices
	GSOIPV4MaxSize int64 `json:"GSOIPv4MaxSize,omitempty"`

	// Maximum IPv6 GSO size on workload facing devices
	GSOMaxSize int64 `json:"GSOMaxSize,omitempty"`

	// addressing
	Addressing *NodeAddressing `json:"addressing,omitempty"`

	// Config map which contains all the active daemon configurations
	DaemonConfigurationMap map[string]interface{} `json:"daemonConfigurationMap,omitempty"`

	// datapath mode
	DatapathMode DatapathMode `json:"datapathMode,omitempty"`

	// MTU on workload facing devices
	DeviceMTU int64 `json:"deviceMTU,omitempty"`

	// Configured compatibility mode for --egress-multi-home-ip-rule-compat
	EgressMultiHomeIPRuleCompat bool `json:"egress-multi-home-ip-rule-compat,omitempty"`

	// True if BBR is enabled only in the host network namespace
	EnableBBRHostNamespaceOnly bool `json:"enableBBRHostNamespaceOnly,omitempty"`

	// Enable route MTU for pod netns when CNI chaining is used
	EnableRouteMTUForCNIChaining bool `json:"enableRouteMTUForCNIChaining,omitempty"`

	// Immutable configuration (read-only)
	Immutable ConfigurationMap `json:"immutable,omitempty"`

	// Install ingress/egress routes through uplink on host for Pods when working with
	// delegated IPAM plugin.
	//
	InstallUplinkRoutesForDelegatedIPAM bool `json:"installUplinkRoutesForDelegatedIPAM,omitempty"`

	// Comma-separated list of IP ports should be reserved in the workload network namespace
	IPLocalReservedPorts string `json:"ipLocalReservedPorts,omitempty"`

	// Configured IPAM mode
	IpamMode string `json:"ipam-mode,omitempty"`

	// k8s configuration
	K8sConfiguration string `json:"k8s-configuration,omitempty"`

	// k8s endpoint
	K8sEndpoint string `json:"k8s-endpoint,omitempty"`

	// kvstore configuration
	KvstoreConfiguration *KVstoreConfiguration `json:"kvstoreConfiguration,omitempty"`

	// masquerade
	Masquerade bool `json:"masquerade,omitempty"`

	// masquerade protocols
	MasqueradeProtocols *DaemonConfigurationStatusMasqueradeProtocols `json:"masqueradeProtocols,omitempty"`

	// Status of the node monitor
	NodeMonitor *MonitorStatus `json:"nodeMonitor,omitempty"`

	// Currently applied configuration
	Realized *DaemonConfigurationSpec `json:"realized,omitempty"`

	// MTU for network facing routes
	RouteMTU int64 `json:"routeMTU,omitempty"`
}
// Validate validates this daemon configuration status, collecting all
// field-level failures into a single composite error.
func (m *DaemonConfigurationStatus) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateAddressing,
		m.validateDatapathMode,
		m.validateImmutable,
		m.validateKvstoreConfiguration,
		m.validateMasqueradeProtocols,
		m.validateNodeMonitor,
		m.validateRealized,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// validateAddressing validates the optional addressing sub-object when set.
func (m *DaemonConfigurationStatus) validateAddressing(formats strfmt.Registry) error {
	if m.Addressing == nil || swag.IsZero(m.Addressing) { // not required
		return nil
	}
	if err := m.Addressing.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("addressing")
		case *errors.CompositeError:
			return typed.ValidateName("addressing")
		}
		return err
	}
	return nil
}
// validateDatapathMode validates the optional datapathMode enum field.
func (m *DaemonConfigurationStatus) validateDatapathMode(formats strfmt.Registry) error {
	if swag.IsZero(m.DatapathMode) { // not required
		return nil
	}
	if err := m.DatapathMode.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("datapathMode")
		case *errors.CompositeError:
			return typed.ValidateName("datapathMode")
		}
		return err
	}
	return nil
}
// validateImmutable validates the optional immutable configuration map.
func (m *DaemonConfigurationStatus) validateImmutable(formats strfmt.Registry) error {
	if m.Immutable == nil || swag.IsZero(m.Immutable) { // not required
		return nil
	}
	if err := m.Immutable.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("immutable")
		case *errors.CompositeError:
			return typed.ValidateName("immutable")
		}
		return err
	}
	return nil
}
// validateKvstoreConfiguration validates the optional kvstore sub-object.
func (m *DaemonConfigurationStatus) validateKvstoreConfiguration(formats strfmt.Registry) error {
	if m.KvstoreConfiguration == nil || swag.IsZero(m.KvstoreConfiguration) { // not required
		return nil
	}
	if err := m.KvstoreConfiguration.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("kvstoreConfiguration")
		case *errors.CompositeError:
			return typed.ValidateName("kvstoreConfiguration")
		}
		return err
	}
	return nil
}
// validateMasqueradeProtocols validates the optional masqueradeProtocols
// sub-object when it is present.
func (m *DaemonConfigurationStatus) validateMasqueradeProtocols(formats strfmt.Registry) error {
	if m.MasqueradeProtocols == nil || swag.IsZero(m.MasqueradeProtocols) { // not required
		return nil
	}
	if err := m.MasqueradeProtocols.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("masqueradeProtocols")
		case *errors.CompositeError:
			return typed.ValidateName("masqueradeProtocols")
		}
		return err
	}
	return nil
}
// validateNodeMonitor validates the optional nodeMonitor sub-object when set.
func (m *DaemonConfigurationStatus) validateNodeMonitor(formats strfmt.Registry) error {
	if m.NodeMonitor == nil || swag.IsZero(m.NodeMonitor) { // not required
		return nil
	}
	if err := m.NodeMonitor.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("nodeMonitor")
		case *errors.CompositeError:
			return typed.ValidateName("nodeMonitor")
		}
		return err
	}
	return nil
}
// validateRealized validates the optional realized spec sub-object when set.
func (m *DaemonConfigurationStatus) validateRealized(formats strfmt.Registry) error {
	if m.Realized == nil || swag.IsZero(m.Realized) { // not required
		return nil
	}
	if err := m.Realized.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("realized")
		case *errors.CompositeError:
			return typed.ValidateName("realized")
		}
		return err
	}
	return nil
}
// ContextValidate validate this daemon configuration status based on the
// context it is used, collecting all sub-object failures into one composite
// error.
func (m *DaemonConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateAddressing,
		m.contextValidateDatapathMode,
		m.contextValidateImmutable,
		m.contextValidateKvstoreConfiguration,
		m.contextValidateMasqueradeProtocols,
		m.contextValidateNodeMonitor,
		m.contextValidateRealized,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// contextValidateAddressing runs context-based validation of the optional
// addressing sub-object; nil or zero values are skipped.
func (m *DaemonConfigurationStatus) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
	if m.Addressing == nil || swag.IsZero(m.Addressing) { // not required
		return nil
	}
	if err := m.Addressing.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("addressing")
		case *errors.CompositeError:
			return typed.ValidateName("addressing")
		}
		return err
	}
	return nil
}
// contextValidateDatapathMode runs context-based validation of the optional
// datapathMode field; zero values are skipped.
func (m *DaemonConfigurationStatus) contextValidateDatapathMode(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.DatapathMode) { // not required
		return nil
	}
	if err := m.DatapathMode.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("datapathMode")
		case *errors.CompositeError:
			return typed.ValidateName("datapathMode")
		}
		return err
	}
	return nil
}
// contextValidateImmutable runs context-based validation of the optional
// immutable configuration map; zero values are skipped.
func (m *DaemonConfigurationStatus) contextValidateImmutable(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.Immutable) { // not required
		return nil
	}
	if err := m.Immutable.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("immutable")
		case *errors.CompositeError:
			return typed.ValidateName("immutable")
		}
		return err
	}
	return nil
}
// contextValidateKvstoreConfiguration runs context-based validation of the
// optional kvstore sub-object; nil or zero values are skipped.
func (m *DaemonConfigurationStatus) contextValidateKvstoreConfiguration(ctx context.Context, formats strfmt.Registry) error {
	if m.KvstoreConfiguration == nil || swag.IsZero(m.KvstoreConfiguration) { // not required
		return nil
	}
	if err := m.KvstoreConfiguration.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("kvstoreConfiguration")
		case *errors.CompositeError:
			return typed.ValidateName("kvstoreConfiguration")
		}
		return err
	}
	return nil
}
// contextValidateMasqueradeProtocols runs context-based validation of the
// optional masqueradeProtocols sub-object; nil or zero values are skipped.
func (m *DaemonConfigurationStatus) contextValidateMasqueradeProtocols(ctx context.Context, formats strfmt.Registry) error {
	if m.MasqueradeProtocols == nil || swag.IsZero(m.MasqueradeProtocols) { // not required
		return nil
	}
	if err := m.MasqueradeProtocols.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("masqueradeProtocols")
		case *errors.CompositeError:
			return typed.ValidateName("masqueradeProtocols")
		}
		return err
	}
	return nil
}
// contextValidateNodeMonitor runs context-based validation of the optional
// nodeMonitor sub-object; nil or zero values are skipped.
func (m *DaemonConfigurationStatus) contextValidateNodeMonitor(ctx context.Context, formats strfmt.Registry) error {
	if m.NodeMonitor == nil || swag.IsZero(m.NodeMonitor) { // not required
		return nil
	}
	if err := m.NodeMonitor.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("nodeMonitor")
		case *errors.CompositeError:
			return typed.ValidateName("nodeMonitor")
		}
		return err
	}
	return nil
}
// contextValidateRealized runs context-based validation of the optional
// realized spec sub-object; nil or zero values are skipped.
func (m *DaemonConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
	if m.Realized == nil || swag.IsZero(m.Realized) { // not required
		return nil
	}
	if err := m.Realized.ContextValidate(ctx, formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("realized")
		case *errors.CompositeError:
			return typed.ValidateName("realized")
		}
		return err
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler using JSON as the wire form.
func (m *DaemonConfigurationStatus) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler; the receiver is only
// overwritten if the JSON payload decodes cleanly.
func (m *DaemonConfigurationStatus) UnmarshalBinary(b []byte) error {
	var decoded DaemonConfigurationStatus
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// DaemonConfigurationStatusMasqueradeProtocols Status of masquerading feature
//
// swagger:model DaemonConfigurationStatusMasqueradeProtocols
type DaemonConfigurationStatusMasqueradeProtocols struct {

	// Status of masquerading for IPv4 traffic
	IPV4 bool `json:"ipv4,omitempty"`

	// Status of masquerading for IPv6 traffic
	IPV6 bool `json:"ipv6,omitempty"`
}
// Validate validates this daemon configuration status masquerade protocols.
// Both fields are plain booleans with no constraints, so there is nothing to
// check.
func (m *DaemonConfigurationStatusMasqueradeProtocols) Validate(formats strfmt.Registry) error {
	return nil
}
// ContextValidate validates this daemon configuration status masquerade
// protocols based on context it is used. There are no context-scoped rules,
// so this is a no-op kept to satisfy the generated validation interface.
func (m *DaemonConfigurationStatusMasqueradeProtocols) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler using JSON as the wire form.
func (m *DaemonConfigurationStatusMasqueradeProtocols) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler; the receiver is only
// overwritten if the JSON payload decodes cleanly.
func (m *DaemonConfigurationStatusMasqueradeProtocols) UnmarshalBinary(b []byte) error {
	var decoded DaemonConfigurationStatusMasqueradeProtocols
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// DatapathMode Datapath mode
//
// swagger:model DatapathMode
type DatapathMode string
// NewDatapathMode returns a pointer to a copy of the given DatapathMode value.
func NewDatapathMode(value DatapathMode) *DatapathMode {
	mode := value
	return &mode
}
// Pointer returns a pointer to a freshly-allocated DatapathMode.
// The receiver is a value, so &m always points at a new copy.
func (m DatapathMode) Pointer() *DatapathMode {
	return &m
}
// Enum values accepted for DatapathMode.
const (

	// DatapathModeVeth captures enum value "veth"
	DatapathModeVeth DatapathMode = "veth"

	// DatapathModeNetkit captures enum value "netkit"
	DatapathModeNetkit DatapathMode = "netkit"

	// DatapathModeNetkitDashL2 captures enum value "netkit-l2"
	DatapathModeNetkitDashL2 DatapathMode = "netkit-l2"
)
// datapathModeEnum holds the allowed datapath mode values for schema
// validation, populated once at startup from the swagger enum literal.
var datapathModeEnum []interface{}

func init() {
	var values []DatapathMode
	if err := json.Unmarshal([]byte(`["veth","netkit","netkit-l2"]`), &values); err != nil {
		panic(err)
	}
	for _, dm := range values {
		datapathModeEnum = append(datapathModeEnum, dm)
	}
}
// validateDatapathModeEnum reports an error when value is not one of the
// declared datapath mode enum values (case-sensitive).
func (m DatapathMode) validateDatapathModeEnum(path, location string, value DatapathMode) error {
	return validate.EnumCase(path, location, value, datapathModeEnum, true)
}
// Validate validates this datapath mode against the declared enum values.
//
// Cleanup: the previous body declared an error slice that was never appended
// to, making the CompositeValidationError branch unreachable dead code; the
// enum check result is now returned directly. Behavior is unchanged.
func (m DatapathMode) Validate(formats strfmt.Registry) error {
	return m.validateDatapathModeEnum("", "body", m)
}
// ContextValidate validates this datapath mode based on context it is used.
// There are no context-scoped rules for this enum type, so this is a no-op
// kept to satisfy the generated validation interface.
func (m DatapathMode) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DebugInfo groups some debugging related information on the agent
//
// swagger:model DebugInfo
type DebugInfo struct {

	// cilium memory map
	CiliumMemoryMap string `json:"cilium-memory-map,omitempty"`

	// cilium nodemonitor memory map
	CiliumNodemonitorMemoryMap string `json:"cilium-nodemonitor-memory-map,omitempty"`

	// cilium status
	CiliumStatus *StatusResponse `json:"cilium-status,omitempty"`

	// cilium version
	CiliumVersion string `json:"cilium-version,omitempty"`

	// encryption
	Encryption *DebugInfoEncryption `json:"encryption,omitempty"`

	// endpoint list
	EndpointList []*Endpoint `json:"endpoint-list"`

	// environment variables
	EnvironmentVariables []string `json:"environment-variables"`

	// kernel version
	KernelVersion string `json:"kernel-version,omitempty"`

	// policy
	Policy *Policy `json:"policy,omitempty"`

	// service list
	ServiceList []*Service `json:"service-list"`

	// subsystem
	Subsystem map[string]string `json:"subsystem,omitempty"`
}
// Validate validates this debug info, collecting all field-level failures
// into a single composite error.
func (m *DebugInfo) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateCiliumStatus,
		m.validateEncryption,
		m.validateEndpointList,
		m.validatePolicy,
		m.validateServiceList,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errors.CompositeValidationError(errs...)
	}
	return nil
}
// validateCiliumStatus validates the optional cilium-status sub-object.
func (m *DebugInfo) validateCiliumStatus(formats strfmt.Registry) error {
	if m.CiliumStatus == nil || swag.IsZero(m.CiliumStatus) { // not required
		return nil
	}
	if err := m.CiliumStatus.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("cilium-status")
		case *errors.CompositeError:
			return typed.ValidateName("cilium-status")
		}
		return err
	}
	return nil
}
// validateEncryption validates the optional encryption sub-object when set.
func (m *DebugInfo) validateEncryption(formats strfmt.Registry) error {
	if m.Encryption == nil || swag.IsZero(m.Encryption) { // not required
		return nil
	}
	if err := m.Encryption.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("encryption")
		case *errors.CompositeError:
			return typed.ValidateName("encryption")
		}
		return err
	}
	return nil
}
// validateEndpointList validates each non-nil, non-zero endpoint in the
// optional endpoint-list slice; the first failing element aborts with an
// error named after its index.
func (m *DebugInfo) validateEndpointList(formats strfmt.Registry) error {
	if swag.IsZero(m.EndpointList) { // not required
		return nil
	}
	for i, ep := range m.EndpointList {
		if ep == nil || swag.IsZero(ep) { // element not required
			continue
		}
		if err := ep.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("endpoint-list." + strconv.Itoa(i))
			case *errors.CompositeError:
				return typed.ValidateName("endpoint-list." + strconv.Itoa(i))
			}
			return err
		}
	}
	return nil
}
// validatePolicy validates the optional policy sub-object when it is set.
func (m *DebugInfo) validatePolicy(formats strfmt.Registry) error {
	if m.Policy == nil || swag.IsZero(m.Policy) { // not required
		return nil
	}
	if err := m.Policy.Validate(formats); err != nil {
		switch typed := err.(type) {
		case *errors.Validation:
			return typed.ValidateName("policy")
		case *errors.CompositeError:
			return typed.ValidateName("policy")
		}
		return err
	}
	return nil
}
// validateServiceList validates each non-nil, non-zero service in the
// optional service-list slice; the first failing element aborts with an
// error named after its index.
func (m *DebugInfo) validateServiceList(formats strfmt.Registry) error {
	if swag.IsZero(m.ServiceList) { // not required
		return nil
	}
	for i, svc := range m.ServiceList {
		if svc == nil || swag.IsZero(svc) { // element not required
			continue
		}
		if err := svc.Validate(formats); err != nil {
			switch typed := err.(type) {
			case *errors.Validation:
				return typed.ValidateName("service-list." + strconv.Itoa(i))
			case *errors.CompositeError:
				return typed.ValidateName("service-list." + strconv.Itoa(i))
			}
			return err
		}
	}
	return nil
}
// ContextValidate validates this debug info based on the context it is used in.
// All sub-field validators run; their failures are combined into one composite error.
func (m *DebugInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	validators := []func(context.Context, strfmt.Registry) error{
		m.contextValidateCiliumStatus,
		m.contextValidateEncryption,
		m.contextValidateEndpointList,
		m.contextValidatePolicy,
		m.contextValidateServiceList,
	}
	var errs []error
	for _, fn := range validators {
		if err := fn(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateCiliumStatus context-validates the "cilium-status" field when set.
func (m *DebugInfo) contextValidateCiliumStatus(ctx context.Context, formats strfmt.Registry) error {
	if m.CiliumStatus == nil {
		return nil
	}
	if swag.IsZero(m.CiliumStatus) { // not required
		return nil
	}
	err := m.CiliumStatus.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("cilium-status")
	case *errors.CompositeError:
		return typed.ValidateName("cilium-status")
	}
	return err
}
// contextValidateEncryption context-validates the "encryption" field when set.
func (m *DebugInfo) contextValidateEncryption(ctx context.Context, formats strfmt.Registry) error {
	if m.Encryption == nil {
		return nil
	}
	if swag.IsZero(m.Encryption) { // not required
		return nil
	}
	err := m.Encryption.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("encryption")
	case *errors.CompositeError:
		return typed.ValidateName("encryption")
	}
	return err
}
// contextValidateEndpointList runs context validation on every non-zero
// entry of the "endpoint-list" field.
func (m *DebugInfo) contextValidateEndpointList(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.EndpointList); i++ {
		if m.EndpointList[i] != nil {
			if swag.IsZero(m.EndpointList[i]) { // not required
				// Fixed: this previously returned nil, which silently skipped
				// context validation of every entry after the first zero-valued
				// one. Only this entry should be skipped (matching the
				// non-context validateEndpointList, which uses continue).
				continue
			}
			if err := m.EndpointList[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// contextValidatePolicy context-validates the "policy" field when set.
func (m *DebugInfo) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
	if m.Policy == nil {
		return nil
	}
	if swag.IsZero(m.Policy) { // not required
		return nil
	}
	err := m.Policy.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("policy")
	case *errors.CompositeError:
		return typed.ValidateName("policy")
	}
	return err
}
// contextValidateServiceList runs context validation on every non-zero
// entry of the "service-list" field.
func (m *DebugInfo) contextValidateServiceList(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.ServiceList); i++ {
		if m.ServiceList[i] != nil {
			if swag.IsZero(m.ServiceList[i]) { // not required
				// Fixed: this previously returned nil, which silently skipped
				// context validation of every entry after the first zero-valued
				// one. Only this entry should be skipped (matching the
				// non-context validateServiceList, which uses continue).
				continue
			}
			if err := m.ServiceList[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("service-list" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("service-list" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
// A nil receiver serializes to nil without error.
func (m *DebugInfo) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON deserialization.
// The receiver is only overwritten when decoding succeeds.
func (m *DebugInfo) UnmarshalBinary(b []byte) error {
	var decoded DebugInfo
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// DebugInfoEncryption debug info encryption
//
// swagger:model DebugInfoEncryption
type DebugInfoEncryption struct {

	// Status of the WireGuard agent
	Wireguard *WireguardStatus `json:"wireguard,omitempty"`
}

// Validate validates this debug info encryption
func (m *DebugInfoEncryption) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateWireguard(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// validateWireguard validates the optional "wireguard" field when it is set.
func (m *DebugInfoEncryption) validateWireguard(formats strfmt.Registry) error {
	if swag.IsZero(m.Wireguard) { // not required
		return nil
	}
	if m.Wireguard == nil {
		return nil
	}
	err := m.Wireguard.Validate(formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("encryption" + "." + "wireguard")
	case *errors.CompositeError:
		return typed.ValidateName("encryption" + "." + "wireguard")
	}
	return err
}

// ContextValidate validate this debug info encryption based on the context it is used
func (m *DebugInfoEncryption) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateWireguard(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// contextValidateWireguard context-validates the "wireguard" field when set.
func (m *DebugInfoEncryption) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error {
	if m.Wireguard == nil {
		return nil
	}
	if swag.IsZero(m.Wireguard) { // not required
		return nil
	}
	err := m.Wireguard.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("encryption" + "." + "wireguard")
	case *errors.CompositeError:
		return typed.ValidateName("encryption" + "." + "wireguard")
	}
	return err
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *DebugInfoEncryption) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON deserialization.
func (m *DebugInfoEncryption) UnmarshalBinary(b []byte) error {
	var decoded DebugInfoEncryption
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// DNSLookup An IP -> DNS mapping, with metadata
//
// swagger:model DNSLookup
type DNSLookup struct {

	// The endpoint that made this lookup, or 0 for the agent itself.
	EndpointID int64 `json:"endpoint-id,omitempty"`

	// The absolute time when this data will expire in this cache
	// Format: date-time
	ExpirationTime strfmt.DateTime `json:"expiration-time,omitempty"`

	// DNS name
	Fqdn string `json:"fqdn,omitempty"`

	// IP addresses returned in this lookup
	Ips []string `json:"ips"`

	// The absolute time when this data was received
	// Format: date-time
	LookupTime strfmt.DateTime `json:"lookup-time,omitempty"`

	// The reason this FQDN IP association exists. Either a DNS lookup or an ongoing connection to an IP that was created by a DNS lookup.
	Source string `json:"source,omitempty"`

	// The TTL in the DNS response
	TTL int64 `json:"ttl,omitempty"`
}

// Validate validates this DNS lookup
func (m *DNSLookup) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateExpirationTime(formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.validateLookupTime(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// validateExpirationTime checks the date-time format of the optional
// "expiration-time" field.
func (m *DNSLookup) validateExpirationTime(formats strfmt.Registry) error {
	if swag.IsZero(m.ExpirationTime) { // not required
		return nil
	}
	return validate.FormatOf("expiration-time", "body", "date-time", m.ExpirationTime.String(), formats)
}

// validateLookupTime checks the date-time format of the optional
// "lookup-time" field.
func (m *DNSLookup) validateLookupTime(formats strfmt.Registry) error {
	if swag.IsZero(m.LookupTime) { // not required
		return nil
	}
	return validate.FormatOf("lookup-time", "body", "date-time", m.LookupTime.String(), formats)
}

// ContextValidate validates this DNS lookup based on context it is used.
// The model has no context-sensitive fields, so there is nothing to check.
func (m *DNSLookup) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *DNSLookup) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON deserialization.
func (m *DNSLookup) UnmarshalBinary(b []byte) error {
	var decoded DNSLookup
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EncryptionStatus Status of transparent encryption
//
// +k8s:deepcopy-gen=true
//
// swagger:model EncryptionStatus
type EncryptionStatus struct {

	// Status of the IPsec agent
	Ipsec *IPsecStatus `json:"ipsec,omitempty"`

	// mode
	// Enum: ["Disabled","IPsec","Wireguard"]
	Mode string `json:"mode,omitempty"`

	// Human readable error/warning message
	Msg string `json:"msg,omitempty"`

	// Status of the WireGuard agent
	Wireguard *WireguardStatus `json:"wireguard,omitempty"`
}

// Validate validates this encryption status
func (m *EncryptionStatus) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateIpsec(formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.validateMode(formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.validateWireguard(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// validateIpsec validates the optional "ipsec" field when it is set.
func (m *EncryptionStatus) validateIpsec(formats strfmt.Registry) error {
	if swag.IsZero(m.Ipsec) { // not required
		return nil
	}
	if m.Ipsec == nil {
		return nil
	}
	err := m.Ipsec.Validate(formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("ipsec")
	case *errors.CompositeError:
		return typed.ValidateName("ipsec")
	}
	return err
}

// encryptionStatusTypeModePropEnum holds the allowed values for the mode field.
var encryptionStatusTypeModePropEnum []interface{}

func init() {
	// Decode the enum values from the swagger spec literal once at startup.
	var modes []string
	if err := json.Unmarshal([]byte(`["Disabled","IPsec","Wireguard"]`), &modes); err != nil {
		panic(err)
	}
	for _, mode := range modes {
		encryptionStatusTypeModePropEnum = append(encryptionStatusTypeModePropEnum, mode)
	}
}

const (

	// EncryptionStatusModeDisabled captures enum value "Disabled"
	EncryptionStatusModeDisabled string = "Disabled"

	// EncryptionStatusModeIPsec captures enum value "IPsec"
	EncryptionStatusModeIPsec string = "IPsec"

	// EncryptionStatusModeWireguard captures enum value "Wireguard"
	EncryptionStatusModeWireguard string = "Wireguard"
)

// validateModeEnum checks that value is one of the allowed mode enum values.
func (m *EncryptionStatus) validateModeEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, encryptionStatusTypeModePropEnum, true)
}

// validateMode validates the optional "mode" field against its enum.
func (m *EncryptionStatus) validateMode(formats strfmt.Registry) error {
	if swag.IsZero(m.Mode) { // not required
		return nil
	}
	return m.validateModeEnum("mode", "body", m.Mode)
}

// validateWireguard validates the optional "wireguard" field when it is set.
func (m *EncryptionStatus) validateWireguard(formats strfmt.Registry) error {
	if swag.IsZero(m.Wireguard) { // not required
		return nil
	}
	if m.Wireguard == nil {
		return nil
	}
	err := m.Wireguard.Validate(formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("wireguard")
	case *errors.CompositeError:
		return typed.ValidateName("wireguard")
	}
	return err
}

// ContextValidate validate this encryption status based on the context it is used
func (m *EncryptionStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateIpsec(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.contextValidateWireguard(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// contextValidateIpsec context-validates the "ipsec" field when set.
func (m *EncryptionStatus) contextValidateIpsec(ctx context.Context, formats strfmt.Registry) error {
	if m.Ipsec == nil {
		return nil
	}
	if swag.IsZero(m.Ipsec) { // not required
		return nil
	}
	err := m.Ipsec.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("ipsec")
	case *errors.CompositeError:
		return typed.ValidateName("ipsec")
	}
	return err
}

// contextValidateWireguard context-validates the "wireguard" field when set.
func (m *EncryptionStatus) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error {
	if m.Wireguard == nil {
		return nil
	}
	if swag.IsZero(m.Wireguard) { // not required
		return nil
	}
	err := m.Wireguard.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("wireguard")
	case *errors.CompositeError:
		return typed.ValidateName("wireguard")
	}
	return err
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *EncryptionStatus) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON deserialization.
func (m *EncryptionStatus) UnmarshalBinary(b []byte) error {
	var decoded EncryptionStatus
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Endpoint An endpoint is a namespaced network interface to which cilium applies policies
//
// swagger:model Endpoint
type Endpoint struct {

	// The cilium-agent-local ID of the endpoint
	ID int64 `json:"id,omitempty"`

	// The desired configuration state of the endpoint
	Spec *EndpointConfigurationSpec `json:"spec,omitempty"`

	// The desired and realized configuration state of the endpoint
	Status *EndpointStatus `json:"status,omitempty"`
}

// Validate validates this endpoint
func (m *Endpoint) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateSpec(formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.validateStatus(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// validateSpec validates the optional "spec" field when it is set.
func (m *Endpoint) validateSpec(formats strfmt.Registry) error {
	if swag.IsZero(m.Spec) { // not required
		return nil
	}
	if m.Spec == nil {
		return nil
	}
	err := m.Spec.Validate(formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("spec")
	case *errors.CompositeError:
		return typed.ValidateName("spec")
	}
	return err
}

// validateStatus validates the optional "status" field when it is set.
func (m *Endpoint) validateStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.Status) { // not required
		return nil
	}
	if m.Status == nil {
		return nil
	}
	err := m.Status.Validate(formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("status")
	case *errors.CompositeError:
		return typed.ValidateName("status")
	}
	return err
}

// ContextValidate validate this endpoint based on the context it is used
func (m *Endpoint) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateSpec(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.contextValidateStatus(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// contextValidateSpec context-validates the "spec" field when set.
func (m *Endpoint) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
	if m.Spec == nil {
		return nil
	}
	if swag.IsZero(m.Spec) { // not required
		return nil
	}
	err := m.Spec.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("spec")
	case *errors.CompositeError:
		return typed.ValidateName("spec")
	}
	return err
}

// contextValidateStatus context-validates the "status" field when set.
func (m *Endpoint) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
	if m.Status == nil {
		return nil
	}
	if swag.IsZero(m.Status) { // not required
		return nil
	}
	err := m.Status.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("status")
	case *errors.CompositeError:
		return typed.ValidateName("status")
	}
	return err
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *Endpoint) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON deserialization.
func (m *Endpoint) UnmarshalBinary(b []byte) error {
	var decoded Endpoint
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointBatchDeleteRequest Properties selecting a batch of endpoints to delete.
//
// swagger:model EndpointBatchDeleteRequest
type EndpointBatchDeleteRequest struct {

	// ID assigned by container runtime
	ContainerID string `json:"container-id,omitempty"`
}

// Validate validates this endpoint batch delete request.
// The model has no constrained fields, so there is nothing to check.
func (m *EndpointBatchDeleteRequest) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this endpoint batch delete request based on context it is used.
// The model has no context-sensitive fields, so there is nothing to check.
func (m *EndpointBatchDeleteRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *EndpointBatchDeleteRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON deserialization.
func (m *EndpointBatchDeleteRequest) UnmarshalBinary(b []byte) error {
	var decoded EndpointBatchDeleteRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointChangeRequest Structure which contains the mutable elements of an Endpoint.
//
// swagger:model EndpointChangeRequest
type EndpointChangeRequest struct {

	// addressing
	Addressing *AddressPair `json:"addressing,omitempty"`

	// ID assigned by container runtime
	ContainerID string `json:"container-id,omitempty"`

	// Name of network device in container netns
	ContainerInterfaceName string `json:"container-interface-name,omitempty"`

	// Name assigned to container
	ContainerName string `json:"container-name,omitempty"`

	// datapath configuration
	DatapathConfiguration *EndpointDatapathConfiguration `json:"datapath-configuration,omitempty"`

	// ID of datapath tail call map
	DatapathMapID int64 `json:"datapath-map-id,omitempty"`

	// Disables lookup using legacy endpoint identifiers (container name, container id, pod name) for this endpoint
	DisableLegacyIdentifiers bool `json:"disable-legacy-identifiers,omitempty"`

	// Docker endpoint ID
	DockerEndpointID string `json:"docker-endpoint-id,omitempty"`

	// Docker network ID
	DockerNetworkID string `json:"docker-network-id,omitempty"`

	// MAC address
	HostMac string `json:"host-mac,omitempty"`

	// Local endpoint ID
	ID int64 `json:"id,omitempty"`

	// Index of network device in host netns
	InterfaceIndex int64 `json:"interface-index,omitempty"`

	// Name of network device in host netns
	InterfaceName string `json:"interface-name,omitempty"`

	// Kubernetes namespace name
	K8sNamespace string `json:"k8s-namespace,omitempty"`

	// Kubernetes pod name
	K8sPodName string `json:"k8s-pod-name,omitempty"`

	// Kubernetes pod UID
	K8sUID string `json:"k8s-uid,omitempty"`

	// Labels describing the identity
	Labels Labels `json:"labels,omitempty"`

	// MAC address
	Mac string `json:"mac,omitempty"`

	// Network namespace cookie
	NetnsCookie string `json:"netns-cookie,omitempty"`

	// Index of network device from which an IP was used as endpoint IP. Only relevant for ENI environments.
	ParentInterfaceIndex int64 `json:"parent-interface-index,omitempty"`

	// Process ID of the workload belonging to this endpoint
	Pid int64 `json:"pid,omitempty"`

	// Whether policy enforcement is enabled or not
	PolicyEnabled bool `json:"policy-enabled,omitempty"`

	// Properties is used to store information about the endpoint at creation. Useful for tests.
	Properties map[string]interface{} `json:"properties,omitempty"`

	// Current state of endpoint
	// Required: true
	State *EndpointState `json:"state"`

	// Whether to build an endpoint synchronously
	//
	SyncBuildEndpoint bool `json:"sync-build-endpoint,omitempty"`
}

// Validate validates this endpoint change request
func (m *EndpointChangeRequest) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateAddressing(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateDatapathConfiguration(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateLabels(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateState(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateAddressing validates the optional "addressing" field when it is set.
func (m *EndpointChangeRequest) validateAddressing(formats strfmt.Registry) error {
	if swag.IsZero(m.Addressing) { // not required
		return nil
	}

	if m.Addressing != nil {
		if err := m.Addressing.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("addressing")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("addressing")
			}
			return err
		}
	}

	return nil
}

// validateDatapathConfiguration validates the optional "datapath-configuration"
// field when it is set.
func (m *EndpointChangeRequest) validateDatapathConfiguration(formats strfmt.Registry) error {
	if swag.IsZero(m.DatapathConfiguration) { // not required
		return nil
	}

	if m.DatapathConfiguration != nil {
		if err := m.DatapathConfiguration.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("datapath-configuration")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("datapath-configuration")
			}
			return err
		}
	}

	return nil
}

// validateLabels validates the optional "labels" field when it is set.
func (m *EndpointChangeRequest) validateLabels(formats strfmt.Registry) error {
	if swag.IsZero(m.Labels) { // not required
		return nil
	}

	if err := m.Labels.Validate(formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("labels")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("labels")
		}
		return err
	}

	return nil
}

// validateState checks that the required "state" field is present and valid.
func (m *EndpointChangeRequest) validateState(formats strfmt.Registry) error {
	// Fixed: the generated code ran this identical Required check twice in a
	// row; one check is sufficient.
	if err := validate.Required("state", "body", m.State); err != nil {
		return err
	}

	if m.State != nil {
		if err := m.State.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("state")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("state")
			}
			return err
		}
	}

	return nil
}

// ContextValidate validate this endpoint change request based on the context it is used
func (m *EndpointChangeRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateAddressing(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateDatapathConfiguration(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateLabels(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateState(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateAddressing context-validates the "addressing" field when set.
func (m *EndpointChangeRequest) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
	if m.Addressing != nil {

		if swag.IsZero(m.Addressing) { // not required
			return nil
		}

		if err := m.Addressing.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("addressing")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("addressing")
			}
			return err
		}
	}

	return nil
}

// contextValidateDatapathConfiguration context-validates the
// "datapath-configuration" field when set.
func (m *EndpointChangeRequest) contextValidateDatapathConfiguration(ctx context.Context, formats strfmt.Registry) error {
	if m.DatapathConfiguration != nil {

		if swag.IsZero(m.DatapathConfiguration) { // not required
			return nil
		}

		if err := m.DatapathConfiguration.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("datapath-configuration")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("datapath-configuration")
			}
			return err
		}
	}

	return nil
}

// contextValidateLabels context-validates the "labels" field.
func (m *EndpointChangeRequest) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
	if err := m.Labels.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("labels")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("labels")
		}
		return err
	}

	return nil
}

// contextValidateState context-validates the required "state" field.
// Note: no IsZero short-circuit here because the field is required.
func (m *EndpointChangeRequest) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
	if m.State != nil {

		if err := m.State.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("state")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("state")
			}
			return err
		}
	}

	return nil
}

// MarshalBinary interface implementation
func (m *EndpointChangeRequest) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *EndpointChangeRequest) UnmarshalBinary(b []byte) error {
	var res EndpointChangeRequest
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointConfigurationSpec An endpoint's configuration
//
// swagger:model EndpointConfigurationSpec
type EndpointConfigurationSpec struct {

	// the endpoint's labels
	LabelConfiguration *LabelConfigurationSpec `json:"label-configuration,omitempty"`

	// Changeable configuration
	Options ConfigurationMap `json:"options,omitempty"`
}

// Validate validates this endpoint configuration spec
func (m *EndpointConfigurationSpec) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateLabelConfiguration(formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.validateOptions(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// validateLabelConfiguration validates the optional "label-configuration"
// field when it is set.
func (m *EndpointConfigurationSpec) validateLabelConfiguration(formats strfmt.Registry) error {
	if swag.IsZero(m.LabelConfiguration) { // not required
		return nil
	}
	if m.LabelConfiguration == nil {
		return nil
	}
	err := m.LabelConfiguration.Validate(formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("label-configuration")
	case *errors.CompositeError:
		return typed.ValidateName("label-configuration")
	}
	return err
}

// validateOptions validates the optional "options" field when it is set.
func (m *EndpointConfigurationSpec) validateOptions(formats strfmt.Registry) error {
	if swag.IsZero(m.Options) { // not required
		return nil
	}
	if m.Options == nil {
		return nil
	}
	err := m.Options.Validate(formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("options")
	case *errors.CompositeError:
		return typed.ValidateName("options")
	}
	return err
}

// ContextValidate validate this endpoint configuration spec based on the context it is used
func (m *EndpointConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateLabelConfiguration(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if err := m.contextValidateOptions(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// contextValidateLabelConfiguration context-validates the
// "label-configuration" field when set.
func (m *EndpointConfigurationSpec) contextValidateLabelConfiguration(ctx context.Context, formats strfmt.Registry) error {
	if m.LabelConfiguration == nil {
		return nil
	}
	if swag.IsZero(m.LabelConfiguration) { // not required
		return nil
	}
	err := m.LabelConfiguration.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("label-configuration")
	case *errors.CompositeError:
		return typed.ValidateName("label-configuration")
	}
	return err
}

// contextValidateOptions context-validates the "options" field when set.
func (m *EndpointConfigurationSpec) contextValidateOptions(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.Options) { // not required
		return nil
	}
	err := m.Options.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("options")
	case *errors.CompositeError:
		return typed.ValidateName("options")
	}
	return err
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *EndpointConfigurationSpec) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON deserialization.
func (m *EndpointConfigurationSpec) UnmarshalBinary(b []byte) error {
	var decoded EndpointConfigurationSpec
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointConfigurationStatus An endpoint's configuration
//
// swagger:model EndpointConfigurationStatus
type EndpointConfigurationStatus struct {
// Most recent error, if applicable
Error Error `json:"error,omitempty"`
// Immutable configuration (read-only)
Immutable ConfigurationMap `json:"immutable,omitempty"`
// currently applied changeable configuration
Realized *EndpointConfigurationSpec `json:"realized,omitempty"`
}
// Validate validates this endpoint configuration status
func (m *EndpointConfigurationStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateError(formats); err != nil {
res = append(res, err)
}
if err := m.validateImmutable(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationStatus) validateError(formats strfmt.Registry) error {
if swag.IsZero(m.Error) { // not required
return nil
}
if err := m.Error.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("error")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("error")
}
return err
}
return nil
}
// validateImmutable checks the optional "immutable" configuration map.
func (m *EndpointConfigurationStatus) validateImmutable(formats strfmt.Registry) error {
	if swag.IsZero(m.Immutable) { // not required
		return nil
	}
	if m.Immutable == nil {
		return nil
	}
	err := m.Immutable.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("immutable")
	case *errors.CompositeError:
		return e.ValidateName("immutable")
	}
	return err
}
// validateRealized checks the optional "realized" spec when present.
func (m *EndpointConfigurationStatus) validateRealized(formats strfmt.Registry) error {
	if swag.IsZero(m.Realized) { // not required
		return nil
	}
	if m.Realized == nil {
		return nil
	}
	err := m.Realized.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("realized")
	case *errors.CompositeError:
		return e.ValidateName("realized")
	}
	return err
}
// ContextValidate validates this endpoint configuration status based on the
// context it is used in, aggregating all per-field failures.
func (m *EndpointConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateError,
		m.contextValidateImmutable,
		m.contextValidateRealized,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateError context-validates the optional "error" field.
func (m *EndpointConfigurationStatus) contextValidateError(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.Error) { // not required
		return nil
	}
	err := m.Error.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("error")
	case *errors.CompositeError:
		return e.ValidateName("error")
	}
	return err
}
// contextValidateImmutable context-validates the optional "immutable" map.
func (m *EndpointConfigurationStatus) contextValidateImmutable(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.Immutable) { // not required
		return nil
	}
	err := m.Immutable.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("immutable")
	case *errors.CompositeError:
		return e.ValidateName("immutable")
	}
	return err
}
// contextValidateRealized context-validates the optional "realized" spec.
func (m *EndpointConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
	// Nil or zero-valued: nothing to validate.
	if m.Realized == nil || swag.IsZero(m.Realized) {
		return nil
	}
	err := m.Realized.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("realized")
	case *errors.CompositeError:
		return e.ValidateName("realized")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *EndpointConfigurationStatus) MarshalBinary() ([]byte, error) {
	// A nil receiver marshals to a nil payload rather than an error.
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *EndpointConfigurationStatus) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value; only overwrite the receiver on success.
	var decoded EndpointConfigurationStatus
	err := swag.ReadJSON(b, &decoded)
	if err == nil {
		*m = decoded
	}
	return err
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointDatapathConfiguration Datapath configuration to be used for the endpoint
//
// NOTE: generated go-swagger model; regenerate instead of editing by hand.
//
// swagger:model EndpointDatapathConfiguration
type EndpointDatapathConfiguration struct {
// Disable source IP verification for the endpoint.
//
DisableSipVerification bool `json:"disable-sip-verification,omitempty"`
// Indicates that IPAM is done external to Cilium. This will prevent the IP from being released and re-allocation of the IP address is skipped on restore.
//
ExternalIpam bool `json:"external-ipam,omitempty"`
// Installs a route in the Linux routing table pointing to the device of the endpoint's interface.
//
InstallEndpointRoute bool `json:"install-endpoint-route,omitempty"`
// Enable ARP passthrough mode
RequireArpPassthrough bool `json:"require-arp-passthrough,omitempty"`
// Endpoint requires a host-facing egress program to be attached to implement ingress policy and reverse NAT.
//
RequireEgressProg bool `json:"require-egress-prog,omitempty"`
// Endpoint requires BPF routing to be enabled, when disabled, routing is delegated to Linux routing.
// Pointer so that "unset" can be distinguished from explicit false.
RequireRouting *bool `json:"require-routing,omitempty"`
}
// Validate validates this endpoint datapath configuration
// All fields are plain booleans with no constraints, so there is nothing to check.
func (m *EndpointDatapathConfiguration) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this endpoint datapath configuration based on context it is used
// No nested models, so context validation is a no-op.
func (m *EndpointDatapathConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *EndpointDatapathConfiguration) MarshalBinary() ([]byte, error) {
	// A nil receiver marshals to a nil payload rather than an error.
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *EndpointDatapathConfiguration) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value; only overwrite the receiver on success.
	var decoded EndpointDatapathConfiguration
	err := swag.ReadJSON(b, &decoded)
	if err == nil {
		*m = decoded
	}
	return err
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointHealth Health of the endpoint
//
// +deepequal-gen=true
//
// NOTE: generated go-swagger model; regenerate instead of editing by hand.
//
// swagger:model EndpointHealth
type EndpointHealth struct {
// bpf — health of the endpoint's BPF datapath component
Bpf EndpointHealthStatus `json:"bpf,omitempty"`
// Is this endpoint reachable
Connected bool `json:"connected,omitempty"`
// overall health rollup across components
OverallHealth EndpointHealthStatus `json:"overallHealth,omitempty"`
// policy — health of the endpoint's policy component
Policy EndpointHealthStatus `json:"policy,omitempty"`
}
// Validate validates this endpoint health, collecting all per-field
// failures into a single composite error.
func (m *EndpointHealth) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateBpf,
		m.validateOverallHealth,
		m.validatePolicy,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateBpf checks the optional "bpf" status field.
func (m *EndpointHealth) validateBpf(formats strfmt.Registry) error {
	if swag.IsZero(m.Bpf) { // not required
		return nil
	}
	err := m.Bpf.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("bpf")
	case *errors.CompositeError:
		return e.ValidateName("bpf")
	}
	return err
}
// validateOverallHealth checks the optional "overallHealth" status field.
func (m *EndpointHealth) validateOverallHealth(formats strfmt.Registry) error {
	if swag.IsZero(m.OverallHealth) { // not required
		return nil
	}
	err := m.OverallHealth.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("overallHealth")
	case *errors.CompositeError:
		return e.ValidateName("overallHealth")
	}
	return err
}
// validatePolicy checks the optional "policy" status field.
func (m *EndpointHealth) validatePolicy(formats strfmt.Registry) error {
	if swag.IsZero(m.Policy) { // not required
		return nil
	}
	err := m.Policy.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("policy")
	case *errors.CompositeError:
		return e.ValidateName("policy")
	}
	return err
}
// ContextValidate validates this endpoint health based on the context it is
// used in, aggregating all per-field failures.
func (m *EndpointHealth) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateBpf,
		m.contextValidateOverallHealth,
		m.contextValidatePolicy,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateBpf context-validates the optional "bpf" status field.
func (m *EndpointHealth) contextValidateBpf(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.Bpf) { // not required
		return nil
	}
	err := m.Bpf.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("bpf")
	case *errors.CompositeError:
		return e.ValidateName("bpf")
	}
	return err
}
// contextValidateOverallHealth context-validates the "overallHealth" field.
func (m *EndpointHealth) contextValidateOverallHealth(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.OverallHealth) { // not required
		return nil
	}
	err := m.OverallHealth.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("overallHealth")
	case *errors.CompositeError:
		return e.ValidateName("overallHealth")
	}
	return err
}
// contextValidatePolicy context-validates the optional "policy" field.
func (m *EndpointHealth) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.Policy) { // not required
		return nil
	}
	err := m.Policy.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("policy")
	case *errors.CompositeError:
		return e.ValidateName("policy")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *EndpointHealth) MarshalBinary() ([]byte, error) {
	// A nil receiver marshals to a nil payload rather than an error.
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *EndpointHealth) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value; only overwrite the receiver on success.
	var decoded EndpointHealth
	err := swag.ReadJSON(b, &decoded)
	if err == nil {
		*m = decoded
	}
	return err
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointHealthStatus A common set of statuses for endpoint health * “OK“ = All components operational * “Bootstrap“ = This component is being created * “Pending“ = A change is being processed to be applied * “Warning“ = This component is not applying up-to-date policies (but is still applying the previous version) * “Failure“ = An error has occurred and no policy is being applied * “Disabled“ = This endpoint is disabled and will not handle traffic
//
// swagger:model EndpointHealthStatus
type EndpointHealthStatus string
// NewEndpointHealthStatus returns a pointer to the given enum value.
func NewEndpointHealthStatus(value EndpointHealthStatus) *EndpointHealthStatus {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointHealthStatus.
func (m EndpointHealthStatus) Pointer() *EndpointHealthStatus {
return &m
}
// Allowed enum values; must stay in sync with the JSON list in init below.
const (
// EndpointHealthStatusOK captures enum value "OK"
EndpointHealthStatusOK EndpointHealthStatus = "OK"
// EndpointHealthStatusBootstrap captures enum value "Bootstrap"
EndpointHealthStatusBootstrap EndpointHealthStatus = "Bootstrap"
// EndpointHealthStatusPending captures enum value "Pending"
EndpointHealthStatusPending EndpointHealthStatus = "Pending"
// EndpointHealthStatusWarning captures enum value "Warning"
EndpointHealthStatusWarning EndpointHealthStatus = "Warning"
// EndpointHealthStatusFailure captures enum value "Failure"
EndpointHealthStatusFailure EndpointHealthStatus = "Failure"
// EndpointHealthStatusDisabled captures enum value "Disabled"
EndpointHealthStatusDisabled EndpointHealthStatus = "Disabled"
)
// for schema: the allowed values, boxed as []interface{} for validate.EnumCase.
var endpointHealthStatusEnum []interface{}
// init populates the enum list from the generated JSON literal; a failure here
// indicates a generator bug, hence the panic at program start.
func init() {
var res []EndpointHealthStatus
if err := json.Unmarshal([]byte(`["OK","Bootstrap","Pending","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointHealthStatusEnum = append(endpointHealthStatusEnum, v)
}
}
// validateEndpointHealthStatusEnum reports an error when value is not one of
// the allowed enum values (case-sensitive).
func (m EndpointHealthStatus) validateEndpointHealthStatusEnum(path, location string, value EndpointHealthStatus) error {
	return validate.EnumCase(path, location, value, endpointHealthStatusEnum, true)
}
// Validate validates this endpoint health status
// NOTE(review): res is never appended to, so the CompositeValidationError
// branch below is dead code; it is kept as emitted by the generator (it also
// keeps the errors import in use for this generated file).
func (m EndpointHealthStatus) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointHealthStatusEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint health status based on context it is used
// A plain string enum has no context-dependent rules, so this is a no-op.
func (m EndpointHealthStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointIdentifiers Unique identifiers for this endpoint from outside cilium
//
// +deepequal-gen=true
//
// NOTE: generated go-swagger model; regenerate instead of editing by hand.
//
// swagger:model EndpointIdentifiers
type EndpointIdentifiers struct {
// ID assigned to this attachment by container runtime
CniAttachmentID string `json:"cni-attachment-id,omitempty"`
// ID assigned by container runtime (deprecated, may not be unique)
ContainerID string `json:"container-id,omitempty"`
// Name assigned to container (deprecated, may not be unique)
ContainerName string `json:"container-name,omitempty"`
// Docker endpoint ID
DockerEndpointID string `json:"docker-endpoint-id,omitempty"`
// Docker network ID
DockerNetworkID string `json:"docker-network-id,omitempty"`
// K8s namespace for this endpoint (deprecated, may not be unique)
K8sNamespace string `json:"k8s-namespace,omitempty"`
// K8s pod name for this endpoint (deprecated, may not be unique)
K8sPodName string `json:"k8s-pod-name,omitempty"`
// K8s pod for this endpoint (deprecated, may not be unique)
PodName string `json:"pod-name,omitempty"`
}
// Validate validates this endpoint identifiers
// All fields are unconstrained strings, so there is nothing to check.
func (m *EndpointIdentifiers) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this endpoint identifiers based on context it is used
// No nested models, so context validation is a no-op.
func (m *EndpointIdentifiers) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *EndpointIdentifiers) MarshalBinary() ([]byte, error) {
	// A nil receiver marshals to a nil payload rather than an error.
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *EndpointIdentifiers) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value; only overwrite the receiver on success.
	var decoded EndpointIdentifiers
	err := swag.ReadJSON(b, &decoded)
	if err == nil {
		*m = decoded
	}
	return err
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointNetworking Unique identifiers for this endpoint from outside cilium
//
// NOTE: generated go-swagger model; regenerate instead of editing by hand.
//
// swagger:model EndpointNetworking
type EndpointNetworking struct {
// IP4/6 addresses assigned to this Endpoint
Addressing []*AddressPair `json:"addressing"`
// Name of network device in container netns
ContainerInterfaceName string `json:"container-interface-name,omitempty"`
// host addressing
HostAddressing *NodeAddressing `json:"host-addressing,omitempty"`
// MAC address (host side of the veth pair)
HostMac string `json:"host-mac,omitempty"`
// Index of network device in host netns
InterfaceIndex int64 `json:"interface-index,omitempty"`
// Name of network device in host netns
InterfaceName string `json:"interface-name,omitempty"`
// MAC address (endpoint side)
Mac string `json:"mac,omitempty"`
}
// Validate validates this endpoint networking, collecting all per-field
// failures into a single composite error.
func (m *EndpointNetworking) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateAddressing,
		m.validateHostAddressing,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateAddressing validates each non-nil, non-zero entry of the
// addressing list, reporting failures under "addressing.<index>".
func (m *EndpointNetworking) validateAddressing(formats strfmt.Registry) error {
	if swag.IsZero(m.Addressing) { // not required
		return nil
	}
	for i, pair := range m.Addressing {
		if pair == nil || swag.IsZero(pair) {
			continue
		}
		err := pair.Validate(formats)
		if err == nil {
			continue
		}
		name := "addressing" + "." + strconv.Itoa(i)
		switch e := err.(type) {
		case *errors.Validation:
			return e.ValidateName(name)
		case *errors.CompositeError:
			return e.ValidateName(name)
		}
		return err
	}
	return nil
}
// validateHostAddressing checks the optional "host-addressing" field.
func (m *EndpointNetworking) validateHostAddressing(formats strfmt.Registry) error {
	if swag.IsZero(m.HostAddressing) { // not required
		return nil
	}
	if m.HostAddressing == nil {
		return nil
	}
	err := m.HostAddressing.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("host-addressing")
	case *errors.CompositeError:
		return e.ValidateName("host-addressing")
	}
	return err
}
// ContextValidate validates this endpoint networking based on the context it
// is used in, aggregating all per-field failures.
func (m *EndpointNetworking) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateAddressing,
		m.contextValidateHostAddressing,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateAddressing context-validates every non-nil, non-zero entry
// of the addressing list.
//
// Fix: the generated template returned nil as soon as it met a zero-valued
// element, silently skipping context-validation of all remaining entries;
// this version skips only that element and continues with the next one.
func (m *EndpointNetworking) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Addressing); i++ {
		if m.Addressing[i] == nil {
			continue
		}
		if swag.IsZero(m.Addressing[i]) { // not required
			continue
		}
		if err := m.Addressing[i].ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("addressing" + "." + strconv.Itoa(i))
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("addressing" + "." + strconv.Itoa(i))
			}
			return err
		}
	}
	return nil
}
// contextValidateHostAddressing context-validates "host-addressing".
func (m *EndpointNetworking) contextValidateHostAddressing(ctx context.Context, formats strfmt.Registry) error {
	// Nil or zero-valued: nothing to validate.
	if m.HostAddressing == nil || swag.IsZero(m.HostAddressing) {
		return nil
	}
	err := m.HostAddressing.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("host-addressing")
	case *errors.CompositeError:
		return e.ValidateName("host-addressing")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *EndpointNetworking) MarshalBinary() ([]byte, error) {
	// A nil receiver marshals to a nil payload rather than an error.
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *EndpointNetworking) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value; only overwrite the receiver on success.
	var decoded EndpointNetworking
	err := swag.ReadJSON(b, &decoded)
	if err == nil {
		*m = decoded
	}
	return err
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointPolicy Policy information of an endpoint
//
// NOTE: generated go-swagger model; regenerate instead of editing by hand.
//
// swagger:model EndpointPolicy
type EndpointPolicy struct {
// List of identities to which this endpoint is allowed to communicate
//
AllowedEgressIdentities []int64 `json:"allowed-egress-identities"`
// List of identities allowed to communicate to this endpoint
//
AllowedIngressIdentities []int64 `json:"allowed-ingress-identities"`
// Build number of calculated policy in use
Build int64 `json:"build,omitempty"`
// cidr policy
CidrPolicy *CIDRPolicy `json:"cidr-policy,omitempty"`
// List of identities to which this endpoint is not allowed to communicate
//
DeniedEgressIdentities []int64 `json:"denied-egress-identities"`
// List of identities not allowed to communicate to this endpoint
//
DeniedIngressIdentities []int64 `json:"denied-ingress-identities"`
// Own identity of endpoint
ID int64 `json:"id,omitempty"`
// l4
L4 *L4Policy `json:"l4,omitempty"`
// Whether policy enforcement is enabled (ingress, egress, both or none)
PolicyEnabled EndpointPolicyEnabled `json:"policy-enabled,omitempty"`
// The agent-local policy revision
PolicyRevision int64 `json:"policy-revision,omitempty"`
}
// Validate validates this endpoint policy, collecting all per-field
// failures into a single composite error.
func (m *EndpointPolicy) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateCidrPolicy,
		m.validateL4,
		m.validatePolicyEnabled,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateCidrPolicy checks the optional "cidr-policy" field.
func (m *EndpointPolicy) validateCidrPolicy(formats strfmt.Registry) error {
	if swag.IsZero(m.CidrPolicy) { // not required
		return nil
	}
	if m.CidrPolicy == nil {
		return nil
	}
	err := m.CidrPolicy.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("cidr-policy")
	case *errors.CompositeError:
		return e.ValidateName("cidr-policy")
	}
	return err
}
// validateL4 checks the optional "l4" policy field.
func (m *EndpointPolicy) validateL4(formats strfmt.Registry) error {
	if swag.IsZero(m.L4) { // not required
		return nil
	}
	if m.L4 == nil {
		return nil
	}
	err := m.L4.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("l4")
	case *errors.CompositeError:
		return e.ValidateName("l4")
	}
	return err
}
// validatePolicyEnabled checks the optional "policy-enabled" enum field.
func (m *EndpointPolicy) validatePolicyEnabled(formats strfmt.Registry) error {
	if swag.IsZero(m.PolicyEnabled) { // not required
		return nil
	}
	err := m.PolicyEnabled.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("policy-enabled")
	case *errors.CompositeError:
		return e.ValidateName("policy-enabled")
	}
	return err
}
// ContextValidate validates this endpoint policy based on the context it is
// used in, aggregating all per-field failures.
func (m *EndpointPolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateCidrPolicy,
		m.contextValidateL4,
		m.contextValidatePolicyEnabled,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateCidrPolicy context-validates the "cidr-policy" field.
func (m *EndpointPolicy) contextValidateCidrPolicy(ctx context.Context, formats strfmt.Registry) error {
	// Nil or zero-valued: nothing to validate.
	if m.CidrPolicy == nil || swag.IsZero(m.CidrPolicy) {
		return nil
	}
	err := m.CidrPolicy.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("cidr-policy")
	case *errors.CompositeError:
		return e.ValidateName("cidr-policy")
	}
	return err
}
// contextValidateL4 context-validates the "l4" policy field.
func (m *EndpointPolicy) contextValidateL4(ctx context.Context, formats strfmt.Registry) error {
	// Nil or zero-valued: nothing to validate.
	if m.L4 == nil || swag.IsZero(m.L4) {
		return nil
	}
	err := m.L4.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("l4")
	case *errors.CompositeError:
		return e.ValidateName("l4")
	}
	return err
}
// contextValidatePolicyEnabled context-validates the "policy-enabled" enum.
func (m *EndpointPolicy) contextValidatePolicyEnabled(ctx context.Context, formats strfmt.Registry) error {
	if swag.IsZero(m.PolicyEnabled) { // not required
		return nil
	}
	err := m.PolicyEnabled.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("policy-enabled")
	case *errors.CompositeError:
		return e.ValidateName("policy-enabled")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *EndpointPolicy) MarshalBinary() ([]byte, error) {
	// A nil receiver marshals to a nil payload rather than an error.
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *EndpointPolicy) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value; only overwrite the receiver on success.
	var decoded EndpointPolicy
	err := swag.ReadJSON(b, &decoded)
	if err == nil {
		*m = decoded
	}
	return err
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointPolicyEnabled Whether policy enforcement is enabled (ingress, egress, both or none)
//
// swagger:model EndpointPolicyEnabled
type EndpointPolicyEnabled string
// NewEndpointPolicyEnabled returns a pointer to the given enum value.
func NewEndpointPolicyEnabled(value EndpointPolicyEnabled) *EndpointPolicyEnabled {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointPolicyEnabled.
func (m EndpointPolicyEnabled) Pointer() *EndpointPolicyEnabled {
return &m
}
// Allowed enum values; must stay in sync with the JSON list in init below.
const (
// EndpointPolicyEnabledNone captures enum value "none"
EndpointPolicyEnabledNone EndpointPolicyEnabled = "none"
// EndpointPolicyEnabledIngress captures enum value "ingress"
EndpointPolicyEnabledIngress EndpointPolicyEnabled = "ingress"
// EndpointPolicyEnabledEgress captures enum value "egress"
EndpointPolicyEnabledEgress EndpointPolicyEnabled = "egress"
// EndpointPolicyEnabledBoth captures enum value "both"
EndpointPolicyEnabledBoth EndpointPolicyEnabled = "both"
// EndpointPolicyEnabledAuditDashIngress captures enum value "audit-ingress"
EndpointPolicyEnabledAuditDashIngress EndpointPolicyEnabled = "audit-ingress"
// EndpointPolicyEnabledAuditDashEgress captures enum value "audit-egress"
EndpointPolicyEnabledAuditDashEgress EndpointPolicyEnabled = "audit-egress"
// EndpointPolicyEnabledAuditDashBoth captures enum value "audit-both"
EndpointPolicyEnabledAuditDashBoth EndpointPolicyEnabled = "audit-both"
)
// for schema: the allowed values, boxed as []interface{} for validate.EnumCase.
var endpointPolicyEnabledEnum []interface{}
// init populates the enum list from the generated JSON literal; a failure here
// indicates a generator bug, hence the panic at program start.
func init() {
var res []EndpointPolicyEnabled
if err := json.Unmarshal([]byte(`["none","ingress","egress","both","audit-ingress","audit-egress","audit-both"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointPolicyEnabledEnum = append(endpointPolicyEnabledEnum, v)
}
}
// validateEndpointPolicyEnabledEnum reports an error when value is not one of
// the allowed enum values (case-sensitive).
func (m EndpointPolicyEnabled) validateEndpointPolicyEnabledEnum(path, location string, value EndpointPolicyEnabled) error {
	return validate.EnumCase(path, location, value, endpointPolicyEnabledEnum, true)
}
// Validate validates this endpoint policy enabled
// NOTE(review): res is never appended to, so the CompositeValidationError
// branch below is dead code; it is kept as emitted by the generator (it also
// keeps the errors import in use for this generated file).
func (m EndpointPolicyEnabled) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointPolicyEnabledEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint policy enabled based on context it is used
// A plain string enum has no context-dependent rules, so this is a no-op.
func (m EndpointPolicyEnabled) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointPolicyStatus Policy information of an endpoint
//
// NOTE: generated go-swagger model; regenerate instead of editing by hand.
//
// swagger:model EndpointPolicyStatus
type EndpointPolicyStatus struct {
// The policy revision currently enforced in the proxy for this endpoint
ProxyPolicyRevision int64 `json:"proxy-policy-revision,omitempty"`
// Statistics of the proxy redirects configured for this endpoint
ProxyStatistics []*ProxyStatistics `json:"proxy-statistics"`
// The policy in the datapath for this endpoint
Realized *EndpointPolicy `json:"realized,omitempty"`
// The policy that should apply to this endpoint
Spec *EndpointPolicy `json:"spec,omitempty"`
}
// Validate validates this endpoint policy status, collecting all per-field
// failures into a single composite error.
func (m *EndpointPolicyStatus) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateProxyStatistics,
		m.validateRealized,
		m.validateSpec,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateProxyStatistics validates each non-nil, non-zero entry of the
// proxy-statistics list, reporting failures under "proxy-statistics.<index>".
func (m *EndpointPolicyStatus) validateProxyStatistics(formats strfmt.Registry) error {
	if swag.IsZero(m.ProxyStatistics) { // not required
		return nil
	}
	for i, stat := range m.ProxyStatistics {
		if stat == nil || swag.IsZero(stat) {
			continue
		}
		err := stat.Validate(formats)
		if err == nil {
			continue
		}
		name := "proxy-statistics" + "." + strconv.Itoa(i)
		switch e := err.(type) {
		case *errors.Validation:
			return e.ValidateName(name)
		case *errors.CompositeError:
			return e.ValidateName(name)
		}
		return err
	}
	return nil
}
// validateRealized checks the optional "realized" policy when present.
func (m *EndpointPolicyStatus) validateRealized(formats strfmt.Registry) error {
	if swag.IsZero(m.Realized) { // not required
		return nil
	}
	if m.Realized == nil {
		return nil
	}
	err := m.Realized.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("realized")
	case *errors.CompositeError:
		return e.ValidateName("realized")
	}
	return err
}
// validateSpec checks the optional "spec" policy when present.
func (m *EndpointPolicyStatus) validateSpec(formats strfmt.Registry) error {
	if swag.IsZero(m.Spec) { // not required
		return nil
	}
	if m.Spec == nil {
		return nil
	}
	err := m.Spec.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("spec")
	case *errors.CompositeError:
		return e.ValidateName("spec")
	}
	return err
}
// ContextValidate validates this endpoint policy status based on the context
// it is used in, aggregating all per-field failures.
func (m *EndpointPolicyStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateProxyStatistics,
		m.contextValidateRealized,
		m.contextValidateSpec,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateProxyStatistics context-validates every non-nil, non-zero
// entry of the proxy-statistics list.
//
// Fix: the generated template returned nil as soon as it met a zero-valued
// element, silently skipping context-validation of all remaining entries;
// this version skips only that element and continues with the next one.
func (m *EndpointPolicyStatus) contextValidateProxyStatistics(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.ProxyStatistics); i++ {
		if m.ProxyStatistics[i] == nil {
			continue
		}
		if swag.IsZero(m.ProxyStatistics[i]) { // not required
			continue
		}
		if err := m.ProxyStatistics[i].ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
			}
			return err
		}
	}
	return nil
}
// contextValidateRealized context-validates the optional "realized"
// sub-object. The early return on a zero value is safe here because there is
// no further work after this single field check.
func (m *EndpointPolicyStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// contextValidateSpec context-validates the optional "spec" sub-object,
// prefixing any error path with "spec".
func (m *EndpointPolicyStatus) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation. Serializes the model as JSON;
// a nil receiver yields (nil, nil).
func (m *EndpointPolicyStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation. Decodes into a temporary value
// first so the receiver is left untouched on a decode error.
func (m *EndpointPolicyStatus) UnmarshalBinary(b []byte) error {
var res EndpointPolicyStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointState State of endpoint
//
// swagger:model EndpointState
type EndpointState string
// NewEndpointState returns a pointer to a copy of the given value.
func NewEndpointState(value EndpointState) *EndpointState {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointState.
func (m EndpointState) Pointer() *EndpointState {
return &m
}
const (
// EndpointStateWaitingDashForDashIdentity captures enum value "waiting-for-identity"
EndpointStateWaitingDashForDashIdentity EndpointState = "waiting-for-identity"
// EndpointStateNotDashReady captures enum value "not-ready"
EndpointStateNotDashReady EndpointState = "not-ready"
// EndpointStateWaitingDashToDashRegenerate captures enum value "waiting-to-regenerate"
EndpointStateWaitingDashToDashRegenerate EndpointState = "waiting-to-regenerate"
// EndpointStateRegenerating captures enum value "regenerating"
EndpointStateRegenerating EndpointState = "regenerating"
// EndpointStateRestoring captures enum value "restoring"
EndpointStateRestoring EndpointState = "restoring"
// EndpointStateReady captures enum value "ready"
EndpointStateReady EndpointState = "ready"
// EndpointStateDisconnecting captures enum value "disconnecting"
EndpointStateDisconnecting EndpointState = "disconnecting"
// EndpointStateDisconnected captures enum value "disconnected"
EndpointStateDisconnected EndpointState = "disconnected"
// EndpointStateInvalid captures enum value "invalid"
EndpointStateInvalid EndpointState = "invalid"
)
// for schema: the set of legal enum values, populated once at init time
// from the JSON enum list embedded in the swagger spec.
var endpointStateEnum []interface{}
func init() {
var res []EndpointState
if err := json.Unmarshal([]byte(`["waiting-for-identity","not-ready","waiting-to-regenerate","regenerating","restoring","ready","disconnecting","disconnected","invalid"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointStateEnum = append(endpointStateEnum, v)
}
}
// validateEndpointStateEnum checks value against the allowed enum values
// (case-sensitive, final argument true).
func (m EndpointState) validateEndpointStateEnum(path, location string, value EndpointState) error {
if err := validate.EnumCase(path, location, value, endpointStateEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this endpoint state
func (m EndpointState) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointStateEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint state based on context it is used.
// Scalar enums carry no context-dependent rules, so this is a no-op.
func (m EndpointState) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointStatus The current state and configuration of the endpoint, its policy & datapath, and subcomponents
//
// swagger:model EndpointStatus
type EndpointStatus struct {
// Status of internal controllers attached to this endpoint
Controllers ControllerStatuses `json:"controllers,omitempty"`
// Unique identifiers for this endpoint from outside cilium
ExternalIdentifiers *EndpointIdentifiers `json:"external-identifiers,omitempty"`
// Summary overall endpoint & subcomponent health
Health *EndpointHealth `json:"health,omitempty"`
// The security identity for this endpoint
Identity *Identity `json:"identity,omitempty"`
// Labels applied to this endpoint
Labels *LabelConfigurationStatus `json:"labels,omitempty"`
// Most recent status log. See endpoint/{id}/log for the complete log.
Log EndpointStatusLog `json:"log,omitempty"`
// List of named ports that can be used in Network Policy
NamedPorts NamedPorts `json:"namedPorts,omitempty"`
// Networking properties of the endpoint
Networking *EndpointNetworking `json:"networking,omitempty"`
// The policy applied to this endpoint from the policy repository
Policy *EndpointPolicyStatus `json:"policy,omitempty"`
// The configuration in effect on this endpoint
Realized *EndpointConfigurationSpec `json:"realized,omitempty"`
// Current state of endpoint
// Required: true
State *EndpointState `json:"state"`
}
// Validate validates this endpoint status. Each field validator runs even if
// earlier ones failed; all errors are reported together as a CompositeError.
func (m *EndpointStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateControllers(formats); err != nil {
res = append(res, err)
}
if err := m.validateExternalIdentifiers(formats); err != nil {
res = append(res, err)
}
if err := m.validateHealth(formats); err != nil {
res = append(res, err)
}
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if err := m.validateLog(formats); err != nil {
res = append(res, err)
}
if err := m.validateNamedPorts(formats); err != nil {
res = append(res, err)
}
if err := m.validateNetworking(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateControllers validates the optional controllers collection by
// delegating to ControllerStatuses.Validate (value type, no nil check needed).
func (m *EndpointStatus) validateControllers(formats strfmt.Registry) error {
if swag.IsZero(m.Controllers) { // not required
return nil
}
if err := m.Controllers.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
// validateExternalIdentifiers validates the optional external-identifiers
// sub-object, prefixing any error path with the field's JSON name.
func (m *EndpointStatus) validateExternalIdentifiers(formats strfmt.Registry) error {
if swag.IsZero(m.ExternalIdentifiers) { // not required
return nil
}
if m.ExternalIdentifiers != nil {
if err := m.ExternalIdentifiers.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("external-identifiers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("external-identifiers")
}
return err
}
}
return nil
}
// validateHealth validates the optional health sub-object.
func (m *EndpointStatus) validateHealth(formats strfmt.Registry) error {
if swag.IsZero(m.Health) { // not required
return nil
}
if m.Health != nil {
if err := m.Health.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health")
}
return err
}
}
return nil
}
// validateIdentity validates the optional identity sub-object.
func (m *EndpointStatus) validateIdentity(formats strfmt.Registry) error {
if swag.IsZero(m.Identity) { // not required
return nil
}
if m.Identity != nil {
if err := m.Identity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
// validateLabels validates the optional labels sub-object.
func (m *EndpointStatus) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if m.Labels != nil {
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
}
return nil
}
// validateLog validates the optional status log (a slice type, validated
// as a whole by EndpointStatusLog.Validate).
func (m *EndpointStatus) validateLog(formats strfmt.Registry) error {
if swag.IsZero(m.Log) { // not required
return nil
}
if err := m.Log.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("log")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("log")
}
return err
}
return nil
}
// validateNamedPorts validates the optional named-ports collection.
func (m *EndpointStatus) validateNamedPorts(formats strfmt.Registry) error {
if swag.IsZero(m.NamedPorts) { // not required
return nil
}
if err := m.NamedPorts.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("namedPorts")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("namedPorts")
}
return err
}
return nil
}
// validateNetworking validates the optional networking sub-object.
func (m *EndpointStatus) validateNetworking(formats strfmt.Registry) error {
if swag.IsZero(m.Networking) { // not required
return nil
}
if m.Networking != nil {
if err := m.Networking.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("networking")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("networking")
}
return err
}
}
return nil
}
// validatePolicy validates the optional policy sub-object.
func (m *EndpointStatus) validatePolicy(formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if m.Policy != nil {
if err := m.Policy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
// validateRealized validates the optional realized configuration sub-object.
func (m *EndpointStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// validateState checks that the required "state" field is present and, when
// set, delegates to EndpointState.Validate for enum validation.
//
// Fix: the generated code called validate.Required("state", ...) twice
// back-to-back; the redundant duplicate call has been removed (behavior is
// unchanged — the second call could never fail if the first succeeded).
func (m *EndpointStatus) validateState(formats strfmt.Registry) error {
if err := validate.Required("state", "body", m.State); err != nil {
return err
}
if m.State != nil {
if err := m.State.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// ContextValidate validate this endpoint status based on the context it is used.
// All per-field context validators run; errors are aggregated into one
// CompositeError.
func (m *EndpointStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateControllers(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateExternalIdentifiers(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHealth(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIdentity(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLog(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNamedPorts(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNetworking(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateState(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateControllers context-validates the controllers collection
// (value type, delegated unconditionally).
func (m *EndpointStatus) contextValidateControllers(ctx context.Context, formats strfmt.Registry) error {
if err := m.Controllers.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
// contextValidateExternalIdentifiers context-validates the optional
// external-identifiers sub-object; unset values are skipped.
func (m *EndpointStatus) contextValidateExternalIdentifiers(ctx context.Context, formats strfmt.Registry) error {
if m.ExternalIdentifiers != nil {
if swag.IsZero(m.ExternalIdentifiers) { // not required
return nil
}
if err := m.ExternalIdentifiers.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("external-identifiers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("external-identifiers")
}
return err
}
}
return nil
}
// contextValidateHealth context-validates the optional health sub-object.
func (m *EndpointStatus) contextValidateHealth(ctx context.Context, formats strfmt.Registry) error {
if m.Health != nil {
if swag.IsZero(m.Health) { // not required
return nil
}
if err := m.Health.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health")
}
return err
}
}
return nil
}
// contextValidateIdentity context-validates the optional identity sub-object.
func (m *EndpointStatus) contextValidateIdentity(ctx context.Context, formats strfmt.Registry) error {
if m.Identity != nil {
if swag.IsZero(m.Identity) { // not required
return nil
}
if err := m.Identity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
// contextValidateLabels context-validates the optional labels sub-object.
func (m *EndpointStatus) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if m.Labels != nil {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
}
return nil
}
// contextValidateLog context-validates the status log (slice type, delegated
// unconditionally).
func (m *EndpointStatus) contextValidateLog(ctx context.Context, formats strfmt.Registry) error {
if err := m.Log.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("log")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("log")
}
return err
}
return nil
}
// contextValidateNamedPorts context-validates the named-ports collection.
func (m *EndpointStatus) contextValidateNamedPorts(ctx context.Context, formats strfmt.Registry) error {
if err := m.NamedPorts.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("namedPorts")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("namedPorts")
}
return err
}
return nil
}
// contextValidateNetworking context-validates the optional networking
// sub-object.
func (m *EndpointStatus) contextValidateNetworking(ctx context.Context, formats strfmt.Registry) error {
if m.Networking != nil {
if swag.IsZero(m.Networking) { // not required
return nil
}
if err := m.Networking.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("networking")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("networking")
}
return err
}
}
return nil
}
// contextValidatePolicy context-validates the optional policy sub-object.
func (m *EndpointStatus) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
if m.Policy != nil {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
// contextValidateRealized context-validates the optional realized
// configuration sub-object.
func (m *EndpointStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// contextValidateState context-validates the required state field; no
// IsZero skip here because the field is required.
func (m *EndpointStatus) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
if m.State != nil {
if err := m.State.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation. Serializes the model as JSON;
// a nil receiver yields (nil, nil).
func (m *EndpointStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation. Decodes into a temporary value
// first so the receiver is left untouched on a decode error.
func (m *EndpointStatus) UnmarshalBinary(b []byte) error {
var res EndpointStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointStatusChange Indication of a change of status
//
// +deepequal-gen=true
//
// swagger:model EndpointStatusChange
type EndpointStatusChange struct {
// Code indicate type of status change
// Enum: ["ok","failed"]
Code string `json:"code,omitempty"`
// Status message
Message string `json:"message,omitempty"`
// state
State EndpointState `json:"state,omitempty"`
// Timestamp when status change occurred
Timestamp string `json:"timestamp,omitempty"`
}
// Validate validates this endpoint status change. Field errors are collected
// and reported together as a CompositeError.
func (m *EndpointStatusChange) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCode(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// Allowed values for the "code" property, populated once at init time.
var endpointStatusChangeTypeCodePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ok","failed"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointStatusChangeTypeCodePropEnum = append(endpointStatusChangeTypeCodePropEnum, v)
}
}
const (
// EndpointStatusChangeCodeOk captures enum value "ok"
EndpointStatusChangeCodeOk string = "ok"
// EndpointStatusChangeCodeFailed captures enum value "failed"
EndpointStatusChangeCodeFailed string = "failed"
)
// prop value enum: case-sensitive membership check against the enum list.
func (m *EndpointStatusChange) validateCodeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, endpointStatusChangeTypeCodePropEnum, true); err != nil {
return err
}
return nil
}
// validateCode validates the optional "code" field against its enum.
func (m *EndpointStatusChange) validateCode(formats strfmt.Registry) error {
if swag.IsZero(m.Code) { // not required
return nil
}
// value enum
if err := m.validateCodeEnum("code", "body", m.Code); err != nil {
return err
}
return nil
}
// validateState validates the optional "state" field by delegating to
// EndpointState.Validate, prefixing any error path with "state".
func (m *EndpointStatusChange) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
if err := m.State.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
return nil
}
// ContextValidate validate this endpoint status change based on the context it is used
func (m *EndpointStatusChange) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateState(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateState context-validates the optional "state" field; unset
// values are skipped.
func (m *EndpointStatusChange) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
if err := m.State.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
return nil
}
// MarshalBinary interface implementation. Serializes the model as JSON;
// a nil receiver yields (nil, nil).
func (m *EndpointStatusChange) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation. Decodes into a temporary value
// first so the receiver is left untouched on a decode error.
func (m *EndpointStatusChange) UnmarshalBinary(b []byte) error {
var res EndpointStatusChange
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointStatusLog Status log of endpoint
//
// swagger:model EndpointStatusLog
type EndpointStatusLog []*EndpointStatusChange
// Validate validates this endpoint status log. Each non-nil, non-zero entry
// is validated in order; the first failing entry's error is returned with
// its index as the error path.
func (m EndpointStatusLog) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this endpoint status log based on the context it
// is used. Each non-nil, non-zero entry is context-validated in order, with
// the element index as the error path.
//
// Fix: a zero-valued (optional, unset) entry previously triggered
// `return nil`, aborting validation of every subsequent entry; it now uses
// `continue`, matching the skip logic of Validate above. The never-populated
// `res` error slice and its dead CompositeValidationError tail were removed.
func (m EndpointStatusLog) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
continue
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Error error
//
// swagger:model Error
type Error string
// Validate validates this error. A plain string with no constraints, so
// this is a no-op.
func (m Error) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this error based on context it is used (no-op).
func (m Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// FrontendAddress Layer 4 address. The protocol is currently ignored, all services will
// behave as if protocol any is specified. To restrict to a particular
// protocol, use policy.
//
// swagger:model FrontendAddress
type FrontendAddress struct {
// Layer 3 address
IP string `json:"ip,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Layer 4 protocol
// Enum: ["tcp","udp","any"]
Protocol string `json:"protocol,omitempty"`
// Load balancing scope for frontend address
// Enum: ["external","internal"]
Scope string `json:"scope,omitempty"`
}
// Validate validates this frontend address. Enum-constrained fields are
// checked; errors are aggregated into a CompositeError.
func (m *FrontendAddress) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProtocol(formats); err != nil {
res = append(res, err)
}
if err := m.validateScope(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// Allowed values for the "protocol" property, populated once at init time.
var frontendAddressTypeProtocolPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["tcp","udp","any"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
frontendAddressTypeProtocolPropEnum = append(frontendAddressTypeProtocolPropEnum, v)
}
}
const (
// FrontendAddressProtocolTCP captures enum value "tcp"
FrontendAddressProtocolTCP string = "tcp"
// FrontendAddressProtocolUDP captures enum value "udp"
FrontendAddressProtocolUDP string = "udp"
// FrontendAddressProtocolAny captures enum value "any"
FrontendAddressProtocolAny string = "any"
)
// prop value enum: case-sensitive membership check against the enum list.
func (m *FrontendAddress) validateProtocolEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, frontendAddressTypeProtocolPropEnum, true); err != nil {
return err
}
return nil
}
// validateProtocol validates the optional "protocol" field against its enum.
func (m *FrontendAddress) validateProtocol(formats strfmt.Registry) error {
if swag.IsZero(m.Protocol) { // not required
return nil
}
// value enum
if err := m.validateProtocolEnum("protocol", "body", m.Protocol); err != nil {
return err
}
return nil
}
// Allowed values for the "scope" property, populated once at init time.
var frontendAddressTypeScopePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["external","internal"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
frontendAddressTypeScopePropEnum = append(frontendAddressTypeScopePropEnum, v)
}
}
const (
// FrontendAddressScopeExternal captures enum value "external"
FrontendAddressScopeExternal string = "external"
// FrontendAddressScopeInternal captures enum value "internal"
FrontendAddressScopeInternal string = "internal"
)
// prop value enum: case-sensitive membership check against the enum list.
func (m *FrontendAddress) validateScopeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, frontendAddressTypeScopePropEnum, true); err != nil {
return err
}
return nil
}
// validateScope validates the optional "scope" field against its enum.
func (m *FrontendAddress) validateScope(formats strfmt.Registry) error {
if swag.IsZero(m.Scope) { // not required
return nil
}
// value enum
if err := m.validateScopeEnum("scope", "body", m.Scope); err != nil {
return err
}
return nil
}
// ContextValidate validates this frontend address based on context it is
// used. No context-dependent rules exist, so this is a no-op.
func (m *FrontendAddress) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation. Serializes the model as JSON;
// a nil receiver yields (nil, nil).
func (m *FrontendAddress) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation. Decodes into a temporary value
// first so the receiver is left untouched on a decode error.
func (m *FrontendAddress) UnmarshalBinary(b []byte) error {
var res FrontendAddress
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// FrontendMapping Mapping of frontend to backend pods of an LRP
//
// swagger:model FrontendMapping
type FrontendMapping struct {
// Pod backends of an LRP
Backends []*LRPBackend `json:"backends"`
// frontend address
FrontendAddress *FrontendAddress `json:"frontend-address,omitempty"`
}
// Validate validates this frontend mapping. Field errors are collected and
// reported together as a CompositeError.
func (m *FrontendMapping) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBackends(formats); err != nil {
res = append(res, err)
}
if err := m.validateFrontendAddress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateBackends validates each non-nil, non-zero backend entry, with the
// element index appended to the "backends" error path.
func (m *FrontendMapping) validateBackends(formats strfmt.Registry) error {
if swag.IsZero(m.Backends) { // not required
return nil
}
for i := 0; i < len(m.Backends); i++ {
if swag.IsZero(m.Backends[i]) { // not required
continue
}
if m.Backends[i] != nil {
if err := m.Backends[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backends" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backends" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// validateFrontendAddress validates the optional frontend-address sub-object,
// prefixing any error path with the field's JSON name.
func (m *FrontendMapping) validateFrontendAddress(formats strfmt.Registry) error {
if swag.IsZero(m.FrontendAddress) { // not required
return nil
}
if m.FrontendAddress != nil {
if err := m.FrontendAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// ContextValidate validate this frontend mapping based on the context it is used
func (m *FrontendMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBackends(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFrontendAddress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateBackends runs context-based validation on every backend
// entry, with the element index appended to the "backends" error path.
//
// Fix: a zero-valued (optional, unset) entry previously triggered
// `return nil`, aborting the loop and silently skipping validation of all
// subsequent entries. It now uses `continue`, matching validateBackends.
func (m *FrontendMapping) contextValidateBackends(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Backends); i++ {
if m.Backends[i] != nil {
if swag.IsZero(m.Backends[i]) { // not required
continue
}
if err := m.Backends[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backends" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backends" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// contextValidateFrontendAddress context-validates the optional
// frontend-address sub-object; unset values are skipped.
func (m *FrontendMapping) contextValidateFrontendAddress(ctx context.Context, formats strfmt.Registry) error {
if m.FrontendAddress != nil {
if swag.IsZero(m.FrontendAddress) { // not required
return nil
}
if err := m.FrontendAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation. Serializes the model as JSON;
// a nil receiver yields (nil, nil).
func (m *FrontendMapping) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation. Decodes into a temporary value
// first so the receiver is left untouched on a decode error.
func (m *FrontendMapping) UnmarshalBinary(b []byte) error {
var res FrontendMapping
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// HostFirewall Status of the host firewall
//
// +k8s:deepcopy-gen=true
//
// swagger:model HostFirewall
type HostFirewall struct {
// devices the host firewall operates on (schema gives no further
// description; presumably network interface names — confirm against the
// swagger spec).
Devices []string `json:"devices"`
// mode indicates whether the host firewall is active; optional.
// Enum: ["Disabled","Enabled"]
Mode string `json:"mode,omitempty"`
}
// Validate validates this host firewall. Mode is the only field with a
// constraint (an enum), so a single failed check yields the composite error.
func (m *HostFirewall) Validate(formats strfmt.Registry) error {
	if err := m.validateMode(formats); err != nil {
		return errors.CompositeValidationError(err)
	}
	return nil
}
// hostFirewallTypeModePropEnum holds the permitted values for
// HostFirewall.Mode, populated once at package init from the
// swagger-declared enum literal.
var hostFirewallTypeModePropEnum []interface{}
func init() {
var res []string
// The JSON literal mirrors the Enum annotation on the Mode field; a
// decode failure can only mean generator corruption, hence panic.
if err := json.Unmarshal([]byte(`["Disabled","Enabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hostFirewallTypeModePropEnum = append(hostFirewallTypeModePropEnum, v)
}
}
const (
// HostFirewallModeDisabled captures enum value "Disabled"
HostFirewallModeDisabled string = "Disabled"
// HostFirewallModeEnabled captures enum value "Enabled"
HostFirewallModeEnabled string = "Enabled"
)
// validateModeEnum reports an error when value is not a member of the Mode
// enum ("Disabled", "Enabled"); comparison is case-sensitive.
func (m *HostFirewall) validateModeEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, hostFirewallTypeModePropEnum, true)
}
// validateMode checks the optional Mode field against its enum; an empty
// value is accepted because the property is not required.
func (m *HostFirewall) validateMode(formats strfmt.Registry) error {
	if swag.IsZero(m.Mode) { // not required
		return nil
	}
	return m.validateModeEnum("mode", "body", m.Mode)
}
// ContextValidate validates this host firewall based on context it is used
func (m *HostFirewall) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HostFirewall) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HostFirewall) UnmarshalBinary(b []byte) error {
var res HostFirewall
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// HubbleMetricsStatus Status of the Hubble metrics server
//
// +k8s:deepcopy-gen=true
//
// swagger:model HubbleMetricsStatus
type HubbleMetricsStatus struct {
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// State the component is in
// Enum: ["Ok","Warning","Failure","Disabled"]
State string `json:"state,omitempty"`
}
// Validate validates this hubble metrics status
func (m *HubbleMetricsStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var hubbleMetricsStatusTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hubbleMetricsStatusTypeStatePropEnum = append(hubbleMetricsStatusTypeStatePropEnum, v)
}
}
const (
// HubbleMetricsStatusStateOk captures enum value "Ok"
HubbleMetricsStatusStateOk string = "Ok"
// HubbleMetricsStatusStateWarning captures enum value "Warning"
HubbleMetricsStatusStateWarning string = "Warning"
// HubbleMetricsStatusStateFailure captures enum value "Failure"
HubbleMetricsStatusStateFailure string = "Failure"
// HubbleMetricsStatusStateDisabled captures enum value "Disabled"
HubbleMetricsStatusStateDisabled string = "Disabled"
)
// prop value enum
func (m *HubbleMetricsStatus) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, hubbleMetricsStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *HubbleMetricsStatus) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this hubble metrics status based on context it is used.
// No fields require context-based validation, so this is a generated no-op.
func (m *HubbleMetricsStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HubbleMetricsStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleMetricsStatus) UnmarshalBinary(b []byte) error {
var res HubbleMetricsStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// HubbleStatus Status of the Hubble server
//
// +k8s:deepcopy-gen=true
//
// swagger:model HubbleStatus
type HubbleStatus struct {
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// observer
Observer *HubbleStatusObserver `json:"observer,omitempty"`
// State the component is in
// Enum: ["Ok","Warning","Failure","Disabled"]
State string `json:"state,omitempty"`
}
// Validate validates this hubble status
func (m *HubbleStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateObserver(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateObserver validates the optional Observer sub-object, rewriting
// any validation error to carry the "observer" property name.
func (m *HubbleStatus) validateObserver(formats strfmt.Registry) error {
	obs := m.Observer
	if obs == nil || swag.IsZero(obs) { // optional field
		return nil
	}
	err := obs.Validate(formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("observer")
	case *errors.CompositeError:
		return typed.ValidateName("observer")
	default:
		return err
	}
}
var hubbleStatusTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hubbleStatusTypeStatePropEnum = append(hubbleStatusTypeStatePropEnum, v)
}
}
const (
// HubbleStatusStateOk captures enum value "Ok"
HubbleStatusStateOk string = "Ok"
// HubbleStatusStateWarning captures enum value "Warning"
HubbleStatusStateWarning string = "Warning"
// HubbleStatusStateFailure captures enum value "Failure"
HubbleStatusStateFailure string = "Failure"
// HubbleStatusStateDisabled captures enum value "Disabled"
HubbleStatusStateDisabled string = "Disabled"
)
// prop value enum
func (m *HubbleStatus) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, hubbleStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *HubbleStatus) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validate this hubble status based on the context it is used
func (m *HubbleStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateObserver(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateObserver context-validates the optional Observer
// sub-object, rewriting any validation error to carry the "observer"
// property name.
func (m *HubbleStatus) contextValidateObserver(ctx context.Context, formats strfmt.Registry) error {
	obs := m.Observer
	if obs == nil || swag.IsZero(obs) { // optional field
		return nil
	}
	err := obs.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch typed := err.(type) {
	case *errors.Validation:
		return typed.ValidateName("observer")
	case *errors.CompositeError:
		return typed.ValidateName("observer")
	default:
		return err
	}
}
// MarshalBinary interface implementation
func (m *HubbleStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleStatus) UnmarshalBinary(b []byte) error {
var res HubbleStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// HubbleStatusObserver Status of the Hubble observer
//
// +k8s:deepcopy-gen=true
//
// swagger:model HubbleStatusObserver
type HubbleStatusObserver struct {
// Current number of flows this Hubble observer stores
CurrentFlows int64 `json:"current-flows,omitempty"`
// Maximum number of flows this Hubble observer is able to store
MaxFlows int64 `json:"max-flows,omitempty"`
// Total number of flows this Hubble observer has seen
SeenFlows int64 `json:"seen-flows,omitempty"`
// Uptime of this Hubble observer instance
// Format: duration
Uptime strfmt.Duration `json:"uptime,omitempty"`
}
// Validate validates this hubble status observer
func (m *HubbleStatusObserver) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateUptime(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateUptime checks that the optional Uptime field conforms to the
// swagger "duration" format.
func (m *HubbleStatusObserver) validateUptime(formats strfmt.Registry) error {
if swag.IsZero(m.Uptime) { // not required
return nil
}
// NOTE(review): the "observer.uptime" error path (rather than plain
// "uptime") presumably reflects this model's origin as the inline
// "observer" property of HubbleStatus — confirm against the spec.
if err := validate.FormatOf("observer"+"."+"uptime", "body", "duration", m.Uptime.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this hubble status observer based on context it is used
func (m *HubbleStatusObserver) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HubbleStatusObserver) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleStatusObserver) UnmarshalBinary(b []byte) error {
var res HubbleStatusObserver
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPsecStatus Status of the IPsec agent
//
// +k8s:deepcopy-gen=true
//
// swagger:model IPsecStatus
type IPsecStatus struct {
// IPsec decryption interfaces
DecryptInterfaces []string `json:"decrypt-interfaces"`
// IPsec error count
ErrorCount int64 `json:"error-count,omitempty"`
// IPsec keys in use
KeysInUse int64 `json:"keys-in-use,omitempty"`
// IPsec max sequence number
MaxSeqNumber string `json:"max-seq-number,omitempty"`
// IPsec XFRM errors
XfrmErrors map[string]int64 `json:"xfrm-errors,omitempty"`
}
// Validate validates this i psec status
func (m *IPsecStatus) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this i psec status based on context it is used
func (m *IPsecStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPsecStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPsecStatus) UnmarshalBinary(b []byte) error {
var res IPsecStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Identity Security identity
//
// swagger:model Identity
type Identity struct {
// Unique identifier
ID int64 `json:"id,omitempty"`
// Labels describing the identity
Labels Labels `json:"labels,omitempty"`
// SHA256 of labels
LabelsSHA256 string `json:"labelsSHA256,omitempty"`
}
// Validate validates this identity
func (m *Identity) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateLabels validates the optional Labels value, rewriting any
// validation error to carry the "labels" property name.
func (m *Identity) validateLabels(formats strfmt.Registry) error {
	if swag.IsZero(m.Labels) { // optional field
		return nil
	}
	err := m.Labels.Validate(formats)
	switch typed := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return typed.ValidateName("labels")
	case *errors.CompositeError:
		return typed.ValidateName("labels")
	default:
		return err
	}
}
// ContextValidate validate this identity based on the context it is used
func (m *Identity) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateLabels context-validates Labels unconditionally (the
// generator emits no zero-value guard for this field), rewriting any
// validation error to carry the "labels" property name.
func (m *Identity) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
	err := m.Labels.ContextValidate(ctx, formats)
	switch typed := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return typed.ValidateName("labels")
	case *errors.CompositeError:
		return typed.ValidateName("labels")
	default:
		return err
	}
}
// MarshalBinary interface implementation
func (m *Identity) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Identity) UnmarshalBinary(b []byte) error {
var res Identity
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IdentityEndpoints Security identities owned by endpoints on the local node
//
// swagger:model IdentityEndpoints
type IdentityEndpoints struct {
// Security identity
Identity *Identity `json:"identity,omitempty"`
// number of endpoints consuming this identity locally (should always be > 0)
RefCount int64 `json:"refCount,omitempty"`
}
// Validate validates this identity endpoints
func (m *IdentityEndpoints) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IdentityEndpoints) validateIdentity(formats strfmt.Registry) error {
if swag.IsZero(m.Identity) { // not required
return nil
}
if m.Identity != nil {
if err := m.Identity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
// ContextValidate validate this identity endpoints based on the context it is used
func (m *IdentityEndpoints) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateIdentity(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IdentityEndpoints) contextValidateIdentity(ctx context.Context, formats strfmt.Registry) error {
if m.Identity != nil {
if swag.IsZero(m.Identity) { // not required
return nil
}
if err := m.Identity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IdentityEndpoints) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IdentityEndpoints) UnmarshalBinary(b []byte) error {
var res IdentityEndpoints
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IdentityRange Status of identity range of the cluster
//
// swagger:model IdentityRange
type IdentityRange struct {
// Maximum identity of the cluster
MaxIdentity int64 `json:"max-identity,omitempty"`
// Minimum identity of the cluster
MinIdentity int64 `json:"min-identity,omitempty"`
}
// Validate validates this identity range
func (m *IdentityRange) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this identity range based on context it is used
func (m *IdentityRange) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IdentityRange) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IdentityRange) UnmarshalBinary(b []byte) error {
var res IdentityRange
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPAMAddressResponse IPAM configuration of an individual address family
//
// swagger:model IPAMAddressResponse
type IPAMAddressResponse struct {
// List of CIDRs out of which IPs are allocated
Cidrs []string `json:"cidrs"`
// The UUID for the expiration timer. Set when expiration has been
// enabled while allocating.
//
ExpirationUUID string `json:"expiration-uuid,omitempty"`
// IP of gateway
Gateway string `json:"gateway,omitempty"`
// InterfaceNumber is a field for generically identifying an interface. This is only useful in ENI mode.
//
InterfaceNumber string `json:"interface-number,omitempty"`
// Allocated IP for endpoint
IP string `json:"ip,omitempty"`
// MAC of master interface if address is a slave/secondary of a master interface
MasterMac string `json:"master-mac,omitempty"`
}
// Validate validates this IP a m address response
func (m *IPAMAddressResponse) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IP a m address response based on context it is used
func (m *IPAMAddressResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPAMAddressResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMAddressResponse) UnmarshalBinary(b []byte) error {
var res IPAMAddressResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// IPAMResponse IPAM configuration of an endpoint
//
// swagger:model IPAMResponse
type IPAMResponse struct {
// address
// Required: true
Address *AddressPair `json:"address"`
// host addressing
// Required: true
HostAddressing *NodeAddressing `json:"host-addressing"`
// ipv4
IPV4 *IPAMAddressResponse `json:"ipv4,omitempty"`
// ipv6
IPV6 *IPAMAddressResponse `json:"ipv6,omitempty"`
}
// Validate validates this IP a m response
func (m *IPAMResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV4(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV6(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateAddress enforces that the required Address field is present and
// valid, rewriting any nested error to carry the "address" property name.
func (m *IPAMResponse) validateAddress(formats strfmt.Registry) error {
	if err := validate.Required("address", "body", m.Address); err != nil {
		return err
	}
	if m.Address == nil {
		return nil
	}
	err := m.Address.Validate(formats)
	if err == nil {
		return nil
	}
	if ve, ok := err.(*errors.Validation); ok {
		return ve.ValidateName("address")
	}
	if ce, ok := err.(*errors.CompositeError); ok {
		return ce.ValidateName("address")
	}
	return err
}
func (m *IPAMResponse) validateHostAddressing(formats strfmt.Registry) error {
if err := validate.Required("host-addressing", "body", m.HostAddressing); err != nil {
return err
}
if m.HostAddressing != nil {
if err := m.HostAddressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
func (m *IPAMResponse) validateIPV4(formats strfmt.Registry) error {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if m.IPV4 != nil {
if err := m.IPV4.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *IPAMResponse) validateIPV6(formats strfmt.Registry) error {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if m.IPV6 != nil {
if err := m.IPV6.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// ContextValidate validate this IP a m response based on the context it is used
func (m *IPAMResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV4(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV6(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateAddress context-validates the Address sub-object when
// present, rewriting any nested error to carry the "address" property name.
func (m *IPAMResponse) contextValidateAddress(ctx context.Context, formats strfmt.Registry) error {
	if m.Address == nil {
		return nil
	}
	err := m.Address.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	if ve, ok := err.(*errors.Validation); ok {
		return ve.ValidateName("address")
	}
	if ce, ok := err.(*errors.CompositeError); ok {
		return ce.ValidateName("address")
	}
	return err
}
func (m *IPAMResponse) contextValidateHostAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.HostAddressing != nil {
if err := m.HostAddressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
func (m *IPAMResponse) contextValidateIPV4(ctx context.Context, formats strfmt.Registry) error {
if m.IPV4 != nil {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if err := m.IPV4.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *IPAMResponse) contextValidateIPV6(ctx context.Context, formats strfmt.Registry) error {
if m.IPV6 != nil {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if err := m.IPV6.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IPAMResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMResponse) UnmarshalBinary(b []byte) error {
var res IPAMResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPAMStatus Status of IP address management
//
// +k8s:deepcopy-gen=true
//
// swagger:model IPAMStatus
type IPAMStatus struct {
// allocations
Allocations AllocationMap `json:"allocations,omitempty"`
// ipv4
IPV4 []string `json:"ipv4"`
// ipv6
IPV6 []string `json:"ipv6"`
// status
Status string `json:"status,omitempty"`
}
// Validate validates this IP a m status
func (m *IPAMStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAllocations(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMStatus) validateAllocations(formats strfmt.Registry) error {
if swag.IsZero(m.Allocations) { // not required
return nil
}
if m.Allocations != nil {
if err := m.Allocations.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("allocations")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("allocations")
}
return err
}
}
return nil
}
// ContextValidate validate this IP a m status based on the context it is used
func (m *IPAMStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAllocations(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMStatus) contextValidateAllocations(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Allocations) { // not required
return nil
}
if err := m.Allocations.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("allocations")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("allocations")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *IPAMStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMStatus) UnmarshalBinary(b []byte) error {
var res IPAMStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// IPListEntry IP entry with metadata
//
// swagger:model IPListEntry
type IPListEntry struct {
// Key of the entry in the form of a CIDR range
// Required: true
Cidr *string `json:"cidr"`
// The context ID for the encryption session
EncryptKey int64 `json:"encryptKey,omitempty"`
// IP address of the host
HostIP string `json:"hostIP,omitempty"`
// Numerical identity assigned to the IP
// Required: true
Identity *int64 `json:"identity"`
// metadata
Metadata *IPListEntryMetadata `json:"metadata,omitempty"`
}
// Validate validates this IP list entry
func (m *IPListEntry) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCidr(formats); err != nil {
res = append(res, err)
}
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if err := m.validateMetadata(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateCidr enforces that the required Cidr field is non-nil.
func (m *IPListEntry) validateCidr(formats strfmt.Registry) error {
	return validate.Required("cidr", "body", m.Cidr)
}
// validateIdentity enforces that the required Identity field is non-nil.
func (m *IPListEntry) validateIdentity(formats strfmt.Registry) error {
	return validate.Required("identity", "body", m.Identity)
}
func (m *IPListEntry) validateMetadata(formats strfmt.Registry) error {
if swag.IsZero(m.Metadata) { // not required
return nil
}
if m.Metadata != nil {
if err := m.Metadata.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metadata")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metadata")
}
return err
}
}
return nil
}
// ContextValidate validate this IP list entry based on the context it is used
func (m *IPListEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMetadata(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPListEntry) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error {
if m.Metadata != nil {
if swag.IsZero(m.Metadata) { // not required
return nil
}
if err := m.Metadata.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metadata")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metadata")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IPListEntry) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPListEntry) UnmarshalBinary(b []byte) error {
var res IPListEntry
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPListEntryMetadata Additional metadata assigned to an IP list entry
//
// swagger:model IPListEntryMetadata
type IPListEntryMetadata struct {
// Name assigned to the IP (e.g. Kubernetes pod name)
Name string `json:"name,omitempty"`
// Namespace of the IP (e.g. Kubernetes namespace)
Namespace string `json:"namespace,omitempty"`
// Source of the IP entry and its metadata
// Example: k8s
Source string `json:"source,omitempty"`
}
// Validate validates this IP list entry metadata
// All fields are optional free-form strings, so there is nothing to check.
func (m *IPListEntryMetadata) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IP list entry metadata based on context it is used
// No validations depend on the request context for this model.
func (m *IPListEntryMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *IPListEntryMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *IPListEntryMetadata) UnmarshalBinary(b []byte) error {
var res IPListEntryMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPV4BigTCP Status of IPv4 BIG TCP
//
// swagger:model IPV4BigTCP
type IPV4BigTCP struct {
// Is IPv4 BIG TCP enabled
Enabled bool `json:"enabled,omitempty"`
// Maximum IPv4 GRO size
MaxGRO int64 `json:"maxGRO,omitempty"`
// Maximum IPv4 GSO size
MaxGSO int64 `json:"maxGSO,omitempty"`
}
// Validate validates this IP v4 big TCP
// All fields are unconstrained scalars, so there is nothing to check.
func (m *IPV4BigTCP) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IP v4 big TCP based on context it is used
// No validations depend on the request context for this model.
func (m *IPV4BigTCP) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *IPV4BigTCP) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *IPV4BigTCP) UnmarshalBinary(b []byte) error {
var res IPV4BigTCP
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPV6BigTCP Status of IPv6 BIG TCP
//
// swagger:model IPV6BigTCP
type IPV6BigTCP struct {
// Is IPv6 BIG TCP enabled
Enabled bool `json:"enabled,omitempty"`
// Maximum IPv6 GRO size
MaxGRO int64 `json:"maxGRO,omitempty"`
// Maximum IPv6 GSO size
MaxGSO int64 `json:"maxGSO,omitempty"`
}
// Validate validates this IP v6 big TCP
// All fields are unconstrained scalars, so there is nothing to check.
func (m *IPV6BigTCP) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IP v6 big TCP based on context it is used
// No validations depend on the request context for this model.
func (m *IPV6BigTCP) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *IPV6BigTCP) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *IPV6BigTCP) UnmarshalBinary(b []byte) error {
var res IPV6BigTCP
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// K8sStatus Status of Kubernetes integration
//
// +k8s:deepcopy-gen=true
//
// swagger:model K8sStatus
type K8sStatus struct {
// k8s api versions
K8sAPIVersions []string `json:"k8s-api-versions"`
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// State the component is in
// Enum: ["Ok","Warning","Failure","Disabled"]
State string `json:"state,omitempty"`
}
// Validate validates this k8s status
// The only constrained field is State (enum membership).
func (m *K8sStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// k8sStatusTypeStatePropEnum holds the allowed values for the "state"
// property; it is populated once at package init from the spec's enum.
var k8sStatusTypeStatePropEnum []interface{}
func init() {
var res []string
// The literal is a compile-time constant copied from the swagger spec;
// a decode failure here is a programming error, hence the panic.
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
k8sStatusTypeStatePropEnum = append(k8sStatusTypeStatePropEnum, v)
}
}
// Named constants for each allowed "state" enum value.
const (
// K8sStatusStateOk captures enum value "Ok"
K8sStatusStateOk string = "Ok"
// K8sStatusStateWarning captures enum value "Warning"
K8sStatusStateWarning string = "Warning"
// K8sStatusStateFailure captures enum value "Failure"
K8sStatusStateFailure string = "Failure"
// K8sStatusStateDisabled captures enum value "Disabled"
K8sStatusStateDisabled string = "Disabled"
)
// prop value enum
// validateStateEnum reports an error if value is not one of the allowed
// enum values (case-sensitive comparison).
func (m *K8sStatus) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, k8sStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
// validateState checks the optional "state" field against the enum;
// the zero value (empty string) is accepted because the field is optional.
func (m *K8sStatus) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this k8s status based on context it is used
// No validations depend on the request context for this model.
func (m *K8sStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *K8sStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *K8sStatus) UnmarshalBinary(b []byte) error {
var res K8sStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// KVstoreConfiguration Configuration used for the kvstore
//
// swagger:model KVstoreConfiguration
type KVstoreConfiguration struct {
// Configuration options
Options map[string]string `json:"options,omitempty"`
// Type of kvstore
Type string `json:"type,omitempty"`
}
// Validate validates this k vstore configuration
// Both fields are unconstrained, so there is nothing to check.
func (m *KVstoreConfiguration) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this k vstore configuration based on context it is used
// No validations depend on the request context for this model.
func (m *KVstoreConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *KVstoreConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *KVstoreConfiguration) UnmarshalBinary(b []byte) error {
var res KVstoreConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// KubeProxyReplacement Status of kube-proxy replacement
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacement
type KubeProxyReplacement struct {
//
//
// +k8s:deepcopy-gen=true
DeviceList []*KubeProxyReplacementDeviceListItems0 `json:"deviceList"`
// devices
Devices []string `json:"devices"`
// direct routing device
DirectRoutingDevice string `json:"directRoutingDevice,omitempty"`
// features
Features *KubeProxyReplacementFeatures `json:"features,omitempty"`
// mode
// Enum: ["True","False"]
Mode string `json:"mode,omitempty"`
}
// Validate validates this kube proxy replacement
// It aggregates errors from all constrained fields into a CompositeError.
func (m *KubeProxyReplacement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDeviceList(formats); err != nil {
res = append(res, err)
}
if err := m.validateFeatures(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateDeviceList validates each element of the optional deviceList
// slice; zero-value elements are skipped, and nested errors are re-keyed
// under "deviceList.<i>".
func (m *KubeProxyReplacement) validateDeviceList(formats strfmt.Registry) error {
if swag.IsZero(m.DeviceList) { // not required
return nil
}
for i := 0; i < len(m.DeviceList); i++ {
if swag.IsZero(m.DeviceList[i]) { // not required
continue
}
if m.DeviceList[i] != nil {
if err := m.DeviceList[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("deviceList" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("deviceList" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// validateFeatures validates the optional nested features object,
// re-keying nested errors under "features".
func (m *KubeProxyReplacement) validateFeatures(formats strfmt.Registry) error {
if swag.IsZero(m.Features) { // not required
return nil
}
if m.Features != nil {
if err := m.Features.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features")
}
return err
}
}
return nil
}
// kubeProxyReplacementTypeModePropEnum holds the allowed values for the
// "mode" property; it is populated once at package init from the spec.
var kubeProxyReplacementTypeModePropEnum []interface{}
func init() {
var res []string
// Compile-time constant literal from the swagger spec; a decode failure
// is a programming error, hence the panic.
if err := json.Unmarshal([]byte(`["True","False"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementTypeModePropEnum = append(kubeProxyReplacementTypeModePropEnum, v)
}
}
// Named constants for each allowed "mode" enum value.
const (
// KubeProxyReplacementModeTrue captures enum value "True"
KubeProxyReplacementModeTrue string = "True"
// KubeProxyReplacementModeFalse captures enum value "False"
KubeProxyReplacementModeFalse string = "False"
)
// prop value enum
// validateModeEnum reports an error if value is not one of the allowed
// enum values (case-sensitive comparison).
func (m *KubeProxyReplacement) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementTypeModePropEnum, true); err != nil {
return err
}
return nil
}
// validateMode checks the optional "mode" field against the enum; the
// zero value (empty string) is accepted because the field is optional.
func (m *KubeProxyReplacement) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validate this kube proxy replacement based on the context it is used
func (m *KubeProxyReplacement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateDeviceList(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFeatures(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateDeviceList runs context validation on every element of
// the optional deviceList slice, re-keying nested errors under
// "deviceList.<i>".
func (m *KubeProxyReplacement) contextValidateDeviceList(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.DeviceList); i++ {
if m.DeviceList[i] != nil {
if swag.IsZero(m.DeviceList[i]) { // not required
// Skip only this zero-value element. The generated code returned
// nil here, which silently skipped context validation of every
// remaining element; the sibling validateDeviceList uses continue
// for the identical case, so do the same here.
continue
}
if err := m.DeviceList[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("deviceList" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("deviceList" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// contextValidateFeatures delegates context validation to the optional
// nested features object; a zero-value features is skipped (not required).
func (m *KubeProxyReplacement) contextValidateFeatures(ctx context.Context, formats strfmt.Registry) error {
if m.Features != nil {
if swag.IsZero(m.Features) { // not required
return nil
}
if err := m.Features.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *KubeProxyReplacement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *KubeProxyReplacement) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementDeviceListItems0
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementDeviceListItems0
type KubeProxyReplacementDeviceListItems0 struct {
//
//
// +k8s:deepcopy-gen=true
IP []string `json:"ip"`
// name
Name string `json:"name,omitempty"`
}
// Validate validates this kube proxy replacement device list items0
// Both fields are unconstrained, so there is nothing to check.
func (m *KubeProxyReplacementDeviceListItems0) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement device list items0 based on context it is used
// No validations depend on the request context for this model.
func (m *KubeProxyReplacementDeviceListItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *KubeProxyReplacementDeviceListItems0) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *KubeProxyReplacementDeviceListItems0) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementDeviceListItems0
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeatures
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeatures
type KubeProxyReplacementFeatures struct {
// annotations
Annotations []string `json:"annotations"`
// flag bpf-lb-sock-hostns-only
BpfSocketLBHostnsOnly bool `json:"bpfSocketLBHostnsOnly,omitempty"`
// external i ps
ExternalIPs *KubeProxyReplacementFeaturesExternalIPs `json:"externalIPs,omitempty"`
// graceful termination
GracefulTermination *KubeProxyReplacementFeaturesGracefulTermination `json:"gracefulTermination,omitempty"`
// host port
HostPort *KubeProxyReplacementFeaturesHostPort `json:"hostPort,omitempty"`
// host reachable services
HostReachableServices *KubeProxyReplacementFeaturesHostReachableServices `json:"hostReachableServices,omitempty"`
// nat46 x64
Nat46X64 *KubeProxyReplacementFeaturesNat46X64 `json:"nat46X64,omitempty"`
// node port
NodePort *KubeProxyReplacementFeaturesNodePort `json:"nodePort,omitempty"`
// session affinity
SessionAffinity *KubeProxyReplacementFeaturesSessionAffinity `json:"sessionAffinity,omitempty"`
// socket l b
SocketLB *KubeProxyReplacementFeaturesSocketLB `json:"socketLB,omitempty"`
// socket l b tracing
SocketLBTracing *KubeProxyReplacementFeaturesSocketLBTracing `json:"socketLBTracing,omitempty"`
}
// Validate validates this kube proxy replacement features
// It aggregates errors from every nested optional object into a
// CompositeError.
func (m *KubeProxyReplacementFeatures) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExternalIPs(formats); err != nil {
res = append(res, err)
}
if err := m.validateGracefulTermination(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostPort(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostReachableServices(formats); err != nil {
res = append(res, err)
}
if err := m.validateNat46X64(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodePort(formats); err != nil {
res = append(res, err)
}
if err := m.validateSessionAffinity(formats); err != nil {
res = append(res, err)
}
if err := m.validateSocketLB(formats); err != nil {
res = append(res, err)
}
if err := m.validateSocketLBTracing(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// The validateXxx helpers below all follow the same shape: skip the field
// when it is zero-valued (optional), otherwise delegate to the nested
// model's Validate and re-key any error under "features.<field>".
func (m *KubeProxyReplacementFeatures) validateExternalIPs(formats strfmt.Registry) error {
if swag.IsZero(m.ExternalIPs) { // not required
return nil
}
if m.ExternalIPs != nil {
if err := m.ExternalIPs.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "externalIPs")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "externalIPs")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateGracefulTermination(formats strfmt.Registry) error {
if swag.IsZero(m.GracefulTermination) { // not required
return nil
}
if m.GracefulTermination != nil {
if err := m.GracefulTermination.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "gracefulTermination")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "gracefulTermination")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateHostPort(formats strfmt.Registry) error {
if swag.IsZero(m.HostPort) { // not required
return nil
}
if m.HostPort != nil {
if err := m.HostPort.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostPort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostPort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateHostReachableServices(formats strfmt.Registry) error {
if swag.IsZero(m.HostReachableServices) { // not required
return nil
}
if m.HostReachableServices != nil {
if err := m.HostReachableServices.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostReachableServices")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostReachableServices")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateNat46X64(formats strfmt.Registry) error {
if swag.IsZero(m.Nat46X64) { // not required
return nil
}
if m.Nat46X64 != nil {
if err := m.Nat46X64.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateNodePort(formats strfmt.Registry) error {
if swag.IsZero(m.NodePort) { // not required
return nil
}
if m.NodePort != nil {
if err := m.NodePort.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nodePort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nodePort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSessionAffinity(formats strfmt.Registry) error {
if swag.IsZero(m.SessionAffinity) { // not required
return nil
}
if m.SessionAffinity != nil {
if err := m.SessionAffinity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "sessionAffinity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "sessionAffinity")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSocketLB(formats strfmt.Registry) error {
if swag.IsZero(m.SocketLB) { // not required
return nil
}
if m.SocketLB != nil {
if err := m.SocketLB.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLB")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLB")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSocketLBTracing(formats strfmt.Registry) error {
if swag.IsZero(m.SocketLBTracing) { // not required
return nil
}
if m.SocketLBTracing != nil {
if err := m.SocketLBTracing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLBTracing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLBTracing")
}
return err
}
}
return nil
}
// ContextValidate validate this kube proxy replacement features based on the context it is used
// It aggregates errors from every nested optional object into a
// CompositeError.
func (m *KubeProxyReplacementFeatures) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateExternalIPs(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateGracefulTermination(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostPort(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostReachableServices(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNat46X64(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodePort(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSessionAffinity(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSocketLB(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSocketLBTracing(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// The contextValidateXxx helpers below mirror the validateXxx helpers:
// skip zero-valued optional fields, otherwise delegate to the nested
// model's ContextValidate and re-key any error under "features.<field>".
func (m *KubeProxyReplacementFeatures) contextValidateExternalIPs(ctx context.Context, formats strfmt.Registry) error {
if m.ExternalIPs != nil {
if swag.IsZero(m.ExternalIPs) { // not required
return nil
}
if err := m.ExternalIPs.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "externalIPs")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "externalIPs")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateGracefulTermination(ctx context.Context, formats strfmt.Registry) error {
if m.GracefulTermination != nil {
if swag.IsZero(m.GracefulTermination) { // not required
return nil
}
if err := m.GracefulTermination.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "gracefulTermination")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "gracefulTermination")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateHostPort(ctx context.Context, formats strfmt.Registry) error {
if m.HostPort != nil {
if swag.IsZero(m.HostPort) { // not required
return nil
}
if err := m.HostPort.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostPort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostPort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateHostReachableServices(ctx context.Context, formats strfmt.Registry) error {
if m.HostReachableServices != nil {
if swag.IsZero(m.HostReachableServices) { // not required
return nil
}
if err := m.HostReachableServices.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostReachableServices")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostReachableServices")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateNat46X64(ctx context.Context, formats strfmt.Registry) error {
if m.Nat46X64 != nil {
if swag.IsZero(m.Nat46X64) { // not required
return nil
}
if err := m.Nat46X64.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateNodePort(ctx context.Context, formats strfmt.Registry) error {
if m.NodePort != nil {
if swag.IsZero(m.NodePort) { // not required
return nil
}
if err := m.NodePort.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nodePort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nodePort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSessionAffinity(ctx context.Context, formats strfmt.Registry) error {
if m.SessionAffinity != nil {
if swag.IsZero(m.SessionAffinity) { // not required
return nil
}
if err := m.SessionAffinity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "sessionAffinity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "sessionAffinity")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSocketLB(ctx context.Context, formats strfmt.Registry) error {
if m.SocketLB != nil {
if swag.IsZero(m.SocketLB) { // not required
return nil
}
if err := m.SocketLB.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLB")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLB")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSocketLBTracing(ctx context.Context, formats strfmt.Registry) error {
if m.SocketLBTracing != nil {
if swag.IsZero(m.SocketLBTracing) { // not required
return nil
}
if err := m.SocketLBTracing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLBTracing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLBTracing")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *KubeProxyReplacementFeatures) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *KubeProxyReplacementFeatures) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeatures
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesExternalIPs kube proxy replacement features external i ps
//
// swagger:model KubeProxyReplacementFeaturesExternalIPs
type KubeProxyReplacementFeaturesExternalIPs struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features external i ps
// The single boolean field is unconstrained, so there is nothing to check.
func (m *KubeProxyReplacementFeaturesExternalIPs) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features external i ps based on context it is used
// No validations depend on the request context for this model.
func (m *KubeProxyReplacementFeaturesExternalIPs) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *KubeProxyReplacementFeaturesExternalIPs) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *KubeProxyReplacementFeaturesExternalIPs) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesExternalIPs
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesGracefulTermination Deprecated
//
// swagger:model KubeProxyReplacementFeaturesGracefulTermination
type KubeProxyReplacementFeaturesGracefulTermination struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features graceful termination
// The single boolean field is unconstrained, so there is nothing to check.
func (m *KubeProxyReplacementFeaturesGracefulTermination) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features graceful termination based on context it is used
// No validations depend on the request context for this model.
func (m *KubeProxyReplacementFeaturesGracefulTermination) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *KubeProxyReplacementFeaturesGracefulTermination) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *KubeProxyReplacementFeaturesGracefulTermination) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesGracefulTermination
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesHostPort kube proxy replacement features host port
//
// swagger:model KubeProxyReplacementFeaturesHostPort
type KubeProxyReplacementFeaturesHostPort struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features host port
// The single boolean field is unconstrained, so there is nothing to check.
func (m *KubeProxyReplacementFeaturesHostPort) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features host port based on context it is used
// No validations depend on the request context for this model.
func (m *KubeProxyReplacementFeaturesHostPort) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *KubeProxyReplacementFeaturesHostPort) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *KubeProxyReplacementFeaturesHostPort) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesHostPort
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesHostReachableServices
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesHostReachableServices
type KubeProxyReplacementFeaturesHostReachableServices struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// protocols
Protocols []string `json:"protocols"`
}
// Validate validates this kube proxy replacement features host reachable services
// Both fields are unconstrained, so there is nothing to check.
func (m *KubeProxyReplacementFeaturesHostReachableServices) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features host reachable services based on context it is used
// No validations depend on the request context for this model.
func (m *KubeProxyReplacementFeaturesHostReachableServices) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
// A nil receiver serializes to a nil byte slice instead of panicking.
func (m *KubeProxyReplacementFeaturesHostReachableServices) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
// Decoding goes through a temporary value so the receiver is left
// unmodified when the payload is invalid.
func (m *KubeProxyReplacementFeaturesHostReachableServices) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesHostReachableServices
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNat46X64
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesNat46X64
type KubeProxyReplacementFeaturesNat46X64 struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// gateway
Gateway *KubeProxyReplacementFeaturesNat46X64Gateway `json:"gateway,omitempty"`
// service
Service *KubeProxyReplacementFeaturesNat46X64Service `json:"service,omitempty"`
}
// Validate validates this kube proxy replacement features nat46 x64
// It aggregates errors from the nested gateway and service objects.
func (m *KubeProxyReplacementFeaturesNat46X64) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateGateway(formats); err != nil {
res = append(res, err)
}
if err := m.validateService(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateGateway validates the optional nested gateway object, re-keying
// nested errors under "features.nat46X64.gateway".
func (m *KubeProxyReplacementFeaturesNat46X64) validateGateway(formats strfmt.Registry) error {
if swag.IsZero(m.Gateway) { // not required
return nil
}
if m.Gateway != nil {
if err := m.Gateway.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
}
return err
}
}
return nil
}
// validateService validates the optional nested service object, re-keying
// nested errors under "features.nat46X64.service".
func (m *KubeProxyReplacementFeaturesNat46X64) validateService(formats strfmt.Registry) error {
if swag.IsZero(m.Service) { // not required
return nil
}
if m.Service != nil {
if err := m.Service.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "service")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "service")
}
return err
}
}
return nil
}
// ContextValidate validate this kube proxy replacement features nat46 x64 based on the context it is used
func (m *KubeProxyReplacementFeaturesNat46X64) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateGateway(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateService(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateGateway(ctx context.Context, formats strfmt.Registry) error {
if m.Gateway != nil {
if swag.IsZero(m.Gateway) { // not required
return nil
}
if err := m.Gateway.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateService(ctx context.Context, formats strfmt.Registry) error {
if m.Service != nil {
if swag.IsZero(m.Service) { // not required
return nil
}
if err := m.Service.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "service")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "service")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNat46X64
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNat46X64Gateway describes the NAT46/NAT64
// gateway feature state and its configured prefixes.
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesNat46X64Gateway
type KubeProxyReplacementFeaturesNat46X64Gateway struct {

	// enabled
	Enabled bool `json:"enabled,omitempty"`

	// prefixes
	Prefixes []string `json:"prefixes"`
}

// Validate validates this kube proxy replacement features nat46 x64 gateway.
// No constraints are defined, so validation always succeeds.
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this kube proxy replacement features nat46 x64 gateway based on context it is used.
// No context-sensitive properties exist, so this always succeeds.
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) UnmarshalBinary(b []byte) error {
	var res KubeProxyReplacementFeaturesNat46X64Gateway
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// KubeProxyReplacementFeaturesNat46X64Service kube proxy replacement features nat46 x64 service.
// Enablement flag for the NAT46/NAT64 service translation feature.
//
// swagger:model KubeProxyReplacementFeaturesNat46X64Service
type KubeProxyReplacementFeaturesNat46X64Service struct {

	// enabled
	Enabled bool `json:"enabled,omitempty"`
}

// Validate validates this kube proxy replacement features nat46 x64 service.
// No constraints are defined, so validation always succeeds.
func (m *KubeProxyReplacementFeaturesNat46X64Service) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this kube proxy replacement features nat46 x64 service based on context it is used.
// No context-sensitive properties exist, so this always succeeds.
func (m *KubeProxyReplacementFeaturesNat46X64Service) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *KubeProxyReplacementFeaturesNat46X64Service) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *KubeProxyReplacementFeaturesNat46X64Service) UnmarshalBinary(b []byte) error {
	var res KubeProxyReplacementFeaturesNat46X64Service
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// KubeProxyReplacementFeaturesNodePort kube proxy replacement features node port.
// NodePort feature state: acceleration/algorithm/DSR/mode enums plus the
// Maglev lookup-table size and the NodePort port range.
//
// swagger:model KubeProxyReplacementFeaturesNodePort
type KubeProxyReplacementFeaturesNodePort struct {

	// acceleration
	// Enum: ["None","Native","Generic","Best-Effort"]
	Acceleration string `json:"acceleration,omitempty"`

	// algorithm
	// Enum: ["Random","Maglev"]
	Algorithm string `json:"algorithm,omitempty"`

	// dsr mode
	// Enum: ["IP Option/Extension","IPIP","Geneve"]
	DsrMode string `json:"dsrMode,omitempty"`

	// enabled
	Enabled bool `json:"enabled,omitempty"`

	// lut size
	LutSize int64 `json:"lutSize,omitempty"`

	// mode
	// Enum: ["SNAT","DSR","Hybrid"]
	Mode string `json:"mode,omitempty"`

	// port max
	PortMax int64 `json:"portMax,omitempty"`

	// port min
	PortMin int64 `json:"portMin,omitempty"`
}

// Validate validates this kube proxy replacement features node port.
// Each enum-constrained property is checked; errors are aggregated into a
// CompositeError.
func (m *KubeProxyReplacementFeaturesNodePort) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateAcceleration(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateAlgorithm(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateDsrMode(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateMode(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// Allowed values for the acceleration property, populated once at init
// from the swagger enum definition.
var kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["None","Native","Generic","Best-Effort"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum = append(kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum, v)
	}
}

const (

	// KubeProxyReplacementFeaturesNodePortAccelerationNone captures enum value "None"
	KubeProxyReplacementFeaturesNodePortAccelerationNone string = "None"

	// KubeProxyReplacementFeaturesNodePortAccelerationNative captures enum value "Native"
	KubeProxyReplacementFeaturesNodePortAccelerationNative string = "Native"

	// KubeProxyReplacementFeaturesNodePortAccelerationGeneric captures enum value "Generic"
	KubeProxyReplacementFeaturesNodePortAccelerationGeneric string = "Generic"

	// KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort captures enum value "Best-Effort"
	KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort string = "Best-Effort"
)

// prop value enum: case-sensitive membership check against the enum above.
func (m *KubeProxyReplacementFeaturesNodePort) validateAccelerationEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateAcceleration checks the optional acceleration property against
// its enum; the zero value is skipped because the field is not required.
func (m *KubeProxyReplacementFeaturesNodePort) validateAcceleration(formats strfmt.Registry) error {
	if swag.IsZero(m.Acceleration) { // not required
		return nil
	}

	// value enum
	if err := m.validateAccelerationEnum("features"+"."+"nodePort"+"."+"acceleration", "body", m.Acceleration); err != nil {
		return err
	}

	return nil
}

// Allowed values for the algorithm property, populated once at init.
var kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["Random","Maglev"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum = append(kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum, v)
	}
}

const (

	// KubeProxyReplacementFeaturesNodePortAlgorithmRandom captures enum value "Random"
	KubeProxyReplacementFeaturesNodePortAlgorithmRandom string = "Random"

	// KubeProxyReplacementFeaturesNodePortAlgorithmMaglev captures enum value "Maglev"
	KubeProxyReplacementFeaturesNodePortAlgorithmMaglev string = "Maglev"
)

// prop value enum: case-sensitive membership check against the enum above.
func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithmEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateAlgorithm checks the optional algorithm property against its enum.
func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithm(formats strfmt.Registry) error {
	if swag.IsZero(m.Algorithm) { // not required
		return nil
	}

	// value enum
	if err := m.validateAlgorithmEnum("features"+"."+"nodePort"+"."+"algorithm", "body", m.Algorithm); err != nil {
		return err
	}

	return nil
}

// Allowed values for the dsrMode property, populated once at init.
var kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["IP Option/Extension","IPIP","Geneve"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, v)
	}
}

const (

	// KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension captures enum value "IP Option/Extension"
	KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension string = "IP Option/Extension"

	// KubeProxyReplacementFeaturesNodePortDsrModeIPIP captures enum value "IPIP"
	KubeProxyReplacementFeaturesNodePortDsrModeIPIP string = "IPIP"

	// KubeProxyReplacementFeaturesNodePortDsrModeGeneve captures enum value "Geneve"
	KubeProxyReplacementFeaturesNodePortDsrModeGeneve string = "Geneve"
)

// prop value enum: case-sensitive membership check against the enum above.
func (m *KubeProxyReplacementFeaturesNodePort) validateDsrModeEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateDsrMode checks the optional dsrMode property against its enum.
func (m *KubeProxyReplacementFeaturesNodePort) validateDsrMode(formats strfmt.Registry) error {
	if swag.IsZero(m.DsrMode) { // not required
		return nil
	}

	// value enum
	if err := m.validateDsrModeEnum("features"+"."+"nodePort"+"."+"dsrMode", "body", m.DsrMode); err != nil {
		return err
	}

	return nil
}

// Allowed values for the mode property, populated once at init.
var kubeProxyReplacementFeaturesNodePortTypeModePropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["SNAT","DSR","Hybrid"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		kubeProxyReplacementFeaturesNodePortTypeModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeModePropEnum, v)
	}
}

const (

	// KubeProxyReplacementFeaturesNodePortModeSNAT captures enum value "SNAT"
	KubeProxyReplacementFeaturesNodePortModeSNAT string = "SNAT"

	// KubeProxyReplacementFeaturesNodePortModeDSR captures enum value "DSR"
	KubeProxyReplacementFeaturesNodePortModeDSR string = "DSR"

	// KubeProxyReplacementFeaturesNodePortModeHybrid captures enum value "Hybrid"
	KubeProxyReplacementFeaturesNodePortModeHybrid string = "Hybrid"
)

// prop value enum: case-sensitive membership check against the enum above.
func (m *KubeProxyReplacementFeaturesNodePort) validateModeEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeModePropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateMode checks the optional mode property against its enum.
func (m *KubeProxyReplacementFeaturesNodePort) validateMode(formats strfmt.Registry) error {
	if swag.IsZero(m.Mode) { // not required
		return nil
	}

	// value enum
	if err := m.validateModeEnum("features"+"."+"nodePort"+"."+"mode", "body", m.Mode); err != nil {
		return err
	}

	return nil
}

// ContextValidate validates this kube proxy replacement features node port based on context it is used.
// No context-sensitive properties exist, so this always succeeds.
func (m *KubeProxyReplacementFeaturesNodePort) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *KubeProxyReplacementFeaturesNodePort) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *KubeProxyReplacementFeaturesNodePort) UnmarshalBinary(b []byte) error {
	var res KubeProxyReplacementFeaturesNodePort
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// KubeProxyReplacementFeaturesSessionAffinity kube proxy replacement features session affinity.
// Enablement flag for the session-affinity feature.
//
// swagger:model KubeProxyReplacementFeaturesSessionAffinity
type KubeProxyReplacementFeaturesSessionAffinity struct {

	// enabled
	Enabled bool `json:"enabled,omitempty"`
}

// Validate validates this kube proxy replacement features session affinity.
// No constraints are defined, so validation always succeeds.
func (m *KubeProxyReplacementFeaturesSessionAffinity) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this kube proxy replacement features session affinity based on context it is used.
// No context-sensitive properties exist, so this always succeeds.
func (m *KubeProxyReplacementFeaturesSessionAffinity) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *KubeProxyReplacementFeaturesSessionAffinity) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *KubeProxyReplacementFeaturesSessionAffinity) UnmarshalBinary(b []byte) error {
	var res KubeProxyReplacementFeaturesSessionAffinity
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// KubeProxyReplacementFeaturesSocketLB kube proxy replacement features socket l b.
// Enablement flag for socket-level load balancing.
//
// swagger:model KubeProxyReplacementFeaturesSocketLB
type KubeProxyReplacementFeaturesSocketLB struct {

	// enabled
	Enabled bool `json:"enabled,omitempty"`
}

// Validate validates this kube proxy replacement features socket l b.
// No constraints are defined, so validation always succeeds.
func (m *KubeProxyReplacementFeaturesSocketLB) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this kube proxy replacement features socket l b based on context it is used.
// No context-sensitive properties exist, so this always succeeds.
func (m *KubeProxyReplacementFeaturesSocketLB) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *KubeProxyReplacementFeaturesSocketLB) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *KubeProxyReplacementFeaturesSocketLB) UnmarshalBinary(b []byte) error {
	var res KubeProxyReplacementFeaturesSocketLB
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// KubeProxyReplacementFeaturesSocketLBTracing kube proxy replacement features socket l b tracing.
// Enablement flag for socket-LB tracing.
//
// swagger:model KubeProxyReplacementFeaturesSocketLBTracing
type KubeProxyReplacementFeaturesSocketLBTracing struct {

	// enabled
	Enabled bool `json:"enabled,omitempty"`
}

// Validate validates this kube proxy replacement features socket l b tracing.
// No constraints are defined, so validation always succeeds.
func (m *KubeProxyReplacementFeaturesSocketLBTracing) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this kube proxy replacement features socket l b tracing based on context it is used.
// No context-sensitive properties exist, so this always succeeds.
func (m *KubeProxyReplacementFeaturesSocketLBTracing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *KubeProxyReplacementFeaturesSocketLBTracing) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *KubeProxyReplacementFeaturesSocketLBTracing) UnmarshalBinary(b []byte) error {
	var res KubeProxyReplacementFeaturesSocketLBTracing
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// L4Policy L4 endpoint policy: the lists of L4 ingress and egress rules
// that apply to an endpoint.
//
// swagger:model L4Policy
type L4Policy struct {

	// List of L4 egress rules
	Egress []*PolicyRule `json:"egress"`

	// List of L4 ingress rules
	Ingress []*PolicyRule `json:"ingress"`
}

// Validate validates this l4 policy.
// Egress and ingress rule lists are validated; errors are aggregated into
// a CompositeError.
func (m *L4Policy) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateEgress(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateIngress(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateEgress validates each non-nil egress rule, rewriting error names
// to include the element index; zero-valued elements are skipped.
func (m *L4Policy) validateEgress(formats strfmt.Registry) error {
	if swag.IsZero(m.Egress) { // not required
		return nil
	}

	for i := 0; i < len(m.Egress); i++ {
		if swag.IsZero(m.Egress[i]) { // not required
			continue
		}

		if m.Egress[i] != nil {
			if err := m.Egress[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("egress" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("egress" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// validateIngress validates each non-nil ingress rule, rewriting error
// names to include the element index; zero-valued elements are skipped.
func (m *L4Policy) validateIngress(formats strfmt.Registry) error {
	if swag.IsZero(m.Ingress) { // not required
		return nil
	}

	for i := 0; i < len(m.Ingress); i++ {
		if swag.IsZero(m.Ingress[i]) { // not required
			continue
		}

		if m.Ingress[i] != nil {
			if err := m.Ingress[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validate this l4 policy based on the context it is used.
// Delegates to the per-field context validators and aggregates their errors.
func (m *L4Policy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateEgress(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateIngress(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidateEgress context-validates each non-nil egress rule,
// rewriting error names to include the element index.
//
// Fix: a zero-valued element previously triggered an early `return nil`,
// which silently skipped context validation of all remaining egress rules.
// It now `continue`s to the next element, matching the loop shape of
// validateEgress.
func (m *L4Policy) contextValidateEgress(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.Egress); i++ {

		if m.Egress[i] != nil {

			if swag.IsZero(m.Egress[i]) { // not required
				continue
			}

			if err := m.Egress[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("egress" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("egress" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// contextValidateIngress context-validates each non-nil ingress rule,
// rewriting error names to include the element index.
//
// Fix: a zero-valued element previously triggered an early `return nil`,
// which silently skipped context validation of all remaining ingress rules.
// It now `continue`s to the next element, matching the loop shape of
// validateIngress.
func (m *L4Policy) contextValidateIngress(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.Ingress); i++ {

		if m.Ingress[i] != nil {

			if swag.IsZero(m.Ingress[i]) { // not required
				continue
			}

			if err := m.Ingress[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// MarshalBinary interface implementation (encoding.BinaryMarshaler);
// a nil receiver marshals to nil bytes.
func (m *L4Policy) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *L4Policy) UnmarshalBinary(b []byte) error {
	var res L4Policy
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LRPBackend Pod backend of an LRP (local redirect policy).
//
// swagger:model LRPBackend
type LRPBackend struct {

	// backend address
	BackendAddress *BackendAddress `json:"backend-address,omitempty"`

	// Namespace and name of the backend pod
	PodID string `json:"pod-id,omitempty"`
}

// Validate validates this l r p backend.
// Only the optional backend address carries constraints.
func (m *LRPBackend) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateBackendAddress(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateBackendAddress validates the optional backend address sub-model,
// rewriting any validation error's name to the field's JSON path.
func (m *LRPBackend) validateBackendAddress(formats strfmt.Registry) error {
	if swag.IsZero(m.BackendAddress) { // not required
		return nil
	}

	if m.BackendAddress != nil {
		if err := m.BackendAddress.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("backend-address")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("backend-address")
			}
			return err
		}
	}

	return nil
}

// ContextValidate validate this l r p backend based on the context it is used.
// Delegates to the backend-address context validator.
func (m *LRPBackend) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateBackendAddress(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateBackendAddress context-validates the optional backend
// address; zero values are skipped because the field is not required.
func (m *LRPBackend) contextValidateBackendAddress(ctx context.Context, formats strfmt.Registry) error {
	if m.BackendAddress != nil {

		if swag.IsZero(m.BackendAddress) { // not required
			return nil
		}

		if err := m.BackendAddress.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("backend-address")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("backend-address")
			}
			return err
		}
	}

	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *LRPBackend) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *LRPBackend) UnmarshalBinary(b []byte) error {
	var res LRPBackend
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LRPSpec Configuration of an LRP (local redirect policy).
//
// swagger:model LRPSpec
type LRPSpec struct {

	// mapping of frontends to pod backends
	FrontendMappings []*FrontendMapping `json:"frontend-mappings"`

	// LRP frontend type
	FrontendType string `json:"frontend-type,omitempty"`

	// LRP config type
	LrpType string `json:"lrp-type,omitempty"`

	// LRP service name
	Name string `json:"name,omitempty"`

	// LRP service namespace
	Namespace string `json:"namespace,omitempty"`

	// matching k8s service namespace and name
	ServiceID string `json:"service-id,omitempty"`

	// Unique identification
	UID string `json:"uid,omitempty"`
}

// Validate validates this l r p spec.
// Only the frontend-mappings list carries constraints.
func (m *LRPSpec) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateFrontendMappings(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateFrontendMappings validates each non-nil frontend mapping,
// rewriting error names to include the element index; zero-valued
// elements are skipped.
func (m *LRPSpec) validateFrontendMappings(formats strfmt.Registry) error {
	if swag.IsZero(m.FrontendMappings) { // not required
		return nil
	}

	for i := 0; i < len(m.FrontendMappings); i++ {
		if swag.IsZero(m.FrontendMappings[i]) { // not required
			continue
		}

		if m.FrontendMappings[i] != nil {
			if err := m.FrontendMappings[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// ContextValidate validate this l r p spec based on the context it is used.
// Delegates to the frontend-mappings context validator.
func (m *LRPSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateFrontendMappings(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidateFrontendMappings context-validates each non-nil frontend
// mapping, rewriting error names to include the element index.
//
// Fix: a zero-valued element previously triggered an early `return nil`,
// which silently skipped context validation of all remaining mappings.
// It now `continue`s to the next element, matching the loop shape of
// validateFrontendMappings.
func (m *LRPSpec) contextValidateFrontendMappings(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.FrontendMappings); i++ {

		if m.FrontendMappings[i] != nil {

			if swag.IsZero(m.FrontendMappings[i]) { // not required
				continue
			}

			if err := m.FrontendMappings[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// MarshalBinary interface implementation (encoding.BinaryMarshaler);
// a nil receiver marshals to nil bytes.
func (m *LRPSpec) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *LRPSpec) UnmarshalBinary(b []byte) error {
	var res LRPSpec
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Label Label is the Cilium's representation of a container label.
//
// swagger:model Label
type Label struct {

	// key
	Key string `json:"key,omitempty"`

	// Source can be one of the above values (e.g. LabelSourceContainer)
	Source string `json:"source,omitempty"`

	// value
	Value string `json:"value,omitempty"`
}

// Validate validates this label.
// No constraints are defined, so validation always succeeds.
func (m *Label) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this label based on context it is used.
// No context-sensitive properties exist, so this always succeeds.
func (m *Label) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *Label) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *Label) UnmarshalBinary(b []byte) error {
	var res Label
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelArray LabelArray is an array of labels forming a set.
//
// swagger:model LabelArray
type LabelArray []*Label

// Validate validates this label array.
// Each non-nil element is validated in order; the first failing element's
// error is returned with its index appended to the error name. (The `res`
// accumulator is generator boilerplate and is never appended to here.)
func (m LabelArray) Validate(formats strfmt.Registry) error {
	var res []error

	for i := 0; i < len(m); i++ {
		if swag.IsZero(m[i]) { // not required
			continue
		}

		if m[i] != nil {
			if err := m[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName(strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName(strconv.Itoa(i))
				}
				return err
			}
		}
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// ContextValidate validate this label array based on the context it is used.
// Each non-nil element is context-validated in order; the first failing
// element's error is returned with its index appended to the error name.
//
// Fixes over the generated code: a zero-valued element previously triggered
// an early `return nil`, silently skipping context validation of all
// remaining labels (it now `continue`s, matching Validate's loop shape),
// and the dead, never-appended `res` accumulator has been removed.
func (m LabelArray) ContextValidate(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m); i++ {

		if m[i] != nil {

			if swag.IsZero(m[i]) { // not required
				continue
			}

			if err := m[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName(strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName(strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfiguration Label configuration of an endpoint: the desired spec
// and the currently realized status.
//
// swagger:model LabelConfiguration
type LabelConfiguration struct {

	// The user provided desired configuration
	Spec *LabelConfigurationSpec `json:"spec,omitempty"`

	// The current configuration
	Status *LabelConfigurationStatus `json:"status,omitempty"`
}

// Validate validates this label configuration.
// Spec and status are validated; errors are aggregated into a CompositeError.
func (m *LabelConfiguration) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateSpec(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateStatus(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateSpec validates the optional spec sub-model, rewriting any
// validation error's name to the field's JSON path.
func (m *LabelConfiguration) validateSpec(formats strfmt.Registry) error {
	if swag.IsZero(m.Spec) { // not required
		return nil
	}

	if m.Spec != nil {
		if err := m.Spec.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("spec")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("spec")
			}
			return err
		}
	}

	return nil
}

// validateStatus validates the optional status sub-model, rewriting any
// validation error's name to the field's JSON path.
func (m *LabelConfiguration) validateStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.Status) { // not required
		return nil
	}

	if m.Status != nil {
		if err := m.Status.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("status")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("status")
			}
			return err
		}
	}

	return nil
}

// ContextValidate validate this label configuration based on the context it is used.
// Delegates to the per-field context validators and aggregates their errors.
func (m *LabelConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateSpec(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateStatus(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateSpec context-validates the optional spec; zero values are
// skipped because the field is not required.
func (m *LabelConfiguration) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
	if m.Spec != nil {

		if swag.IsZero(m.Spec) { // not required
			return nil
		}

		if err := m.Spec.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("spec")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("spec")
			}
			return err
		}
	}

	return nil
}

// contextValidateStatus context-validates the optional status; zero values
// are skipped because the field is not required.
func (m *LabelConfiguration) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
	if m.Status != nil {

		if swag.IsZero(m.Status) { // not required
			return nil
		}

		if err := m.Status.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("status")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("status")
			}
			return err
		}
	}

	return nil
}

// MarshalBinary interface implementation; a nil receiver marshals to nil bytes.
func (m *LabelConfiguration) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation; decodes into a temporary so
// *m is untouched on error.
func (m *LabelConfiguration) UnmarshalBinary(b []byte) error {
	var res LabelConfiguration
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfigurationSpec User desired Label configuration of an endpoint
//
// swagger:model LabelConfigurationSpec
type LabelConfigurationSpec struct {
	// Custom labels in addition to orchestration system labels.
	User Labels `json:"user,omitempty"`
}

// Validate validates this label configuration spec.
// Errors are aggregated into a CompositeValidationError.
func (m *LabelConfigurationSpec) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateUser(formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateUser validates the optional User labels; an unset value is valid.
func (m *LabelConfigurationSpec) validateUser(formats strfmt.Registry) error {
	if swag.IsZero(m.User) { // not required
		return nil
	}
	if err := m.User.Validate(formats); err != nil {
		// Re-scope nested validation errors under the "user" field name.
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("user")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("user")
		}
		return err
	}
	return nil
}

// ContextValidate validate this label configuration spec based on the context it is used
func (m *LabelConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error
	if err := m.contextValidateUser(ctx, formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateUser context-validates the User labels.
func (m *LabelConfigurationSpec) contextValidateUser(ctx context.Context, formats strfmt.Registry) error {
	if err := m.User.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("user")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("user")
		}
		return err
	}
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *LabelConfigurationSpec) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *LabelConfigurationSpec) UnmarshalBinary(b []byte) error {
	var res LabelConfigurationSpec
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfigurationStatus Labels and label configuration of an endpoint
//
// swagger:model LabelConfigurationStatus
type LabelConfigurationStatus struct {
	// All labels derived from the orchestration system
	Derived Labels `json:"derived,omitempty"`
	// Labels derived from orchestration system which have been disabled.
	Disabled Labels `json:"disabled,omitempty"`
	// The current configuration
	Realized *LabelConfigurationSpec `json:"realized,omitempty"`
	// Labels derived from orchestration system that are used in computing a security identity
	SecurityRelevant Labels `json:"security-relevant,omitempty"`
}

// Validate validates this label configuration status.
// All field errors are aggregated into a single CompositeValidationError.
func (m *LabelConfigurationStatus) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateDerived(formats); err != nil {
		res = append(res, err)
	}
	if err := m.validateDisabled(formats); err != nil {
		res = append(res, err)
	}
	if err := m.validateRealized(formats); err != nil {
		res = append(res, err)
	}
	if err := m.validateSecurityRelevant(formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateDerived validates the optional Derived labels.
func (m *LabelConfigurationStatus) validateDerived(formats strfmt.Registry) error {
	if swag.IsZero(m.Derived) { // not required
		return nil
	}
	if err := m.Derived.Validate(formats); err != nil {
		// Re-scope nested validation errors under the "derived" field name.
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("derived")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("derived")
		}
		return err
	}
	return nil
}

// validateDisabled validates the optional Disabled labels.
func (m *LabelConfigurationStatus) validateDisabled(formats strfmt.Registry) error {
	if swag.IsZero(m.Disabled) { // not required
		return nil
	}
	if err := m.Disabled.Validate(formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("disabled")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("disabled")
		}
		return err
	}
	return nil
}

// validateRealized validates the optional Realized configuration.
func (m *LabelConfigurationStatus) validateRealized(formats strfmt.Registry) error {
	if swag.IsZero(m.Realized) { // not required
		return nil
	}
	if m.Realized != nil {
		if err := m.Realized.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("realized")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("realized")
			}
			return err
		}
	}
	return nil
}

// validateSecurityRelevant validates the optional SecurityRelevant labels.
func (m *LabelConfigurationStatus) validateSecurityRelevant(formats strfmt.Registry) error {
	if swag.IsZero(m.SecurityRelevant) { // not required
		return nil
	}
	if err := m.SecurityRelevant.Validate(formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("security-relevant")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("security-relevant")
		}
		return err
	}
	return nil
}

// ContextValidate validate this label configuration status based on the context it is used
func (m *LabelConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error
	if err := m.contextValidateDerived(ctx, formats); err != nil {
		res = append(res, err)
	}
	if err := m.contextValidateDisabled(ctx, formats); err != nil {
		res = append(res, err)
	}
	if err := m.contextValidateRealized(ctx, formats); err != nil {
		res = append(res, err)
	}
	if err := m.contextValidateSecurityRelevant(ctx, formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateDerived context-validates the Derived labels.
func (m *LabelConfigurationStatus) contextValidateDerived(ctx context.Context, formats strfmt.Registry) error {
	if err := m.Derived.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("derived")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("derived")
		}
		return err
	}
	return nil
}

// contextValidateDisabled context-validates the Disabled labels.
func (m *LabelConfigurationStatus) contextValidateDisabled(ctx context.Context, formats strfmt.Registry) error {
	if err := m.Disabled.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("disabled")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("disabled")
		}
		return err
	}
	return nil
}

// contextValidateRealized context-validates the optional Realized configuration.
func (m *LabelConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
	if m.Realized != nil {
		if swag.IsZero(m.Realized) { // not required
			return nil
		}
		if err := m.Realized.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("realized")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("realized")
			}
			return err
		}
	}
	return nil
}

// contextValidateSecurityRelevant context-validates the SecurityRelevant labels.
func (m *LabelConfigurationStatus) contextValidateSecurityRelevant(ctx context.Context, formats strfmt.Registry) error {
	if err := m.SecurityRelevant.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("security-relevant")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("security-relevant")
		}
		return err
	}
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *LabelConfigurationStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *LabelConfigurationStatus) UnmarshalBinary(b []byte) error {
	var res LabelConfigurationStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Labels Set of labels
//
// swagger:model Labels
type Labels []string

// Validate validates this labels.
// Plain strings carry no constraints in the spec, so this is a no-op.
func (m Labels) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this labels based on context it is used.
// No context-dependent (e.g. read-only) properties exist, so this is a no-op.
func (m Labels) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// MapEvent Event on Map
//
// swagger:model MapEvent
type MapEvent struct {
	// Action type for event
	// Enum: ["update","delete"]
	Action string `json:"action,omitempty"`
	// Desired action to be performed after this event
	// Enum: ["ok","insert","delete"]
	DesiredAction string `json:"desired-action,omitempty"`
	// Map key on which the event occurred
	Key string `json:"key,omitempty"`
	// Last error seen while performing desired action
	LastError string `json:"last-error,omitempty"`
	// Timestamp when the event occurred
	// Format: date-time
	Timestamp strfmt.DateTime `json:"timestamp,omitempty"`
	// Map value on which the event occurred
	Value string `json:"value,omitempty"`
}

// Validate validates this map event.
// Checks the two enum-constrained fields and the timestamp format;
// errors are aggregated into a CompositeValidationError.
func (m *MapEvent) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateAction(formats); err != nil {
		res = append(res, err)
	}
	if err := m.validateDesiredAction(formats); err != nil {
		res = append(res, err)
	}
	if err := m.validateTimestamp(formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// mapEventTypeActionPropEnum holds the allowed values for the "action"
// property, populated once at package init from the spec's enum JSON.
var mapEventTypeActionPropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["update","delete"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		mapEventTypeActionPropEnum = append(mapEventTypeActionPropEnum, v)
	}
}

const (
	// MapEventActionUpdate captures enum value "update"
	MapEventActionUpdate string = "update"
	// MapEventActionDelete captures enum value "delete"
	MapEventActionDelete string = "delete"
)

// prop value enum
func (m *MapEvent) validateActionEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, mapEventTypeActionPropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateAction checks the optional Action field against its enum.
func (m *MapEvent) validateAction(formats strfmt.Registry) error {
	if swag.IsZero(m.Action) { // not required
		return nil
	}
	// value enum
	if err := m.validateActionEnum("action", "body", m.Action); err != nil {
		return err
	}
	return nil
}

// mapEventTypeDesiredActionPropEnum holds the allowed values for the
// "desired-action" property, populated once at package init.
var mapEventTypeDesiredActionPropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["ok","insert","delete"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		mapEventTypeDesiredActionPropEnum = append(mapEventTypeDesiredActionPropEnum, v)
	}
}

const (
	// MapEventDesiredActionOk captures enum value "ok"
	MapEventDesiredActionOk string = "ok"
	// MapEventDesiredActionInsert captures enum value "insert"
	MapEventDesiredActionInsert string = "insert"
	// MapEventDesiredActionDelete captures enum value "delete"
	MapEventDesiredActionDelete string = "delete"
)

// prop value enum
func (m *MapEvent) validateDesiredActionEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, mapEventTypeDesiredActionPropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateDesiredAction checks the optional DesiredAction field against its enum.
func (m *MapEvent) validateDesiredAction(formats strfmt.Registry) error {
	if swag.IsZero(m.DesiredAction) { // not required
		return nil
	}
	// value enum
	if err := m.validateDesiredActionEnum("desired-action", "body", m.DesiredAction); err != nil {
		return err
	}
	return nil
}

// validateTimestamp checks the optional Timestamp against the registered
// "date-time" format.
func (m *MapEvent) validateTimestamp(formats strfmt.Registry) error {
	if swag.IsZero(m.Timestamp) { // not required
		return nil
	}
	if err := validate.FormatOf("timestamp", "body", "date-time", m.Timestamp.String(), formats); err != nil {
		return err
	}
	return nil
}

// ContextValidate validates this map event based on context it is used.
// No context-dependent properties exist, so this is a no-op.
func (m *MapEvent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *MapEvent) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *MapEvent) UnmarshalBinary(b []byte) error {
	var res MapEvent
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Masquerading Status of masquerading
//
// +k8s:deepcopy-gen=true
//
// swagger:model Masquerading
type Masquerading struct {
	// enabled
	Enabled bool `json:"enabled,omitempty"`
	// enabled protocols
	EnabledProtocols *MasqueradingEnabledProtocols `json:"enabledProtocols,omitempty"`
	// Is BPF ip-masq-agent enabled
	IPMasqAgent bool `json:"ip-masq-agent,omitempty"`
	// mode
	// Enum: ["BPF","iptables"]
	Mode string `json:"mode,omitempty"`
	// This field is obsolete, please use snat-exclusion-cidr-v4 or snat-exclusion-cidr-v6.
	SnatExclusionCidr string `json:"snat-exclusion-cidr,omitempty"`
	// SnatExclusionCIDRv4 exempts SNAT from being performed on any packet sent to
	// an IPv4 address that belongs to this CIDR.
	SnatExclusionCidrV4 string `json:"snat-exclusion-cidr-v4,omitempty"`
	// SnatExclusionCIDRv6 exempts SNAT from being performed on any packet sent to
	// an IPv6 address that belongs to this CIDR.
	// For IPv6 we only do masquerading in iptables mode.
	SnatExclusionCidrV6 string `json:"snat-exclusion-cidr-v6,omitempty"`
}

// Validate validates this masquerading.
// Errors are aggregated into a CompositeValidationError.
func (m *Masquerading) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateEnabledProtocols(formats); err != nil {
		res = append(res, err)
	}
	if err := m.validateMode(formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateEnabledProtocols validates the optional nested EnabledProtocols.
func (m *Masquerading) validateEnabledProtocols(formats strfmt.Registry) error {
	if swag.IsZero(m.EnabledProtocols) { // not required
		return nil
	}
	if m.EnabledProtocols != nil {
		if err := m.EnabledProtocols.Validate(formats); err != nil {
			// Re-scope nested validation errors under "enabledProtocols".
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("enabledProtocols")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("enabledProtocols")
			}
			return err
		}
	}
	return nil
}

// masqueradingTypeModePropEnum holds the allowed values for the "mode"
// property, populated once at package init from the spec's enum JSON.
var masqueradingTypeModePropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["BPF","iptables"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		masqueradingTypeModePropEnum = append(masqueradingTypeModePropEnum, v)
	}
}

const (
	// MasqueradingModeBPF captures enum value "BPF"
	MasqueradingModeBPF string = "BPF"
	// MasqueradingModeIptables captures enum value "iptables"
	MasqueradingModeIptables string = "iptables"
)

// prop value enum
func (m *Masquerading) validateModeEnum(path, location string, value string) error {
	if err := validate.EnumCase(path, location, value, masqueradingTypeModePropEnum, true); err != nil {
		return err
	}
	return nil
}

// validateMode checks the optional Mode field against its enum.
func (m *Masquerading) validateMode(formats strfmt.Registry) error {
	if swag.IsZero(m.Mode) { // not required
		return nil
	}
	// value enum
	if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
		return err
	}
	return nil
}

// ContextValidate validate this masquerading based on the context it is used
func (m *Masquerading) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error
	if err := m.contextValidateEnabledProtocols(ctx, formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateEnabledProtocols context-validates the optional nested
// EnabledProtocols.
func (m *Masquerading) contextValidateEnabledProtocols(ctx context.Context, formats strfmt.Registry) error {
	if m.EnabledProtocols != nil {
		if swag.IsZero(m.EnabledProtocols) { // not required
			return nil
		}
		if err := m.EnabledProtocols.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("enabledProtocols")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("enabledProtocols")
			}
			return err
		}
	}
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *Masquerading) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *Masquerading) UnmarshalBinary(b []byte) error {
	var res Masquerading
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// MasqueradingEnabledProtocols Is masquerading enabled
//
// swagger:model MasqueradingEnabledProtocols
type MasqueradingEnabledProtocols struct {
	// Is masquerading enabled for IPv4 traffic
	IPV4 bool `json:"ipv4,omitempty"`
	// Is masquerading enabled for IPv6 traffic
	IPV6 bool `json:"ipv6,omitempty"`
}

// Validate validates this masquerading enabled protocols.
// Plain booleans carry no constraints, so this is a no-op.
func (m *MasqueradingEnabledProtocols) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this masquerading enabled protocols based on
// context it is used. No context-dependent properties exist, so no-op.
func (m *MasqueradingEnabledProtocols) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *MasqueradingEnabledProtocols) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *MasqueradingEnabledProtocols) UnmarshalBinary(b []byte) error {
	var res MasqueradingEnabledProtocols
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// MessageForwardingStatistics Statistics of a message forwarding entity
//
// swagger:model MessageForwardingStatistics
type MessageForwardingStatistics struct {
	// Number of messages denied
	Denied int64 `json:"denied,omitempty"`
	// Number of errors while parsing messages
	Error int64 `json:"error,omitempty"`
	// Number of messages forwarded
	Forwarded int64 `json:"forwarded,omitempty"`
	// Number of messages received
	Received int64 `json:"received,omitempty"`
}

// Validate validates this message forwarding statistics.
// Plain integer counters carry no constraints, so this is a no-op.
func (m *MessageForwardingStatistics) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this message forwarding statistics based on
// context it is used. No context-dependent properties exist, so no-op.
func (m *MessageForwardingStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *MessageForwardingStatistics) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *MessageForwardingStatistics) UnmarshalBinary(b []byte) error {
	var res MessageForwardingStatistics
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Metric Metric information
//
// swagger:model Metric
type Metric struct {
	// Labels of the metric
	Labels map[string]string `json:"labels,omitempty"`
	// Name of the metric
	Name string `json:"name,omitempty"`
	// Value of the metric
	Value float64 `json:"value,omitempty"`
}

// Validate validates this metric.
// No constrained properties in the spec, so this is a no-op.
func (m *Metric) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this metric based on context it is used.
// No context-dependent properties exist, so this is a no-op.
func (m *Metric) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *Metric) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *Metric) UnmarshalBinary(b []byte) error {
	var res Metric
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// MonitorStatus Status of the node monitor
//
// swagger:model MonitorStatus
type MonitorStatus struct {
	// Number of CPUs to listen on for events.
	Cpus int64 `json:"cpus,omitempty"`
	// Number of samples lost by perf.
	Lost int64 `json:"lost,omitempty"`
	// Number of pages used for the perf ring buffer.
	Npages int64 `json:"npages,omitempty"`
	// Pages size used for the perf ring buffer.
	Pagesize int64 `json:"pagesize,omitempty"`
	// Number of unknown samples.
	Unknown int64 `json:"unknown,omitempty"`
}

// Validate validates this monitor status.
// Plain integer fields carry no constraints, so this is a no-op.
func (m *MonitorStatus) Validate(formats strfmt.Registry) error {
	return nil
}

// ContextValidate validates this monitor status based on context it is used.
// No context-dependent properties exist, so this is a no-op.
func (m *MonitorStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *MonitorStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *MonitorStatus) UnmarshalBinary(b []byte) error {
	var res MonitorStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NameManager Internal state about DNS names in relation to policy subsystem
//
// swagger:model NameManager
type NameManager struct {
	// Names to poll for DNS Poller
	DNSPollNames []string `json:"DNSPollNames"`
	// Mapping of FQDNSelectors to corresponding regular expressions
	FQDNPolicySelectors []*SelectorEntry `json:"FQDNPolicySelectors"`
}

// Validate validates this name manager.
// Errors are aggregated into a CompositeValidationError.
func (m *NameManager) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateFQDNPolicySelectors(formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateFQDNPolicySelectors validates each non-nil, non-zero selector
// entry, reporting errors under "FQDNPolicySelectors.<index>".
func (m *NameManager) validateFQDNPolicySelectors(formats strfmt.Registry) error {
	if swag.IsZero(m.FQDNPolicySelectors) { // not required
		return nil
	}
	for i := 0; i < len(m.FQDNPolicySelectors); i++ {
		if swag.IsZero(m.FQDNPolicySelectors[i]) { // not required
			continue
		}
		if m.FQDNPolicySelectors[i] != nil {
			if err := m.FQDNPolicySelectors[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}

// ContextValidate validate this name manager based on the context it is used
func (m *NameManager) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error
	if err := m.contextValidateFQDNPolicySelectors(ctx, formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidateFQDNPolicySelectors context-validates each non-nil,
// non-zero selector entry, reporting errors under
// "FQDNPolicySelectors.<index>".
//
// Fix: the zero-value check previously executed `return nil` inside the
// loop, which would abort validation of all remaining slice elements as
// soon as one zero element was encountered. `continue` skips only that
// element, mirroring the Validate counterpart above.
func (m *NameManager) contextValidateFQDNPolicySelectors(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.FQDNPolicySelectors); i++ {
		if m.FQDNPolicySelectors[i] != nil {
			if swag.IsZero(m.FQDNPolicySelectors[i]) { // not required
				continue
			}
			if err := m.FQDNPolicySelectors[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *NameManager) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *NameManager) UnmarshalBinary(b []byte) error {
	var res NameManager
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NamedPorts List of named Layer 4 port and protocol pairs which will be used in Network
// Policy specs.
//
// +deepequal-gen=true
// +k8s:deepcopy-gen=true
//
// swagger:model NamedPorts
type NamedPorts []*Port

// Validate validates this named ports.
// Each non-nil, non-zero element is validated; the first element error is
// returned scoped to the element index.
func (m NamedPorts) Validate(formats strfmt.Registry) error {
	// res is part of the generated aggregation template; the loop returns
	// on the first element error, so res stays empty here.
	var res []error
	for i := 0; i < len(m); i++ {
		if swag.IsZero(m[i]) { // not required
			continue
		}
		if m[i] != nil {
			if err := m[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName(strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName(strconv.Itoa(i))
				}
				return err
			}
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// ContextValidate validate this named ports based on the context it is used.
// Each non-nil, non-zero element is context-validated; the first element
// error is returned scoped to the element index.
//
// Fix: the zero-value check previously executed `return nil` inside the
// loop, which would abort validation of all remaining elements as soon as
// one zero element was encountered. `continue` skips only that element,
// mirroring the Validate method above.
func (m NamedPorts) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	// res is part of the generated aggregation template; the loop returns
	// on the first element error, so res stays empty here.
	var res []error
	for i := 0; i < len(m); i++ {
		if m[i] != nil {
			if swag.IsZero(m[i]) { // not required
				continue
			}
			if err := m[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName(strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName(strconv.Itoa(i))
				}
				return err
			}
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeAddressing Addressing information of a node for all address families
//
// +k8s:deepcopy-gen=true
//
// swagger:model NodeAddressing
type NodeAddressing struct {
	// ipv4
	IPV4 *NodeAddressingElement `json:"ipv4,omitempty"`
	// ipv6
	IPV6 *NodeAddressingElement `json:"ipv6,omitempty"`
}

// Validate validates this node addressing.
// Errors from both families are aggregated into a CompositeValidationError.
func (m *NodeAddressing) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateIPV4(formats); err != nil {
		res = append(res, err)
	}
	if err := m.validateIPV6(formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateIPV4 validates the optional IPv4 addressing element.
func (m *NodeAddressing) validateIPV4(formats strfmt.Registry) error {
	if swag.IsZero(m.IPV4) { // not required
		return nil
	}
	if m.IPV4 != nil {
		if err := m.IPV4.Validate(formats); err != nil {
			// Re-scope nested validation errors under the "ipv4" field name.
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ipv4")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ipv4")
			}
			return err
		}
	}
	return nil
}

// validateIPV6 validates the optional IPv6 addressing element.
func (m *NodeAddressing) validateIPV6(formats strfmt.Registry) error {
	if swag.IsZero(m.IPV6) { // not required
		return nil
	}
	if m.IPV6 != nil {
		if err := m.IPV6.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ipv6")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ipv6")
			}
			return err
		}
	}
	return nil
}

// ContextValidate validate this node addressing based on the context it is used
func (m *NodeAddressing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error
	if err := m.contextValidateIPV4(ctx, formats); err != nil {
		res = append(res, err)
	}
	if err := m.contextValidateIPV6(ctx, formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateIPV4 context-validates the optional IPv4 addressing element.
func (m *NodeAddressing) contextValidateIPV4(ctx context.Context, formats strfmt.Registry) error {
	if m.IPV4 != nil {
		if swag.IsZero(m.IPV4) { // not required
			return nil
		}
		if err := m.IPV4.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ipv4")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ipv4")
			}
			return err
		}
	}
	return nil
}

// contextValidateIPV6 context-validates the optional IPv6 addressing element.
func (m *NodeAddressing) contextValidateIPV6(ctx context.Context, formats strfmt.Registry) error {
	if m.IPV6 != nil {
		if swag.IsZero(m.IPV6) { // not required
			return nil
		}
		if err := m.IPV6.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ipv6")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ipv6")
			}
			return err
		}
	}
	return nil
}

// MarshalBinary interface implementation (encoding.BinaryMarshaler via JSON).
func (m *NodeAddressing) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation (encoding.BinaryUnmarshaler via JSON).
func (m *NodeAddressing) UnmarshalBinary(b []byte) error {
	var res NodeAddressing
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeAddressingElement Addressing information
//
// swagger:model NodeAddressingElement
type NodeAddressingElement struct {
// Node address type, one of HostName, ExternalIP or InternalIP
AddressType string `json:"address-type,omitempty"`
// Address pool to be used for local endpoints
AllocRange string `json:"alloc-range,omitempty"`
// True if address family is enabled
Enabled bool `json:"enabled,omitempty"`
// IP address of node
IP string `json:"ip,omitempty"`
}
// Validate validates this node addressing element. The model declares no
// constrained fields, so validation always succeeds.
func (m *NodeAddressingElement) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this node addressing element based on context it
// is used. No field requires context-based validation, so this is a no-op.
func (m *NodeAddressingElement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *NodeAddressingElement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *NodeAddressingElement) UnmarshalBinary(b []byte) error {
var res NodeAddressingElement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeElement Known node in the cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model NodeElement
type NodeElement struct {
// Address used for probing cluster connectivity
HealthEndpointAddress *NodeAddressing `json:"health-endpoint-address,omitempty"`
// Source address for Ingress listener
IngressAddress *NodeAddressing `json:"ingress-address,omitempty"`
// Name of the node including the cluster association. This is typically
// <clustername>/<hostname>.
//
Name string `json:"name,omitempty"`
// Primary address used for intra-cluster communication
PrimaryAddress *NodeAddressing `json:"primary-address,omitempty"`
// Alternative addresses assigned to the node
SecondaryAddresses []*NodeAddressingElement `json:"secondary-addresses"`
// Source of the node configuration
Source string `json:"source,omitempty"`
}
// Validate validates this node element by validating every nested address
// field; all failures are aggregated into a composite validation error.
func (m *NodeElement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateHealthEndpointAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateIngressAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validatePrimaryAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateSecondaryAddresses(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateHealthEndpointAddress validates the optional
// "health-endpoint-address" field, re-rooting nested errors at its JSON path.
func (m *NodeElement) validateHealthEndpointAddress(formats strfmt.Registry) error {
if swag.IsZero(m.HealthEndpointAddress) { // not required
return nil
}
if m.HealthEndpointAddress != nil {
if err := m.HealthEndpointAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health-endpoint-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health-endpoint-address")
}
return err
}
}
return nil
}
// validateIngressAddress validates the optional "ingress-address" field,
// re-rooting nested errors at its JSON path.
func (m *NodeElement) validateIngressAddress(formats strfmt.Registry) error {
if swag.IsZero(m.IngressAddress) { // not required
return nil
}
if m.IngressAddress != nil {
if err := m.IngressAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress-address")
}
return err
}
}
return nil
}
// validatePrimaryAddress validates the optional "primary-address" field,
// re-rooting nested errors at its JSON path.
func (m *NodeElement) validatePrimaryAddress(formats strfmt.Registry) error {
if swag.IsZero(m.PrimaryAddress) { // not required
return nil
}
if m.PrimaryAddress != nil {
if err := m.PrimaryAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("primary-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("primary-address")
}
return err
}
}
return nil
}
// validateSecondaryAddresses validates each entry of the optional
// "secondary-addresses" slice. Zero-value entries are skipped (continue),
// and nested errors are indexed by position, e.g. "secondary-addresses.3".
func (m *NodeElement) validateSecondaryAddresses(formats strfmt.Registry) error {
if swag.IsZero(m.SecondaryAddresses) { // not required
return nil
}
for i := 0; i < len(m.SecondaryAddresses); i++ {
if swag.IsZero(m.SecondaryAddresses[i]) { // not required
continue
}
if m.SecondaryAddresses[i] != nil {
if err := m.SecondaryAddresses[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this node element based on the context it is used,
// delegating to each nested address field and aggregating failures into a
// composite error.
func (m *NodeElement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateHealthEndpointAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIngressAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateHealthEndpointAddress context-validates the optional
// "health-endpoint-address" field; zero values are skipped.
func (m *NodeElement) contextValidateHealthEndpointAddress(ctx context.Context, formats strfmt.Registry) error {
if m.HealthEndpointAddress != nil {
if swag.IsZero(m.HealthEndpointAddress) { // not required
return nil
}
if err := m.HealthEndpointAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health-endpoint-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health-endpoint-address")
}
return err
}
}
return nil
}
// contextValidateIngressAddress context-validates the optional
// "ingress-address" field; zero values are skipped.
func (m *NodeElement) contextValidateIngressAddress(ctx context.Context, formats strfmt.Registry) error {
if m.IngressAddress != nil {
if swag.IsZero(m.IngressAddress) { // not required
return nil
}
if err := m.IngressAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress-address")
}
return err
}
}
return nil
}
// contextValidatePrimaryAddress context-validates the optional
// "primary-address" field; zero values are skipped.
func (m *NodeElement) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error {
if m.PrimaryAddress != nil {
if swag.IsZero(m.PrimaryAddress) { // not required
return nil
}
if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("primary-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("primary-address")
}
return err
}
}
return nil
}
// contextValidateSecondaryAddresses context-validates every entry of the
// "secondary-addresses" slice. Zero-value entries are skipped (the field is
// not required), but validation continues with the remaining entries; nested
// errors are indexed by position, e.g. "secondary-addresses.3".
//
// Bug fix: the generated code returned nil when it encountered a zero-value
// entry, which silently skipped context validation of every subsequent
// element in the slice. Using continue matches the behavior of the
// corresponding Validate helper (validateSecondaryAddresses).
func (m *NodeElement) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.SecondaryAddresses); i++ {
if m.SecondaryAddresses[i] != nil {
if swag.IsZero(m.SecondaryAddresses[i]) { // not required
continue
}
if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *NodeElement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *NodeElement) UnmarshalBinary(b []byte) error {
var res NodeElement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// NodeID Node ID with associated node IP addresses
//
// swagger:model NodeID
type NodeID struct {
// ID allocated by the agent for the node
// Required: true
ID *int64 `json:"id"`
// IP addresses of the node associated with the ID in the agent
// Required: true
Ips []string `json:"ips"`
}
// Validate validates this node ID, checking that both required fields are
// present; failures are aggregated into a composite validation error.
func (m *NodeID) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateID(formats); err != nil {
res = append(res, err)
}
if err := m.validateIps(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateID checks the required "id" field is non-nil.
func (m *NodeID) validateID(formats strfmt.Registry) error {
if err := validate.Required("id", "body", m.ID); err != nil {
return err
}
return nil
}
// validateIps checks the required "ips" field is non-nil.
func (m *NodeID) validateIps(formats strfmt.Registry) error {
if err := validate.Required("ips", "body", m.Ips); err != nil {
return err
}
return nil
}
// ContextValidate validates this node ID based on context it is used. No
// field requires context-based validation, so this is a no-op.
func (m *NodeID) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *NodeID) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *NodeID) UnmarshalBinary(b []byte) error {
var res NodeID
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Policy Policy definition
// Deprecated, will be removed in v1.19.
//
// swagger:model Policy
type Policy struct {
// Policy definition as JSON.
Policy string `json:"policy,omitempty"`
// Revision number of the policy. Incremented each time the policy is
// changed in the agent's repository
//
Revision int64 `json:"revision,omitempty"`
}
// Validate validates this policy. The model declares no constrained fields,
// so validation always succeeds.
func (m *Policy) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy based on context it is used. No
// field requires context-based validation, so this is a no-op.
func (m *Policy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *Policy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *Policy) UnmarshalBinary(b []byte) error {
var res Policy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PolicyRule A policy rule including the rule labels it derives from
//
// swagger:model PolicyRule
type PolicyRule struct {
// The policy rule labels identifying the policy rules this rule derives from
DerivedFromRules [][]string `json:"derived-from-rules"`
// The policy rule as json
Rule string `json:"rule,omitempty"`
// The policy rule labels identifying the policy rules this rule derives from, mapped by selector
RulesBySelector map[string][][]string `json:"rules-by-selector,omitempty"`
}
// Validate validates this policy rule. The model declares no constrained
// fields, so validation always succeeds.
func (m *PolicyRule) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy rule based on context it is used. No
// field requires context-based validation, so this is a no-op.
func (m *PolicyRule) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *PolicyRule) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *PolicyRule) UnmarshalBinary(b []byte) error {
var res PolicyRule
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PolicyTraceResult Response to a policy resolution process
//
// swagger:model PolicyTraceResult
type PolicyTraceResult struct {
// log
Log string `json:"log,omitempty"`
// verdict
Verdict string `json:"verdict,omitempty"`
}
// Validate validates this policy trace result. The model declares no
// constrained fields, so validation always succeeds.
func (m *PolicyTraceResult) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy trace result based on context it is
// used. No field requires context-based validation, so this is a no-op.
func (m *PolicyTraceResult) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *PolicyTraceResult) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *PolicyTraceResult) UnmarshalBinary(b []byte) error {
var res PolicyTraceResult
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Port Layer 4 port / protocol pair
//
// +deepequal-gen=true
//
// swagger:model Port
type Port struct {
// Optional layer 4 port name
Name string `json:"name,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Layer 4 protocol
// Enum: ["TCP","UDP","SCTP","ICMP","ICMPV6","ANY"]
Protocol string `json:"protocol,omitempty"`
}
// Validate validates this port, checking the protocol against its enum;
// failures are aggregated into a composite validation error.
func (m *Port) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProtocol(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// portTypeProtocolPropEnum holds the allowed "protocol" values, built once
// at package init from the swagger enum literal.
var portTypeProtocolPropEnum []interface{}
func init() {
var res []string
// The panic is unreachable in practice: the literal is emitted by the
// generator and is always valid JSON.
if err := json.Unmarshal([]byte(`["TCP","UDP","SCTP","ICMP","ICMPV6","ANY"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
portTypeProtocolPropEnum = append(portTypeProtocolPropEnum, v)
}
}
const (
// PortProtocolTCP captures enum value "TCP"
PortProtocolTCP string = "TCP"
// PortProtocolUDP captures enum value "UDP"
PortProtocolUDP string = "UDP"
// PortProtocolSCTP captures enum value "SCTP"
PortProtocolSCTP string = "SCTP"
// PortProtocolICMP captures enum value "ICMP"
PortProtocolICMP string = "ICMP"
// PortProtocolICMPV6 captures enum value "ICMPV6"
PortProtocolICMPV6 string = "ICMPV6"
// PortProtocolANY captures enum value "ANY"
PortProtocolANY string = "ANY"
)
// validateProtocolEnum checks value against the protocol enum
// (case-sensitive match).
func (m *Port) validateProtocolEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, portTypeProtocolPropEnum, true); err != nil {
return err
}
return nil
}
// validateProtocol validates the optional "protocol" field against the enum;
// the empty value is accepted since the field is not required.
func (m *Port) validateProtocol(formats strfmt.Registry) error {
if swag.IsZero(m.Protocol) { // not required
return nil
}
// value enum
if err := m.validateProtocolEnum("protocol", "body", m.Protocol); err != nil {
return err
}
return nil
}
// ContextValidate validates this port based on context it is used. No field
// requires context-based validation, so this is a no-op.
func (m *Port) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *Port) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *Port) UnmarshalBinary(b []byte) error {
var res Port
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Prefilter Collection of endpoints to be served
//
// swagger:model Prefilter
type Prefilter struct {
// spec
Spec *PrefilterSpec `json:"spec,omitempty"`
// status
Status *PrefilterStatus `json:"status,omitempty"`
}
// Validate validates this prefilter by validating the nested spec and status
// objects; failures are aggregated into a composite validation error.
func (m *Prefilter) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateSpec validates the optional "spec" field; nested errors are
// re-rooted at the "spec" JSON path.
func (m *Prefilter) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
// validateStatus validates the optional "status" field; nested errors are
// re-rooted at the "status" JSON path.
func (m *Prefilter) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this prefilter based on the context it is used,
// delegating to the nested spec and status objects.
func (m *Prefilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateSpec context-validates the optional "spec" field; zero
// values are skipped.
func (m *Prefilter) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
// contextValidateStatus context-validates the optional "status" field; zero
// values are skipped.
func (m *Prefilter) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *Prefilter) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *Prefilter) UnmarshalBinary(b []byte) error {
var res Prefilter
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PrefilterSpec CIDR ranges implemented in the Prefilter
//
// swagger:model PrefilterSpec
type PrefilterSpec struct {
// deny
Deny []string `json:"deny"`
// revision
Revision int64 `json:"revision,omitempty"`
}
// Validate validates this prefilter spec. The model declares no constrained
// fields, so validation always succeeds.
func (m *PrefilterSpec) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this prefilter spec based on context it is used.
// No field requires context-based validation, so this is a no-op.
func (m *PrefilterSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *PrefilterSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *PrefilterSpec) UnmarshalBinary(b []byte) error {
var res PrefilterSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PrefilterStatus CIDR ranges implemented in the Prefilter
//
// swagger:model PrefilterStatus
type PrefilterStatus struct {
// realized
Realized *PrefilterSpec `json:"realized,omitempty"`
}
// Validate validates this prefilter status by validating the nested realized
// spec; failures are aggregated into a composite validation error.
func (m *PrefilterStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// validateRealized validates the optional "realized" field; nested errors
// are re-rooted at the "realized" JSON path.
func (m *PrefilterStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this prefilter status based on the context it is
// used, delegating to the nested realized spec.
func (m *PrefilterStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateRealized context-validates the optional "realized" field;
// zero values are skipped.
func (m *PrefilterStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *PrefilterStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *PrefilterStatus) UnmarshalBinary(b []byte) error {
var res PrefilterStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ProxyRedirect Configured proxy redirection state
//
// swagger:model ProxyRedirect
type ProxyRedirect struct {
// Name of the proxy redirect
Name string `json:"name,omitempty"`
// Name of the proxy this redirect points to
Proxy string `json:"proxy,omitempty"`
// Host port that this redirect points to
ProxyPort int64 `json:"proxy-port,omitempty"`
}
// Validate validates this proxy redirect. The model declares no constrained
// fields, so validation always succeeds.
func (m *ProxyRedirect) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this proxy redirect based on context it is used.
// No field requires context-based validation, so this is a no-op.
func (m *ProxyRedirect) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *ProxyRedirect) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *ProxyRedirect) UnmarshalBinary(b []byte) error {
var res ProxyRedirect
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ProxyStatistics Statistics of a set of proxy redirects for an endpoint
//
// +k8s:deepcopy-gen=true
//
// swagger:model ProxyStatistics
type ProxyStatistics struct {
// The port the proxy is listening on
AllocatedProxyPort int64 `json:"allocated-proxy-port,omitempty"`
// Location of where the redirect is installed
// Enum: ["ingress","egress"]
Location string `json:"location,omitempty"`
// The port subject to the redirect
Port int64 `json:"port,omitempty"`
// Name of the L7 protocol
Protocol string `json:"protocol,omitempty"`
// Statistics of this set of proxy redirect
Statistics *RequestResponseStatistics `json:"statistics,omitempty"`
}
// Validate validates this proxy statistics, checking the location enum and
// the nested statistics object; failures are aggregated into a composite
// validation error.
func (m *ProxyStatistics) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLocation(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatistics(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// proxyStatisticsTypeLocationPropEnum holds the allowed "location" values,
// built once at package init from the swagger enum literal.
var proxyStatisticsTypeLocationPropEnum []interface{}
func init() {
var res []string
// The panic is unreachable in practice: the literal is emitted by the
// generator and is always valid JSON.
if err := json.Unmarshal([]byte(`["ingress","egress"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
proxyStatisticsTypeLocationPropEnum = append(proxyStatisticsTypeLocationPropEnum, v)
}
}
const (
// ProxyStatisticsLocationIngress captures enum value "ingress"
ProxyStatisticsLocationIngress string = "ingress"
// ProxyStatisticsLocationEgress captures enum value "egress"
ProxyStatisticsLocationEgress string = "egress"
)
// validateLocationEnum checks value against the location enum
// (case-sensitive match).
func (m *ProxyStatistics) validateLocationEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, proxyStatisticsTypeLocationPropEnum, true); err != nil {
return err
}
return nil
}
// validateLocation validates the optional "location" field against the enum;
// the empty value is accepted since the field is not required.
func (m *ProxyStatistics) validateLocation(formats strfmt.Registry) error {
if swag.IsZero(m.Location) { // not required
return nil
}
// value enum
if err := m.validateLocationEnum("location", "body", m.Location); err != nil {
return err
}
return nil
}
// validateStatistics validates the optional "statistics" field; nested
// errors are re-rooted at the "statistics" JSON path.
func (m *ProxyStatistics) validateStatistics(formats strfmt.Registry) error {
if swag.IsZero(m.Statistics) { // not required
return nil
}
if m.Statistics != nil {
if err := m.Statistics.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statistics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statistics")
}
return err
}
}
return nil
}
// ContextValidate validate this proxy statistics based on the context it is
// used, delegating to the nested statistics object.
func (m *ProxyStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateStatistics(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// contextValidateStatistics context-validates the optional "statistics"
// field; zero values are skipped.
func (m *ProxyStatistics) contextValidateStatistics(ctx context.Context, formats strfmt.Registry) error {
if m.Statistics != nil {
if swag.IsZero(m.Statistics) { // not required
return nil
}
if err := m.Statistics.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statistics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statistics")
}
return err
}
}
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler by serializing the model
// to JSON. A nil receiver marshals to a nil byte slice.
func (m *ProxyStatistics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler by deserializing the
// JSON into a temporary value, replacing the receiver only on success.
func (m *ProxyStatistics) UnmarshalBinary(b []byte) error {
var res ProxyStatistics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ProxyStatus Status of proxy
//
// +k8s:deepcopy-gen=true
//
// swagger:model ProxyStatus
type ProxyStatus struct {
// Deployment mode of Envoy L7 proxy
// Enum: ["embedded","external"]
EnvoyDeploymentMode string `json:"envoy-deployment-mode,omitempty"`
// IP address that the proxy listens on
IP string `json:"ip,omitempty"`
// Port range used for proxying
PortRange string `json:"port-range,omitempty"`
// Detailed description of configured redirects
Redirects []*ProxyRedirect `json:"redirects"`
// Total number of listening proxy ports
TotalPorts int64 `json:"total-ports,omitempty"`
// Total number of ports configured to redirect to proxies
TotalRedirects int64 `json:"total-redirects,omitempty"`
}
// Validate validates this proxy status
func (m *ProxyStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEnvoyDeploymentMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateRedirects(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// proxyStatusTypeEnvoyDeploymentModePropEnum holds the allowed values for
// the envoy-deployment-mode property, populated once at package load.
var proxyStatusTypeEnvoyDeploymentModePropEnum []interface{}

func init() {
	var res []string
	// The literal below is copied from the swagger spec; failure to parse
	// it means the generated code is corrupt, so panic at load time.
	if err := json.Unmarshal([]byte(`["embedded","external"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		proxyStatusTypeEnvoyDeploymentModePropEnum = append(proxyStatusTypeEnvoyDeploymentModePropEnum, v)
	}
}
// Typed constants for the envoy-deployment-mode enum values.
const (

	// ProxyStatusEnvoyDeploymentModeEmbedded captures enum value "embedded"
	ProxyStatusEnvoyDeploymentModeEmbedded string = "embedded"

	// ProxyStatusEnvoyDeploymentModeExternal captures enum value "external"
	ProxyStatusEnvoyDeploymentModeExternal string = "external"
)
// validateEnvoyDeploymentModeEnum reports whether value is one of the
// allowed envoy-deployment-mode enum values (case-sensitive).
func (m *ProxyStatus) validateEnvoyDeploymentModeEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, proxyStatusTypeEnvoyDeploymentModePropEnum, true)
}
// validateEnvoyDeploymentMode validates the optional enum field.
func (m *ProxyStatus) validateEnvoyDeploymentMode(formats strfmt.Registry) error {
	// Field is optional; the zero value means "unset".
	if swag.IsZero(m.EnvoyDeploymentMode) {
		return nil
	}
	return m.validateEnvoyDeploymentModeEnum("envoy-deployment-mode", "body", m.EnvoyDeploymentMode)
}
// validateRedirects validates each non-zero entry of the redirects slice,
// prefixing any validation error with its index.
func (m *ProxyStatus) validateRedirects(formats strfmt.Registry) error {
	if swag.IsZero(m.Redirects) { // not required
		return nil
	}
	for i, redirect := range m.Redirects {
		// Nil/zero entries are not required and are skipped.
		if redirect == nil || swag.IsZero(redirect) {
			continue
		}
		err := redirect.Validate(formats)
		if err == nil {
			continue
		}
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("redirects" + "." + strconv.Itoa(i))
		}
		if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("redirects" + "." + strconv.Itoa(i))
		}
		return err
	}
	return nil
}
// ContextValidate validates this proxy status based on the context it is used in.
func (m *ProxyStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateRedirects(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateRedirects context-validates each entry of the redirects
// slice, prefixing any validation error with the element index.
func (m *ProxyStatus) contextValidateRedirects(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Redirects); i++ {
		if m.Redirects[i] != nil {
			if swag.IsZero(m.Redirects[i]) { // not required
				// BUG FIX: was "return nil", which silently skipped
				// context-validation of all remaining slice elements.
				continue
			}
			if err := m.Redirects[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("redirects" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("redirects" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *ProxyStatus) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *ProxyStatus) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded ProxyStatus
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// RemoteCluster Status of remote cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteCluster
type RemoteCluster struct {

	// Cluster configuration exposed by the remote cluster
	Config *RemoteClusterConfig `json:"config,omitempty"`

	// Indicates whether the connection to the remote kvstore is established
	Connected bool `json:"connected,omitempty"`

	// Time of last failure that occurred while attempting to reach the cluster
	// Format: date-time
	LastFailure strfmt.DateTime `json:"last-failure,omitempty"`

	// Name of the cluster
	Name string `json:"name,omitempty"`

	// Number of endpoints in the cluster
	NumEndpoints int64 `json:"num-endpoints,omitempty"`

	// Number of failures reaching the cluster
	NumFailures int64 `json:"num-failures,omitempty"`

	// Number of identities in the cluster
	NumIdentities int64 `json:"num-identities,omitempty"`

	// Number of nodes in the cluster
	NumNodes int64 `json:"num-nodes,omitempty"`

	// Number of MCS-API service exports in the cluster
	NumServiceExports int64 `json:"num-service-exports,omitempty"`

	// Number of services in the cluster
	NumSharedServices int64 `json:"num-shared-services,omitempty"`

	// Indicates readiness of the remote cluster
	Ready bool `json:"ready,omitempty"`

	// Status of the control plane
	Status string `json:"status,omitempty"`

	// Synchronization status about each resource type
	Synced *RemoteClusterSynced `json:"synced,omitempty"`
}
// Validate validates this remote cluster.
func (m *RemoteCluster) Validate(formats strfmt.Registry) error {
	var errs []error
	// Run every field validator and aggregate the failures.
	for _, check := range []func(strfmt.Registry) error{
		m.validateConfig,
		m.validateLastFailure,
		m.validateSynced,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateConfig validates the optional "config" sub-object when present.
func (m *RemoteCluster) validateConfig(formats strfmt.Registry) error {
	if swag.IsZero(m.Config) { // not required
		return nil
	}
	if m.Config == nil {
		return nil
	}
	err := m.Config.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("config")
	case *errors.CompositeError:
		return e.ValidateName("config")
	default:
		return err
	}
}
// validateLastFailure checks the date-time format of the optional
// "last-failure" field.
func (m *RemoteCluster) validateLastFailure(formats strfmt.Registry) error {
	if swag.IsZero(m.LastFailure) { // not required
		return nil
	}
	return validate.FormatOf("last-failure", "body", "date-time", m.LastFailure.String(), formats)
}
// validateSynced validates the optional "synced" sub-object when present.
func (m *RemoteCluster) validateSynced(formats strfmt.Registry) error {
	if swag.IsZero(m.Synced) { // not required
		return nil
	}
	if m.Synced == nil {
		return nil
	}
	err := m.Synced.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("synced")
	case *errors.CompositeError:
		return e.ValidateName("synced")
	default:
		return err
	}
}
// ContextValidate validates this remote cluster based on the context it is used in.
func (m *RemoteCluster) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateConfig,
		m.contextValidateSynced,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateConfig context-validates the optional "config" sub-object.
func (m *RemoteCluster) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error {
	// Skip when unset: nil or zero-valued config is not required.
	if m.Config == nil || swag.IsZero(m.Config) {
		return nil
	}
	err := m.Config.ContextValidate(ctx, formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("config")
	case *errors.CompositeError:
		return e.ValidateName("config")
	default:
		return err
	}
}
// contextValidateSynced context-validates the optional "synced" sub-object.
func (m *RemoteCluster) contextValidateSynced(ctx context.Context, formats strfmt.Registry) error {
	// Skip when unset: nil or zero-valued synced is not required.
	if m.Synced == nil || swag.IsZero(m.Synced) {
		return nil
	}
	err := m.Synced.ContextValidate(ctx, formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("synced")
	case *errors.CompositeError:
		return e.ValidateName("synced")
	default:
		return err
	}
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *RemoteCluster) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *RemoteCluster) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded RemoteCluster
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RemoteClusterConfig Cluster configuration exposed by the remote cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteClusterConfig
type RemoteClusterConfig struct {

	// The Cluster ID advertised by the remote cluster
	ClusterID int64 `json:"cluster-id,omitempty"`

	// Whether the remote cluster information is locally cached by kvstoremesh
	Kvstoremesh bool `json:"kvstoremesh,omitempty"`

	// Whether the configuration is required to be present
	Required bool `json:"required,omitempty"`

	// Whether the configuration has been correctly retrieved
	Retrieved bool `json:"retrieved,omitempty"`

	// Whether or not MCS-API ServiceExports is enabled by the cluster (null means unsupported).
	// Pointer so that "unknown/unsupported" (nil) is distinguishable from false.
	ServiceExportsEnabled *bool `json:"service-exports-enabled,omitempty"`

	// Whether the remote cluster supports per-prefix "synced" canaries
	SyncCanaries bool `json:"sync-canaries,omitempty"`
}
// Validate validates this remote cluster config.
// No field carries validation constraints, so this always succeeds.
func (m *RemoteClusterConfig) Validate(formats strfmt.Registry) error {
	return nil
}
// ContextValidate validates this remote cluster config based on context it is used.
// No field requires context-based validation, so this always succeeds.
func (m *RemoteClusterConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *RemoteClusterConfig) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *RemoteClusterConfig) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded RemoteClusterConfig
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RemoteClusterSynced Status of the synchronization with the remote cluster, about each resource
// type. A given resource is considered to be synchronized if the initial
// list of entries has been completely received from the remote cluster, and
// new events are currently being watched.
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteClusterSynced
type RemoteClusterSynced struct {

	// Endpoints synchronization status
	Endpoints bool `json:"endpoints,omitempty"`

	// Identities synchronization status
	Identities bool `json:"identities,omitempty"`

	// Nodes synchronization status
	Nodes bool `json:"nodes,omitempty"`

	// MCS-API service exports synchronization status (null means that the component is not watching service exports)
	// Pointer so that "not watching" (nil) is distinguishable from "not synced" (false).
	ServiceExports *bool `json:"service-exports,omitempty"`

	// Services synchronization status
	Services bool `json:"services,omitempty"`
}
// Validate validates this remote cluster synced.
// No field carries validation constraints, so this always succeeds.
func (m *RemoteClusterSynced) Validate(formats strfmt.Registry) error {
	return nil
}
// ContextValidate validates this remote cluster synced based on context it is used.
// No field requires context-based validation, so this always succeeds.
func (m *RemoteClusterSynced) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *RemoteClusterSynced) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *RemoteClusterSynced) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded RemoteClusterSynced
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RequestResponseStatistics Statistics of a proxy redirect
//
// +k8s:deepcopy-gen=true
//
// swagger:model RequestResponseStatistics
type RequestResponseStatistics struct {

	// requests
	Requests *MessageForwardingStatistics `json:"requests,omitempty"`

	// responses
	Responses *MessageForwardingStatistics `json:"responses,omitempty"`
}
// Validate validates this request response statistics.
func (m *RequestResponseStatistics) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateRequests,
		m.validateResponses,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateRequests validates the optional "requests" sub-object when present.
func (m *RequestResponseStatistics) validateRequests(formats strfmt.Registry) error {
	if swag.IsZero(m.Requests) { // not required
		return nil
	}
	if m.Requests == nil {
		return nil
	}
	err := m.Requests.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("requests")
	case *errors.CompositeError:
		return e.ValidateName("requests")
	default:
		return err
	}
}
// validateResponses validates the optional "responses" sub-object when present.
func (m *RequestResponseStatistics) validateResponses(formats strfmt.Registry) error {
	if swag.IsZero(m.Responses) { // not required
		return nil
	}
	if m.Responses == nil {
		return nil
	}
	err := m.Responses.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("responses")
	case *errors.CompositeError:
		return e.ValidateName("responses")
	default:
		return err
	}
}
// ContextValidate validates this request response statistics based on the
// context it is used in.
func (m *RequestResponseStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateRequests,
		m.contextValidateResponses,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateRequests context-validates the optional "requests" sub-object.
func (m *RequestResponseStatistics) contextValidateRequests(ctx context.Context, formats strfmt.Registry) error {
	if m.Requests == nil || swag.IsZero(m.Requests) {
		return nil
	}
	err := m.Requests.ContextValidate(ctx, formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("requests")
	case *errors.CompositeError:
		return e.ValidateName("requests")
	default:
		return err
	}
}
// contextValidateResponses context-validates the optional "responses" sub-object.
func (m *RequestResponseStatistics) contextValidateResponses(ctx context.Context, formats strfmt.Registry) error {
	if m.Responses == nil || swag.IsZero(m.Responses) {
		return nil
	}
	err := m.Responses.ContextValidate(ctx, formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("responses")
	case *errors.CompositeError:
		return e.ValidateName("responses")
	default:
		return err
	}
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *RequestResponseStatistics) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *RequestResponseStatistics) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded RequestResponseStatistics
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Routing Status of routing
//
// swagger:model Routing
type Routing struct {

	// Datapath routing mode for cross-cluster connectivity
	// Enum: ["Native","Tunnel"]
	InterHostRoutingMode string `json:"inter-host-routing-mode,omitempty"`

	// Datapath routing mode for connectivity within the host
	// Enum: ["BPF","Legacy"]
	IntraHostRoutingMode string `json:"intra-host-routing-mode,omitempty"`

	// Tunnel protocol in use for cross-cluster connectivity
	TunnelProtocol string `json:"tunnel-protocol,omitempty"`
}
// Validate validates this routing.
func (m *Routing) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateInterHostRoutingMode,
		m.validateIntraHostRoutingMode,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// routingTypeInterHostRoutingModePropEnum holds the allowed values for the
// inter-host-routing-mode property, populated once at package load.
var routingTypeInterHostRoutingModePropEnum []interface{}

func init() {
	var res []string
	// The literal below is copied from the swagger spec; failure to parse
	// it means the generated code is corrupt, so panic at load time.
	if err := json.Unmarshal([]byte(`["Native","Tunnel"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		routingTypeInterHostRoutingModePropEnum = append(routingTypeInterHostRoutingModePropEnum, v)
	}
}
// Typed constants for the inter-host-routing-mode enum values.
const (

	// RoutingInterHostRoutingModeNative captures enum value "Native"
	RoutingInterHostRoutingModeNative string = "Native"

	// RoutingInterHostRoutingModeTunnel captures enum value "Tunnel"
	RoutingInterHostRoutingModeTunnel string = "Tunnel"
)
// validateInterHostRoutingModeEnum reports whether value is one of the
// allowed inter-host-routing-mode enum values (case-sensitive).
func (m *Routing) validateInterHostRoutingModeEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, routingTypeInterHostRoutingModePropEnum, true)
}
// validateInterHostRoutingMode validates the optional enum field.
func (m *Routing) validateInterHostRoutingMode(formats strfmt.Registry) error {
	// Field is optional; the zero value means "unset".
	if swag.IsZero(m.InterHostRoutingMode) {
		return nil
	}
	return m.validateInterHostRoutingModeEnum("inter-host-routing-mode", "body", m.InterHostRoutingMode)
}
// routingTypeIntraHostRoutingModePropEnum holds the allowed values for the
// intra-host-routing-mode property, populated once at package load.
var routingTypeIntraHostRoutingModePropEnum []interface{}

func init() {
	var res []string
	// The literal below is copied from the swagger spec; failure to parse
	// it means the generated code is corrupt, so panic at load time.
	if err := json.Unmarshal([]byte(`["BPF","Legacy"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		routingTypeIntraHostRoutingModePropEnum = append(routingTypeIntraHostRoutingModePropEnum, v)
	}
}
// Typed constants for the intra-host-routing-mode enum values.
const (

	// RoutingIntraHostRoutingModeBPF captures enum value "BPF"
	RoutingIntraHostRoutingModeBPF string = "BPF"

	// RoutingIntraHostRoutingModeLegacy captures enum value "Legacy"
	RoutingIntraHostRoutingModeLegacy string = "Legacy"
)
// validateIntraHostRoutingModeEnum reports whether value is one of the
// allowed intra-host-routing-mode enum values (case-sensitive).
func (m *Routing) validateIntraHostRoutingModeEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, routingTypeIntraHostRoutingModePropEnum, true)
}
// validateIntraHostRoutingMode validates the optional enum field.
func (m *Routing) validateIntraHostRoutingMode(formats strfmt.Registry) error {
	// Field is optional; the zero value means "unset".
	if swag.IsZero(m.IntraHostRoutingMode) {
		return nil
	}
	return m.validateIntraHostRoutingModeEnum("intra-host-routing-mode", "body", m.IntraHostRoutingMode)
}
// ContextValidate validates this routing based on context it is used.
// No field requires context-based validation, so this always succeeds.
func (m *Routing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *Routing) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *Routing) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded Routing
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorCache cache of which identities match selectors in the policy repository
//
// swagger:model SelectorCache
type SelectorCache []*SelectorIdentityMapping
// Validate validates this selector cache, checking each non-zero entry and
// prefixing any validation error with its index.
//
// Note: the generated original declared a `res []error` accumulator that was
// never appended to, making its CompositeValidationError branch unreachable;
// the dead code is removed here with no behavior change (errors still cause
// an immediate return).
func (m SelectorCache) Validate(formats strfmt.Registry) error {
	for i := 0; i < len(m); i++ {
		if swag.IsZero(m[i]) { // not required
			continue
		}
		if m[i] != nil {
			if err := m[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName(strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName(strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// ContextValidate context-validates each entry of this selector cache,
// prefixing any validation error with its index.
//
// Fixes two defects in the generated original: (1) a zero-valued element
// caused `return nil`, silently skipping validation of all remaining
// elements — now `continue`; (2) a `res []error` accumulator was declared
// but never appended to, so its CompositeValidationError branch was dead
// code — removed with no behavior change.
func (m SelectorCache) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m); i++ {
		if m[i] != nil {
			if swag.IsZero(m[i]) { // not required
				continue
			}
			if err := m[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName(strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName(strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorEntry Mapping of FQDNSelector to corresponding regular expression
//
// swagger:model SelectorEntry
type SelectorEntry struct {

	// String representation of regular expression form of FQDNSelector
	RegexString string `json:"regexString,omitempty"`

	// FQDNSelector in string representation
	SelectorString string `json:"selectorString,omitempty"`
}
// Validate validates this selector entry.
// No field carries validation constraints, so this always succeeds.
func (m *SelectorEntry) Validate(formats strfmt.Registry) error {
	return nil
}
// ContextValidate validates this selector entry based on context it is used.
// No field requires context-based validation, so this always succeeds.
func (m *SelectorEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *SelectorEntry) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *SelectorEntry) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded SelectorEntry
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorIdentityMapping mapping of selector to identities which match it
//
// swagger:model SelectorIdentityMapping
type SelectorIdentityMapping struct {

	// identities mapping to this selector
	Identities []int64 `json:"identities"`

	// Labels are the metadata labels associated with the selector
	Labels LabelArray `json:"labels,omitempty"`

	// string form of selector
	Selector string `json:"selector,omitempty"`

	// number of users of this selector in the cache
	Users int64 `json:"users,omitempty"`
}
// Validate validates this selector identity mapping.
func (m *SelectorIdentityMapping) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateLabels(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateLabels validates the optional "labels" field when present.
func (m *SelectorIdentityMapping) validateLabels(formats strfmt.Registry) error {
	if swag.IsZero(m.Labels) { // not required
		return nil
	}
	err := m.Labels.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("labels")
	case *errors.CompositeError:
		return e.ValidateName("labels")
	default:
		return err
	}
}
// ContextValidate validates this selector identity mapping based on the
// context it is used in.
func (m *SelectorIdentityMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	if err := m.contextValidateLabels(ctx, formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateLabels context-validates the "labels" field (unconditionally,
// matching the generated contract for non-pointer custom types).
func (m *SelectorIdentityMapping) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
	err := m.Labels.ContextValidate(ctx, formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("labels")
	case *errors.CompositeError:
		return e.ValidateName("labels")
	default:
		return err
	}
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *SelectorIdentityMapping) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *SelectorIdentityMapping) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded SelectorIdentityMapping
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Service Collection of endpoints to be served
//
// swagger:model Service
type Service struct {

	// spec
	Spec *ServiceSpec `json:"spec,omitempty"`

	// status
	Status *ServiceStatus `json:"status,omitempty"`
}
// Validate validates this service.
func (m *Service) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateSpec,
		m.validateStatus,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateSpec validates the optional "spec" sub-object when present.
func (m *Service) validateSpec(formats strfmt.Registry) error {
	if swag.IsZero(m.Spec) { // not required
		return nil
	}
	if m.Spec == nil {
		return nil
	}
	err := m.Spec.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("spec")
	case *errors.CompositeError:
		return e.ValidateName("spec")
	default:
		return err
	}
}
// validateStatus validates the optional "status" sub-object when present.
func (m *Service) validateStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.Status) { // not required
		return nil
	}
	if m.Status == nil {
		return nil
	}
	err := m.Status.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("status")
	case *errors.CompositeError:
		return e.ValidateName("status")
	default:
		return err
	}
}
// ContextValidate validates this service based on the context it is used in.
func (m *Service) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateSpec,
		m.contextValidateStatus,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateSpec context-validates the optional "spec" sub-object.
func (m *Service) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
	if m.Spec == nil || swag.IsZero(m.Spec) {
		return nil
	}
	err := m.Spec.ContextValidate(ctx, formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("spec")
	case *errors.CompositeError:
		return e.ValidateName("spec")
	default:
		return err
	}
}
// contextValidateStatus context-validates the optional "status" sub-object.
func (m *Service) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
	if m.Status == nil || swag.IsZero(m.Status) {
		return nil
	}
	err := m.Status.ContextValidate(ctx, formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("status")
	case *errors.CompositeError:
		return e.ValidateName("status")
	default:
		return err
	}
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (m *Service) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	// A nil receiver marshals to nothing rather than erroring.
	return nil, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (m *Service) UnmarshalBinary(b []byte) error {
	// Decode into a scratch value first so *m is untouched on error.
	var decoded Service
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ServiceSpec Configuration of a service
//
// swagger:model ServiceSpec
type ServiceSpec struct {

	// List of backend addresses
	BackendAddresses []*BackendAddress `json:"backend-addresses"`

	// flags
	Flags *ServiceSpecFlags `json:"flags,omitempty"`

	// Frontend address
	// Required: true
	FrontendAddress *FrontendAddress `json:"frontend-address"`

	// Unique identification
	ID int64 `json:"id,omitempty"`

	// Update all services selecting the backends with their given states
	// (id and frontend are ignored)
	//
	UpdateServices bool `json:"updateServices,omitempty"`
}
// Validate validates this service spec.
func (m *ServiceSpec) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateBackendAddresses,
		m.validateFlags,
		m.validateFrontendAddress,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateBackendAddresses validates each non-zero backend address,
// prefixing any validation error with its index.
func (m *ServiceSpec) validateBackendAddresses(formats strfmt.Registry) error {
	if swag.IsZero(m.BackendAddresses) { // not required
		return nil
	}
	for i, addr := range m.BackendAddresses {
		// Nil/zero entries are not required and are skipped.
		if addr == nil || swag.IsZero(addr) {
			continue
		}
		err := addr.Validate(formats)
		if err == nil {
			continue
		}
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
		}
		if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
		}
		return err
	}
	return nil
}
// validateFlags validates the optional "flags" sub-object when present.
func (m *ServiceSpec) validateFlags(formats strfmt.Registry) error {
	if swag.IsZero(m.Flags) { // not required
		return nil
	}
	if m.Flags == nil {
		return nil
	}
	err := m.Flags.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("flags")
	case *errors.CompositeError:
		return e.ValidateName("flags")
	default:
		return err
	}
}
// validateFrontendAddress validates the required "frontend-address" field.
func (m *ServiceSpec) validateFrontendAddress(formats strfmt.Registry) error {
	// The field is required; a nil pointer is a validation error.
	if err := validate.Required("frontend-address", "body", m.FrontendAddress); err != nil {
		return err
	}
	if m.FrontendAddress == nil {
		return nil
	}
	err := m.FrontendAddress.Validate(formats)
	switch e := err.(type) {
	case nil:
		return nil
	case *errors.Validation:
		return e.ValidateName("frontend-address")
	case *errors.CompositeError:
		return e.ValidateName("frontend-address")
	default:
		return err
	}
}
// ContextValidate validates this service spec based on the context it is used in.
func (m *ServiceSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(context.Context, strfmt.Registry) error{
		m.contextValidateBackendAddresses,
		m.contextValidateFlags,
		m.contextValidateFrontendAddress,
	} {
		if err := check(ctx, formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// contextValidateBackendAddresses runs context validation over every entry
// of the backend-addresses list, qualifying errors with the element index.
//
// Fix: the generated loop returned nil as soon as it encountered a
// zero-value element, silently skipping context validation of all
// remaining entries; a zero element must only be skipped (continue),
// not abort the whole loop.
func (m *ServiceSpec) contextValidateBackendAddresses(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.BackendAddresses); i++ {
		if m.BackendAddresses[i] != nil {
			if swag.IsZero(m.BackendAddresses[i]) { // not required
				continue
			}
			if err := m.BackendAddresses[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}
// contextValidateFlags runs context validation on the flags sub-object when set.
func (m *ServiceSpec) contextValidateFlags(ctx context.Context, formats strfmt.Registry) error {
	if m.Flags == nil || swag.IsZero(m.Flags) { // not required
		return nil
	}
	err := m.Flags.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("flags")
	case *errors.CompositeError:
		return e.ValidateName("flags")
	}
	return err
}
// contextValidateFrontendAddress runs context validation on the frontend
// address when set.
func (m *ServiceSpec) contextValidateFrontendAddress(ctx context.Context, formats strfmt.Registry) error {
	if m.FrontendAddress == nil {
		return nil
	}
	err := m.FrontendAddress.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("frontend-address")
	case *errors.CompositeError:
		return e.ValidateName("frontend-address")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *ServiceSpec) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *ServiceSpec) UnmarshalBinary(b []byte) error {
	var decoded ServiceSpec
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// ServiceSpecFlags Optional service configuration flags
//
// All fields are optional; enum-constrained fields are checked in Validate.
//
// swagger:model ServiceSpecFlags
type ServiceSpecFlags struct {

	// Service cluster
	Cluster string `json:"cluster,omitempty"`

	// Service external traffic policy
	// Enum: ["Cluster","Local"]
	ExtTrafficPolicy string `json:"extTrafficPolicy,omitempty"`

	// Service health check node port
	HealthCheckNodePort uint16 `json:"healthCheckNodePort,omitempty"`

	// Service internal traffic policy
	// Enum: ["Cluster","Local"]
	IntTrafficPolicy string `json:"intTrafficPolicy,omitempty"`

	// Service name (e.g. Kubernetes service name)
	Name string `json:"name,omitempty"`

	// Service namespace (e.g. Kubernetes namespace)
	Namespace string `json:"namespace,omitempty"`

	// Service protocol NAT policy
	// Enum: ["None","Nat46","Nat64"]
	NatPolicy string `json:"natPolicy,omitempty"`

	// Service external traffic policy (deprecated in favor of extTrafficPolicy)
	// Enum: ["Cluster","Local"]
	TrafficPolicy string `json:"trafficPolicy,omitempty"`

	// Service type
	// Enum: ["ClusterIP","NodePort","ExternalIPs","HostPort","LoadBalancer","LocalRedirect"]
	Type string `json:"type,omitempty"`
}
// Validate validates this service spec flags
func (m *ServiceSpecFlags) Validate(formats strfmt.Registry) error {
	var res []error
	// Run every field validator and collect all failures.
	for _, fn := range []func(strfmt.Registry) error{
		m.validateExtTrafficPolicy,
		m.validateIntTrafficPolicy,
		m.validateNatPolicy,
		m.validateTrafficPolicy,
		m.validateType,
	} {
		if err := fn(formats); err != nil {
			res = append(res, err)
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Allowed values for ServiceSpecFlags.ExtTrafficPolicy, populated once at
// package init from the swagger enum definition.
var serviceSpecFlagsTypeExtTrafficPolicyPropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		serviceSpecFlagsTypeExtTrafficPolicyPropEnum = append(serviceSpecFlagsTypeExtTrafficPolicyPropEnum, v)
	}
}

const (

	// ServiceSpecFlagsExtTrafficPolicyCluster captures enum value "Cluster"
	ServiceSpecFlagsExtTrafficPolicyCluster string = "Cluster"

	// ServiceSpecFlagsExtTrafficPolicyLocal captures enum value "Local"
	ServiceSpecFlagsExtTrafficPolicyLocal string = "Local"
)
// validateExtTrafficPolicyEnum reports an error when value is not one of
// the allowed extTrafficPolicy enum values.
func (m *ServiceSpecFlags) validateExtTrafficPolicyEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, serviceSpecFlagsTypeExtTrafficPolicyPropEnum, true)
}
// validateExtTrafficPolicy validates the optional extTrafficPolicy field
// against its enum.
func (m *ServiceSpecFlags) validateExtTrafficPolicy(formats strfmt.Registry) error {
	if swag.IsZero(m.ExtTrafficPolicy) { // not required
		return nil
	}
	return m.validateExtTrafficPolicyEnum("flags"+"."+"extTrafficPolicy", "body", m.ExtTrafficPolicy)
}
// Allowed values for ServiceSpecFlags.IntTrafficPolicy, populated once at
// package init from the swagger enum definition.
var serviceSpecFlagsTypeIntTrafficPolicyPropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		serviceSpecFlagsTypeIntTrafficPolicyPropEnum = append(serviceSpecFlagsTypeIntTrafficPolicyPropEnum, v)
	}
}

const (

	// ServiceSpecFlagsIntTrafficPolicyCluster captures enum value "Cluster"
	ServiceSpecFlagsIntTrafficPolicyCluster string = "Cluster"

	// ServiceSpecFlagsIntTrafficPolicyLocal captures enum value "Local"
	ServiceSpecFlagsIntTrafficPolicyLocal string = "Local"
)
// validateIntTrafficPolicyEnum reports an error when value is not one of
// the allowed intTrafficPolicy enum values.
func (m *ServiceSpecFlags) validateIntTrafficPolicyEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, serviceSpecFlagsTypeIntTrafficPolicyPropEnum, true)
}
// validateIntTrafficPolicy validates the optional intTrafficPolicy field
// against its enum.
func (m *ServiceSpecFlags) validateIntTrafficPolicy(formats strfmt.Registry) error {
	if swag.IsZero(m.IntTrafficPolicy) { // not required
		return nil
	}
	return m.validateIntTrafficPolicyEnum("flags"+"."+"intTrafficPolicy", "body", m.IntTrafficPolicy)
}
// Allowed values for ServiceSpecFlags.NatPolicy, populated once at package
// init from the swagger enum definition.
var serviceSpecFlagsTypeNatPolicyPropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["None","Nat46","Nat64"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		serviceSpecFlagsTypeNatPolicyPropEnum = append(serviceSpecFlagsTypeNatPolicyPropEnum, v)
	}
}

const (

	// ServiceSpecFlagsNatPolicyNone captures enum value "None"
	ServiceSpecFlagsNatPolicyNone string = "None"

	// ServiceSpecFlagsNatPolicyNat46 captures enum value "Nat46"
	ServiceSpecFlagsNatPolicyNat46 string = "Nat46"

	// ServiceSpecFlagsNatPolicyNat64 captures enum value "Nat64"
	ServiceSpecFlagsNatPolicyNat64 string = "Nat64"
)
// validateNatPolicyEnum reports an error when value is not one of the
// allowed natPolicy enum values.
func (m *ServiceSpecFlags) validateNatPolicyEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, serviceSpecFlagsTypeNatPolicyPropEnum, true)
}
// validateNatPolicy validates the optional natPolicy field against its enum.
func (m *ServiceSpecFlags) validateNatPolicy(formats strfmt.Registry) error {
	if swag.IsZero(m.NatPolicy) { // not required
		return nil
	}
	return m.validateNatPolicyEnum("flags"+"."+"natPolicy", "body", m.NatPolicy)
}
// Allowed values for the deprecated ServiceSpecFlags.TrafficPolicy field,
// populated once at package init from the swagger enum definition.
var serviceSpecFlagsTypeTrafficPolicyPropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		serviceSpecFlagsTypeTrafficPolicyPropEnum = append(serviceSpecFlagsTypeTrafficPolicyPropEnum, v)
	}
}

const (

	// ServiceSpecFlagsTrafficPolicyCluster captures enum value "Cluster"
	ServiceSpecFlagsTrafficPolicyCluster string = "Cluster"

	// ServiceSpecFlagsTrafficPolicyLocal captures enum value "Local"
	ServiceSpecFlagsTrafficPolicyLocal string = "Local"
)
// validateTrafficPolicyEnum reports an error when value is not one of the
// allowed trafficPolicy enum values.
func (m *ServiceSpecFlags) validateTrafficPolicyEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, serviceSpecFlagsTypeTrafficPolicyPropEnum, true)
}
// validateTrafficPolicy validates the optional (deprecated) trafficPolicy
// field against its enum.
func (m *ServiceSpecFlags) validateTrafficPolicy(formats strfmt.Registry) error {
	if swag.IsZero(m.TrafficPolicy) { // not required
		return nil
	}
	return m.validateTrafficPolicyEnum("flags"+"."+"trafficPolicy", "body", m.TrafficPolicy)
}
// Allowed values for ServiceSpecFlags.Type, populated once at package init
// from the swagger enum definition.
var serviceSpecFlagsTypeTypePropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["ClusterIP","NodePort","ExternalIPs","HostPort","LoadBalancer","LocalRedirect"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		serviceSpecFlagsTypeTypePropEnum = append(serviceSpecFlagsTypeTypePropEnum, v)
	}
}

const (

	// ServiceSpecFlagsTypeClusterIP captures enum value "ClusterIP"
	ServiceSpecFlagsTypeClusterIP string = "ClusterIP"

	// ServiceSpecFlagsTypeNodePort captures enum value "NodePort"
	ServiceSpecFlagsTypeNodePort string = "NodePort"

	// ServiceSpecFlagsTypeExternalIPs captures enum value "ExternalIPs"
	ServiceSpecFlagsTypeExternalIPs string = "ExternalIPs"

	// ServiceSpecFlagsTypeHostPort captures enum value "HostPort"
	ServiceSpecFlagsTypeHostPort string = "HostPort"

	// ServiceSpecFlagsTypeLoadBalancer captures enum value "LoadBalancer"
	ServiceSpecFlagsTypeLoadBalancer string = "LoadBalancer"

	// ServiceSpecFlagsTypeLocalRedirect captures enum value "LocalRedirect"
	ServiceSpecFlagsTypeLocalRedirect string = "LocalRedirect"
)
// validateTypeEnum reports an error when value is not one of the allowed
// service type enum values.
func (m *ServiceSpecFlags) validateTypeEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, serviceSpecFlagsTypeTypePropEnum, true)
}
// validateType validates the optional type field against its enum.
func (m *ServiceSpecFlags) validateType(formats strfmt.Registry) error {
	if swag.IsZero(m.Type) { // not required
		return nil
	}
	return m.validateTypeEnum("flags"+"."+"type", "body", m.Type)
}
// ContextValidate validates this service spec flags based on context it is used
// (no context-sensitive rules apply to this model, so this is a no-op).
func (m *ServiceSpecFlags) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *ServiceSpecFlags) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *ServiceSpecFlags) UnmarshalBinary(b []byte) error {
	var decoded ServiceSpecFlags
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ServiceStatus Configuration of a service
//
// swagger:model ServiceStatus
type ServiceStatus struct {

	// realized service specification; nil when not set
	Realized *ServiceSpec `json:"realized,omitempty"`
}
// Validate validates this service status
func (m *ServiceStatus) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateRealized(formats); err != nil {
		res = append(res, err)
	}
	if len(res) == 0 {
		return nil
	}
	return errors.CompositeValidationError(res...)
}
// validateRealized validates the optional realized spec when present.
func (m *ServiceStatus) validateRealized(formats strfmt.Registry) error {
	if m.Realized == nil || swag.IsZero(m.Realized) { // not required
		return nil
	}
	err := m.Realized.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("realized")
	case *errors.CompositeError:
		return e.ValidateName("realized")
	}
	return err
}
// ContextValidate validate this service status based on the context it is used
func (m *ServiceStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error
	if err := m.contextValidateRealized(ctx, formats); err != nil {
		res = append(res, err)
	}
	if len(res) == 0 {
		return nil
	}
	return errors.CompositeValidationError(res...)
}
// contextValidateRealized runs context validation on the realized spec when set.
func (m *ServiceStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
	if m.Realized == nil || swag.IsZero(m.Realized) { // not required
		return nil
	}
	err := m.Realized.ContextValidate(ctx, formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("realized")
	case *errors.CompositeError:
		return e.ValidateName("realized")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *ServiceStatus) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *ServiceStatus) UnmarshalBinary(b []byte) error {
	var decoded ServiceStatus
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Srv6 Status of the SRv6
//
// swagger:model Srv6
type Srv6 struct {

	// enabled reports whether SRv6 is active
	Enabled bool `json:"enabled,omitempty"`

	// srv6 encap mode
	// Enum: ["SRH","Reduced"]
	Srv6EncapMode string `json:"srv6EncapMode,omitempty"`
}
// Validate validates this srv6
func (m *Srv6) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateSrv6EncapMode(formats); err != nil {
		res = append(res, err)
	}
	if len(res) == 0 {
		return nil
	}
	return errors.CompositeValidationError(res...)
}
// Allowed values for Srv6.Srv6EncapMode, populated once at package init
// from the swagger enum definition.
var srv6TypeSrv6EncapModePropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["SRH","Reduced"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		srv6TypeSrv6EncapModePropEnum = append(srv6TypeSrv6EncapModePropEnum, v)
	}
}

const (

	// Srv6Srv6EncapModeSRH captures enum value "SRH"
	Srv6Srv6EncapModeSRH string = "SRH"

	// Srv6Srv6EncapModeReduced captures enum value "Reduced"
	Srv6Srv6EncapModeReduced string = "Reduced"
)
// validateSrv6EncapModeEnum reports an error when value is not one of the
// allowed srv6EncapMode enum values.
func (m *Srv6) validateSrv6EncapModeEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, srv6TypeSrv6EncapModePropEnum, true)
}
// validateSrv6EncapMode validates the optional srv6EncapMode field against
// its enum.
func (m *Srv6) validateSrv6EncapMode(formats strfmt.Registry) error {
	if swag.IsZero(m.Srv6EncapMode) { // not required
		return nil
	}
	return m.validateSrv6EncapModeEnum("srv6EncapMode", "body", m.Srv6EncapMode)
}
// ContextValidate validates this srv6 based on context it is used
// (no context-sensitive rules apply to this model, so this is a no-op).
func (m *Srv6) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *Srv6) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *Srv6) UnmarshalBinary(b []byte) error {
	var decoded Srv6
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// StateDBQuery StateDB query
//
// swagger:model StateDBQuery
type StateDBQuery struct {

	// Index to query against
	Index string `json:"index,omitempty"`

	// Key to query with. Base64 encoded.
	Key string `json:"key,omitempty"`

	// LowerBound prefix search or full-matching Get
	Lowerbound bool `json:"lowerbound,omitempty"`

	// Name of the table to query
	Table string `json:"table,omitempty"`
}
// Validate validates this state d b query
// (all fields are unconstrained scalars, so there is nothing to check).
func (m *StateDBQuery) Validate(formats strfmt.Registry) error {
	return nil
}
// ContextValidate validates this state d b query based on context it is used
// (no context-sensitive rules apply to this model, so this is a no-op).
func (m *StateDBQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *StateDBQuery) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *StateDBQuery) UnmarshalBinary(b []byte) error {
	var decoded StateDBQuery
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Status Status of an individual component
//
// swagger:model Status
type Status struct {

	// Human readable status/error/warning message
	Msg string `json:"msg,omitempty"`

	// State the component is in
	// Enum: ["Ok","Warning","Failure","Disabled"]
	State string `json:"state,omitempty"`
}
// Validate validates this status
func (m *Status) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateState(formats); err != nil {
		res = append(res, err)
	}
	if len(res) == 0 {
		return nil
	}
	return errors.CompositeValidationError(res...)
}
// Allowed values for Status.State, populated once at package init from the
// swagger enum definition.
var statusTypeStatePropEnum []interface{}

func init() {
	var res []string
	if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
		panic(err)
	}
	for _, v := range res {
		statusTypeStatePropEnum = append(statusTypeStatePropEnum, v)
	}
}

const (

	// StatusStateOk captures enum value "Ok"
	StatusStateOk string = "Ok"

	// StatusStateWarning captures enum value "Warning"
	StatusStateWarning string = "Warning"

	// StatusStateFailure captures enum value "Failure"
	StatusStateFailure string = "Failure"

	// StatusStateDisabled captures enum value "Disabled"
	StatusStateDisabled string = "Disabled"
)
// validateStateEnum reports an error when value is not one of the allowed
// state enum values.
func (m *Status) validateStateEnum(path, location string, value string) error {
	return validate.EnumCase(path, location, value, statusTypeStatePropEnum, true)
}
// validateState validates the optional state field against its enum.
func (m *Status) validateState(formats strfmt.Registry) error {
	if swag.IsZero(m.State) { // not required
		return nil
	}
	return m.validateStateEnum("state", "body", m.State)
}
// ContextValidate validates this status based on context it is used
// (no context-sensitive rules apply to this model, so this is a no-op).
func (m *Status) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON serialization.
func (m *Status) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *Status) UnmarshalBinary(b []byte) error {
	var decoded Status
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// StatusResponse Health and status information of daemon
//
// +k8s:deepcopy-gen=true
//
// swagger:model StatusResponse
type StatusResponse struct {

	// Status of core datapath attachment mode
	AttachMode AttachMode `json:"attach-mode,omitempty"`

	// Status of Mutual Authentication certificate provider
	AuthCertificateProvider *Status `json:"auth-certificate-provider,omitempty"`

	// Status of bandwidth manager
	BandwidthManager *BandwidthManager `json:"bandwidth-manager,omitempty"`

	// Status of BPF maps
	BpfMaps *BPFMapStatus `json:"bpf-maps,omitempty"`

	// Status of Cilium daemon
	Cilium *Status `json:"cilium,omitempty"`

	// When supported by the API, this client ID should be used by the
	// client when making another request to the server.
	// See for example "/cluster/nodes".
	//
	ClientID int64 `json:"client-id,omitempty"`

	// Status of clock source
	ClockSource *ClockSource `json:"clock-source,omitempty"`

	// Status of cluster
	Cluster *ClusterStatus `json:"cluster,omitempty"`

	// Status of ClusterMesh
	ClusterMesh *ClusterMeshStatus `json:"cluster-mesh,omitempty"`

	// Status of CNI chaining
	CniChaining *CNIChainingStatus `json:"cni-chaining,omitempty"`

	// Status of the CNI configuration file
	CniFile *Status `json:"cni-file,omitempty"`

	// Status of local container runtime
	ContainerRuntime *Status `json:"container-runtime,omitempty"`

	// Status of all endpoint controllers
	Controllers ControllerStatuses `json:"controllers,omitempty"`

	// Status of datapath mode
	DatapathMode DatapathMode `json:"datapath-mode,omitempty"`

	// Status of transparent encryption
	Encryption *EncryptionStatus `json:"encryption,omitempty"`

	// Status of the host firewall
	HostFirewall *HostFirewall `json:"host-firewall,omitempty"`

	// Status of Hubble server
	Hubble *HubbleStatus `json:"hubble,omitempty"`

	// Status of Hubble metrics server
	HubbleMetrics *HubbleMetricsStatus `json:"hubble-metrics,omitempty"`

	// Status of identity range of the cluster
	IdentityRange *IdentityRange `json:"identity-range,omitempty"`

	// Status of IP address management
	Ipam *IPAMStatus `json:"ipam,omitempty"`

	// Status of IPv4 BIG TCP
	IPV4BigTCP *IPV4BigTCP `json:"ipv4-big-tcp,omitempty"`

	// Status of IPv6 BIG TCP
	IPV6BigTCP *IPV6BigTCP `json:"ipv6-big-tcp,omitempty"`

	// Status of kube-proxy replacement
	KubeProxyReplacement *KubeProxyReplacement `json:"kube-proxy-replacement,omitempty"`

	// Status of Kubernetes integration
	Kubernetes *K8sStatus `json:"kubernetes,omitempty"`

	// Status of key/value datastore
	Kvstore *Status `json:"kvstore,omitempty"`

	// Status of masquerading
	Masquerading *Masquerading `json:"masquerading,omitempty"`

	// Status of the node monitor
	NodeMonitor *MonitorStatus `json:"nodeMonitor,omitempty"`

	// Status of proxy
	Proxy *ProxyStatus `json:"proxy,omitempty"`

	// Status of routing
	Routing *Routing `json:"routing,omitempty"`

	// Status of SRv6
	Srv6 *Srv6 `json:"srv6,omitempty"`

	// List of stale information in the status
	Stale map[string]strfmt.DateTime `json:"stale,omitempty"`
}
// Validate validates this status response
func (m *StatusResponse) Validate(formats strfmt.Registry) error {
	var res []error
	// Run every field validator (same order as the generated code) and
	// collect all failures into one composite error.
	for _, fn := range []func(strfmt.Registry) error{
		m.validateAttachMode,
		m.validateAuthCertificateProvider,
		m.validateBandwidthManager,
		m.validateBpfMaps,
		m.validateCilium,
		m.validateClockSource,
		m.validateCluster,
		m.validateClusterMesh,
		m.validateCniChaining,
		m.validateCniFile,
		m.validateContainerRuntime,
		m.validateControllers,
		m.validateDatapathMode,
		m.validateEncryption,
		m.validateHostFirewall,
		m.validateHubble,
		m.validateHubbleMetrics,
		m.validateIdentityRange,
		m.validateIpam,
		m.validateIPV4BigTCP,
		m.validateIPV6BigTCP,
		m.validateKubeProxyReplacement,
		m.validateKubernetes,
		m.validateKvstore,
		m.validateMasquerading,
		m.validateNodeMonitor,
		m.validateProxy,
		m.validateRouting,
		m.validateSrv6,
		m.validateStale,
	} {
		if err := fn(formats); err != nil {
			res = append(res, err)
		}
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// validateAttachMode validates the optional attach-mode field.
func (m *StatusResponse) validateAttachMode(formats strfmt.Registry) error {
	if swag.IsZero(m.AttachMode) { // not required
		return nil
	}
	err := m.AttachMode.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("attach-mode")
	case *errors.CompositeError:
		return e.ValidateName("attach-mode")
	}
	return err
}
// validateAuthCertificateProvider validates the optional
// auth-certificate-provider sub-status when present.
func (m *StatusResponse) validateAuthCertificateProvider(formats strfmt.Registry) error {
	if m.AuthCertificateProvider == nil || swag.IsZero(m.AuthCertificateProvider) { // not required
		return nil
	}
	err := m.AuthCertificateProvider.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("auth-certificate-provider")
	case *errors.CompositeError:
		return e.ValidateName("auth-certificate-provider")
	}
	return err
}
// validateBandwidthManager validates the optional bandwidth-manager
// sub-status when present.
func (m *StatusResponse) validateBandwidthManager(formats strfmt.Registry) error {
	if m.BandwidthManager == nil || swag.IsZero(m.BandwidthManager) { // not required
		return nil
	}
	err := m.BandwidthManager.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("bandwidth-manager")
	case *errors.CompositeError:
		return e.ValidateName("bandwidth-manager")
	}
	return err
}
// validateBpfMaps validates the optional bpf-maps sub-status when present.
func (m *StatusResponse) validateBpfMaps(formats strfmt.Registry) error {
	if m.BpfMaps == nil || swag.IsZero(m.BpfMaps) { // not required
		return nil
	}
	err := m.BpfMaps.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("bpf-maps")
	case *errors.CompositeError:
		return e.ValidateName("bpf-maps")
	}
	return err
}
// validateCilium validates the optional cilium sub-status when present.
func (m *StatusResponse) validateCilium(formats strfmt.Registry) error {
	if m.Cilium == nil || swag.IsZero(m.Cilium) { // not required
		return nil
	}
	err := m.Cilium.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("cilium")
	case *errors.CompositeError:
		return e.ValidateName("cilium")
	}
	return err
}
// validateClockSource validates the optional clock-source sub-status when present.
func (m *StatusResponse) validateClockSource(formats strfmt.Registry) error {
	if m.ClockSource == nil || swag.IsZero(m.ClockSource) { // not required
		return nil
	}
	err := m.ClockSource.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("clock-source")
	case *errors.CompositeError:
		return e.ValidateName("clock-source")
	}
	return err
}
// validateCluster validates the optional cluster sub-status when present.
func (m *StatusResponse) validateCluster(formats strfmt.Registry) error {
	if m.Cluster == nil || swag.IsZero(m.Cluster) { // not required
		return nil
	}
	err := m.Cluster.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("cluster")
	case *errors.CompositeError:
		return e.ValidateName("cluster")
	}
	return err
}
// validateClusterMesh validates the optional cluster-mesh sub-status when present.
func (m *StatusResponse) validateClusterMesh(formats strfmt.Registry) error {
	if m.ClusterMesh == nil || swag.IsZero(m.ClusterMesh) { // not required
		return nil
	}
	err := m.ClusterMesh.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("cluster-mesh")
	case *errors.CompositeError:
		return e.ValidateName("cluster-mesh")
	}
	return err
}
// validateCniChaining validates the optional cni-chaining sub-status when present.
func (m *StatusResponse) validateCniChaining(formats strfmt.Registry) error {
	if m.CniChaining == nil || swag.IsZero(m.CniChaining) { // not required
		return nil
	}
	err := m.CniChaining.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("cni-chaining")
	case *errors.CompositeError:
		return e.ValidateName("cni-chaining")
	}
	return err
}
// validateCniFile validates the optional cni-file sub-status when present.
func (m *StatusResponse) validateCniFile(formats strfmt.Registry) error {
	if m.CniFile == nil || swag.IsZero(m.CniFile) { // not required
		return nil
	}
	err := m.CniFile.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("cni-file")
	case *errors.CompositeError:
		return e.ValidateName("cni-file")
	}
	return err
}
// validateContainerRuntime validates the optional container-runtime
// sub-status when present.
func (m *StatusResponse) validateContainerRuntime(formats strfmt.Registry) error {
	if m.ContainerRuntime == nil || swag.IsZero(m.ContainerRuntime) { // not required
		return nil
	}
	err := m.ContainerRuntime.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("container-runtime")
	case *errors.CompositeError:
		return e.ValidateName("container-runtime")
	}
	return err
}
// validateControllers validates the optional controllers collection.
func (m *StatusResponse) validateControllers(formats strfmt.Registry) error {
	if swag.IsZero(m.Controllers) { // not required
		return nil
	}
	err := m.Controllers.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("controllers")
	case *errors.CompositeError:
		return e.ValidateName("controllers")
	}
	return err
}
// validateDatapathMode validates the optional datapath-mode field.
func (m *StatusResponse) validateDatapathMode(formats strfmt.Registry) error {
	if swag.IsZero(m.DatapathMode) { // not required
		return nil
	}
	err := m.DatapathMode.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("datapath-mode")
	case *errors.CompositeError:
		return e.ValidateName("datapath-mode")
	}
	return err
}
// validateEncryption validates the optional encryption sub-status when present.
func (m *StatusResponse) validateEncryption(formats strfmt.Registry) error {
	if m.Encryption == nil || swag.IsZero(m.Encryption) { // not required
		return nil
	}
	err := m.Encryption.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("encryption")
	case *errors.CompositeError:
		return e.ValidateName("encryption")
	}
	return err
}
// validateHostFirewall validates the optional host-firewall sub-status when present.
func (m *StatusResponse) validateHostFirewall(formats strfmt.Registry) error {
	if m.HostFirewall == nil || swag.IsZero(m.HostFirewall) { // not required
		return nil
	}
	err := m.HostFirewall.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("host-firewall")
	case *errors.CompositeError:
		return e.ValidateName("host-firewall")
	}
	return err
}
// validateHubble validates the optional hubble sub-status when present.
func (m *StatusResponse) validateHubble(formats strfmt.Registry) error {
	if m.Hubble == nil || swag.IsZero(m.Hubble) { // not required
		return nil
	}
	err := m.Hubble.Validate(formats)
	if err == nil {
		return nil
	}
	switch e := err.(type) {
	case *errors.Validation:
		return e.ValidateName("hubble")
	case *errors.CompositeError:
		return e.ValidateName("hubble")
	}
	return err
}
func (m *StatusResponse) validateHubbleMetrics(formats strfmt.Registry) error {
if swag.IsZero(m.HubbleMetrics) { // not required
return nil
}
if m.HubbleMetrics != nil {
if err := m.HubbleMetrics.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("hubble-metrics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("hubble-metrics")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIdentityRange(formats strfmt.Registry) error {
if swag.IsZero(m.IdentityRange) { // not required
return nil
}
if m.IdentityRange != nil {
if err := m.IdentityRange.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity-range")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity-range")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIpam(formats strfmt.Registry) error {
if swag.IsZero(m.Ipam) { // not required
return nil
}
if m.Ipam != nil {
if err := m.Ipam.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipam")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipam")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIPV4BigTCP(formats strfmt.Registry) error {
if swag.IsZero(m.IPV4BigTCP) { // not required
return nil
}
if m.IPV4BigTCP != nil {
if err := m.IPV4BigTCP.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIPV6BigTCP(formats strfmt.Registry) error {
if swag.IsZero(m.IPV6BigTCP) { // not required
return nil
}
if m.IPV6BigTCP != nil {
if err := m.IPV6BigTCP.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKubeProxyReplacement(formats strfmt.Registry) error {
if swag.IsZero(m.KubeProxyReplacement) { // not required
return nil
}
if m.KubeProxyReplacement != nil {
if err := m.KubeProxyReplacement.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kube-proxy-replacement")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kube-proxy-replacement")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKubernetes(formats strfmt.Registry) error {
if swag.IsZero(m.Kubernetes) { // not required
return nil
}
if m.Kubernetes != nil {
if err := m.Kubernetes.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kubernetes")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kubernetes")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKvstore(formats strfmt.Registry) error {
if swag.IsZero(m.Kvstore) { // not required
return nil
}
if m.Kvstore != nil {
if err := m.Kvstore.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstore")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstore")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateMasquerading(formats strfmt.Registry) error {
if swag.IsZero(m.Masquerading) { // not required
return nil
}
if m.Masquerading != nil {
if err := m.Masquerading.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masquerading")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masquerading")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateNodeMonitor(formats strfmt.Registry) error {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if m.NodeMonitor != nil {
if err := m.NodeMonitor.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateProxy(formats strfmt.Registry) error {
if swag.IsZero(m.Proxy) { // not required
return nil
}
if m.Proxy != nil {
if err := m.Proxy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateRouting(formats strfmt.Registry) error {
if swag.IsZero(m.Routing) { // not required
return nil
}
if m.Routing != nil {
if err := m.Routing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("routing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("routing")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateSrv6(formats strfmt.Registry) error {
if swag.IsZero(m.Srv6) { // not required
return nil
}
if m.Srv6 != nil {
if err := m.Srv6.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("srv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("srv6")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateStale(formats strfmt.Registry) error {
if swag.IsZero(m.Stale) { // not required
return nil
}
for k := range m.Stale {
if err := validate.FormatOf("stale"+"."+k, "body", "date-time", m.Stale[k].String(), formats); err != nil {
return err
}
}
return nil
}
// ContextValidate validate this status response based on the context it is used.
// It fans out to one contextValidate helper per nested field, accumulates all
// errors, and returns them wrapped in a single CompositeError (nil if none).
func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateAttachMode(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateAuthCertificateProvider(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateBandwidthManager(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateBpfMaps(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateCilium(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateClockSource(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateCluster(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateClusterMesh(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateCniChaining(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateCniFile(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateContainerRuntime(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateControllers(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateDatapathMode(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateEncryption(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateHostFirewall(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateHubble(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateHubbleMetrics(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateIdentityRange(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateIpam(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateIPV4BigTCP(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateIPV6BigTCP(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateKubeProxyReplacement(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateKubernetes(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateKvstore(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateMasquerading(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateNodeMonitor(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateProxy(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateRouting(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateSrv6(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Context-phase validators for StatusResponse, called from ContextValidate.
// Shared pattern: pointer fields are skipped when nil or zero-valued, then
// the field type's own ContextValidate runs, and any validation error is
// re-rooted at the field's JSON name.

func (m *StatusResponse) contextValidateAttachMode(ctx context.Context, formats strfmt.Registry) error {

	if swag.IsZero(m.AttachMode) { // not required
		return nil
	}

	if err := m.AttachMode.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("attach-mode")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("attach-mode")
		}
		return err
	}

	return nil
}

func (m *StatusResponse) contextValidateAuthCertificateProvider(ctx context.Context, formats strfmt.Registry) error {

	if m.AuthCertificateProvider != nil {

		if swag.IsZero(m.AuthCertificateProvider) { // not required
			return nil
		}

		if err := m.AuthCertificateProvider.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("auth-certificate-provider")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("auth-certificate-provider")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateBandwidthManager(ctx context.Context, formats strfmt.Registry) error {

	if m.BandwidthManager != nil {

		if swag.IsZero(m.BandwidthManager) { // not required
			return nil
		}

		if err := m.BandwidthManager.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("bandwidth-manager")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("bandwidth-manager")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateBpfMaps(ctx context.Context, formats strfmt.Registry) error {

	if m.BpfMaps != nil {

		if swag.IsZero(m.BpfMaps) { // not required
			return nil
		}

		if err := m.BpfMaps.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("bpf-maps")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("bpf-maps")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateCilium(ctx context.Context, formats strfmt.Registry) error {

	if m.Cilium != nil {

		if swag.IsZero(m.Cilium) { // not required
			return nil
		}

		if err := m.Cilium.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("cilium")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("cilium")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateClockSource(ctx context.Context, formats strfmt.Registry) error {

	if m.ClockSource != nil {

		if swag.IsZero(m.ClockSource) { // not required
			return nil
		}

		if err := m.ClockSource.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("clock-source")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("clock-source")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateCluster(ctx context.Context, formats strfmt.Registry) error {

	if m.Cluster != nil {

		if swag.IsZero(m.Cluster) { // not required
			return nil
		}

		if err := m.Cluster.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("cluster")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("cluster")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateClusterMesh(ctx context.Context, formats strfmt.Registry) error {

	if m.ClusterMesh != nil {

		if swag.IsZero(m.ClusterMesh) { // not required
			return nil
		}

		if err := m.ClusterMesh.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("cluster-mesh")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("cluster-mesh")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateCniChaining(ctx context.Context, formats strfmt.Registry) error {

	if m.CniChaining != nil {

		if swag.IsZero(m.CniChaining) { // not required
			return nil
		}

		if err := m.CniChaining.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("cni-chaining")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("cni-chaining")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateCniFile(ctx context.Context, formats strfmt.Registry) error {

	if m.CniFile != nil {

		if swag.IsZero(m.CniFile) { // not required
			return nil
		}

		if err := m.CniFile.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("cni-file")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("cni-file")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateContainerRuntime(ctx context.Context, formats strfmt.Registry) error {

	if m.ContainerRuntime != nil {

		if swag.IsZero(m.ContainerRuntime) { // not required
			return nil
		}

		if err := m.ContainerRuntime.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("container-runtime")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("container-runtime")
			}
			return err
		}
	}

	return nil
}

// contextValidateControllers has no nil/IsZero guard: Controllers carries its
// own ContextValidate which handles emptiness itself.
func (m *StatusResponse) contextValidateControllers(ctx context.Context, formats strfmt.Registry) error {

	if err := m.Controllers.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("controllers")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("controllers")
		}
		return err
	}

	return nil
}

func (m *StatusResponse) contextValidateDatapathMode(ctx context.Context, formats strfmt.Registry) error {

	if swag.IsZero(m.DatapathMode) { // not required
		return nil
	}

	if err := m.DatapathMode.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("datapath-mode")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("datapath-mode")
		}
		return err
	}

	return nil
}

func (m *StatusResponse) contextValidateEncryption(ctx context.Context, formats strfmt.Registry) error {

	if m.Encryption != nil {

		if swag.IsZero(m.Encryption) { // not required
			return nil
		}

		if err := m.Encryption.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("encryption")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("encryption")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateHostFirewall(ctx context.Context, formats strfmt.Registry) error {

	if m.HostFirewall != nil {

		if swag.IsZero(m.HostFirewall) { // not required
			return nil
		}

		if err := m.HostFirewall.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("host-firewall")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("host-firewall")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateHubble(ctx context.Context, formats strfmt.Registry) error {

	if m.Hubble != nil {

		if swag.IsZero(m.Hubble) { // not required
			return nil
		}

		if err := m.Hubble.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("hubble")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("hubble")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateHubbleMetrics(ctx context.Context, formats strfmt.Registry) error {

	if m.HubbleMetrics != nil {

		if swag.IsZero(m.HubbleMetrics) { // not required
			return nil
		}

		if err := m.HubbleMetrics.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("hubble-metrics")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("hubble-metrics")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateIdentityRange(ctx context.Context, formats strfmt.Registry) error {

	if m.IdentityRange != nil {

		if swag.IsZero(m.IdentityRange) { // not required
			return nil
		}

		if err := m.IdentityRange.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("identity-range")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("identity-range")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateIpam(ctx context.Context, formats strfmt.Registry) error {

	if m.Ipam != nil {

		if swag.IsZero(m.Ipam) { // not required
			return nil
		}

		if err := m.Ipam.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ipam")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ipam")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateIPV4BigTCP(ctx context.Context, formats strfmt.Registry) error {

	if m.IPV4BigTCP != nil {

		if swag.IsZero(m.IPV4BigTCP) { // not required
			return nil
		}

		if err := m.IPV4BigTCP.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ipv4-big-tcp")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ipv4-big-tcp")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateIPV6BigTCP(ctx context.Context, formats strfmt.Registry) error {

	if m.IPV6BigTCP != nil {

		if swag.IsZero(m.IPV6BigTCP) { // not required
			return nil
		}

		if err := m.IPV6BigTCP.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("ipv6-big-tcp")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("ipv6-big-tcp")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateKubeProxyReplacement(ctx context.Context, formats strfmt.Registry) error {

	if m.KubeProxyReplacement != nil {

		if swag.IsZero(m.KubeProxyReplacement) { // not required
			return nil
		}

		if err := m.KubeProxyReplacement.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("kube-proxy-replacement")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("kube-proxy-replacement")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateKubernetes(ctx context.Context, formats strfmt.Registry) error {

	if m.Kubernetes != nil {

		if swag.IsZero(m.Kubernetes) { // not required
			return nil
		}

		if err := m.Kubernetes.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("kubernetes")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("kubernetes")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateKvstore(ctx context.Context, formats strfmt.Registry) error {

	if m.Kvstore != nil {

		if swag.IsZero(m.Kvstore) { // not required
			return nil
		}

		if err := m.Kvstore.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("kvstore")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("kvstore")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateMasquerading(ctx context.Context, formats strfmt.Registry) error {

	if m.Masquerading != nil {

		if swag.IsZero(m.Masquerading) { // not required
			return nil
		}

		if err := m.Masquerading.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("masquerading")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("masquerading")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateNodeMonitor(ctx context.Context, formats strfmt.Registry) error {

	if m.NodeMonitor != nil {

		if swag.IsZero(m.NodeMonitor) { // not required
			return nil
		}

		if err := m.NodeMonitor.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("nodeMonitor")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("nodeMonitor")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateProxy(ctx context.Context, formats strfmt.Registry) error {

	if m.Proxy != nil {

		if swag.IsZero(m.Proxy) { // not required
			return nil
		}

		if err := m.Proxy.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("proxy")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("proxy")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateRouting(ctx context.Context, formats strfmt.Registry) error {

	if m.Routing != nil {

		if swag.IsZero(m.Routing) { // not required
			return nil
		}

		if err := m.Routing.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("routing")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("routing")
			}
			return err
		}
	}

	return nil
}

func (m *StatusResponse) contextValidateSrv6(ctx context.Context, formats strfmt.Registry) error {

	if m.Srv6 != nil {

		if swag.IsZero(m.Srv6) { // not required
			return nil
		}

		if err := m.Srv6.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("srv6")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("srv6")
			}
			return err
		}
	}

	return nil
}
// MarshalBinary interface implementation.
// Serializes the receiver as JSON; a nil receiver yields (nil, nil).
func (m *StatusResponse) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation.
// Decodes into a temporary so the receiver is only overwritten on success.
func (m *StatusResponse) UnmarshalBinary(b []byte) error {
	var res StatusResponse
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceFrom trace from
//
// swagger:model TraceFrom
type TraceFrom struct {

	// labels selecting the source identity of a trace.
	Labels Labels `json:"labels,omitempty"`
}

// Validate validates this trace from.
// Runs the field validators and wraps any failures in a CompositeError.
func (m *TraceFrom) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateLabels(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateLabels validates the optional Labels field, re-rooting errors
// at the "labels" JSON path.
func (m *TraceFrom) validateLabels(formats strfmt.Registry) error {
	if swag.IsZero(m.Labels) { // not required
		return nil
	}

	if err := m.Labels.Validate(formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("labels")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("labels")
		}
		return err
	}

	return nil
}

// ContextValidate validate this trace from based on the context it is used
func (m *TraceFrom) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateLabels(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateLabels delegates context validation to the Labels type
// (no IsZero guard: Labels handles emptiness in its own ContextValidate).
func (m *TraceFrom) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {

	if err := m.Labels.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("labels")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("labels")
		}
		return err
	}

	return nil
}

// MarshalBinary interface implementation
func (m *TraceFrom) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *TraceFrom) UnmarshalBinary(b []byte) error {
	var res TraceFrom
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceSelector Context describing a pair of source and destination identity
//
// swagger:model TraceSelector
type TraceSelector struct {

	// from: source side of the trace.
	From *TraceFrom `json:"from,omitempty"`

	// to: destination side of the trace.
	To *TraceTo `json:"to,omitempty"`

	// Enable verbose tracing.
	//
	Verbose bool `json:"verbose,omitempty"`
}

// Validate validates this trace selector.
// Validates From and To, wrapping any failures in a CompositeError.
func (m *TraceSelector) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateFrom(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateTo(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateFrom validates the optional From pointer, re-rooting errors at "from".
func (m *TraceSelector) validateFrom(formats strfmt.Registry) error {
	if swag.IsZero(m.From) { // not required
		return nil
	}

	if m.From != nil {
		if err := m.From.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("from")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("from")
			}
			return err
		}
	}

	return nil
}

// validateTo validates the optional To pointer, re-rooting errors at "to".
func (m *TraceSelector) validateTo(formats strfmt.Registry) error {
	if swag.IsZero(m.To) { // not required
		return nil
	}

	if m.To != nil {
		if err := m.To.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("to")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("to")
			}
			return err
		}
	}

	return nil
}

// ContextValidate validate this trace selector based on the context it is used
func (m *TraceSelector) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateFrom(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateTo(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateFrom context-validates the optional From pointer.
func (m *TraceSelector) contextValidateFrom(ctx context.Context, formats strfmt.Registry) error {

	if m.From != nil {

		if swag.IsZero(m.From) { // not required
			return nil
		}

		if err := m.From.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("from")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("from")
			}
			return err
		}
	}

	return nil
}

// contextValidateTo context-validates the optional To pointer.
func (m *TraceSelector) contextValidateTo(ctx context.Context, formats strfmt.Registry) error {

	if m.To != nil {

		if swag.IsZero(m.To) { // not required
			return nil
		}

		if err := m.To.ContextValidate(ctx, formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("to")
			} else if ce, ok := err.(*errors.CompositeError); ok {
				return ce.ValidateName("to")
			}
			return err
		}
	}

	return nil
}

// MarshalBinary interface implementation
func (m *TraceSelector) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *TraceSelector) UnmarshalBinary(b []byte) error {
	var res TraceSelector
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceTo trace to
//
// swagger:model TraceTo
type TraceTo struct {

	// List of Layer 4 port and protocol pairs which will be used in communication
	// from the source identity to the destination identity.
	//
	Dports []*Port `json:"dports"`

	// labels selecting the destination identity of a trace.
	Labels Labels `json:"labels,omitempty"`
}

// Validate validates this trace to.
// Validates Dports and Labels, wrapping any failures in a CompositeError.
func (m *TraceTo) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validateDports(formats); err != nil {
		res = append(res, err)
	}

	if err := m.validateLabels(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateDports validates each non-nil, non-zero entry of Dports,
// re-rooting errors at "dports.<index>". Note zero entries are skipped
// with continue so the remaining entries are still checked.
func (m *TraceTo) validateDports(formats strfmt.Registry) error {
	if swag.IsZero(m.Dports) { // not required
		return nil
	}

	for i := 0; i < len(m.Dports); i++ {
		if swag.IsZero(m.Dports[i]) { // not required
			continue
		}

		if m.Dports[i] != nil {
			if err := m.Dports[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("dports" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("dports" + "." + strconv.Itoa(i))
				}
				return err
			}
		}

	}

	return nil
}

// validateLabels validates the optional Labels field, re-rooting errors at "labels".
func (m *TraceTo) validateLabels(formats strfmt.Registry) error {
	if swag.IsZero(m.Labels) { // not required
		return nil
	}

	if err := m.Labels.Validate(formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("labels")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("labels")
		}
		return err
	}

	return nil
}

// ContextValidate validate this trace to based on the context it is used
func (m *TraceTo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidateDports(ctx, formats); err != nil {
		res = append(res, err)
	}

	if err := m.contextValidateLabels(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// contextValidateDports context-validates each entry of Dports,
// re-rooting any validation error at "dports.<index>".
//
// Fix: the generated body returned nil from inside the loop when an
// element was zero-valued, which aborted the loop and silently skipped
// context validation of every remaining port. Use continue instead,
// matching the Validate-phase counterpart validateDports.
func (m *TraceTo) contextValidateDports(ctx context.Context, formats strfmt.Registry) error {

	for i := 0; i < len(m.Dports); i++ {

		if m.Dports[i] != nil {

			if swag.IsZero(m.Dports[i]) { // not required
				continue
			}

			if err := m.Dports[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("dports" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("dports" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}
// contextValidateLabels delegates context validation to the Labels type
// (no IsZero guard: Labels handles emptiness in its own ContextValidate).
func (m *TraceTo) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {

	if err := m.Labels.ContextValidate(ctx, formats); err != nil {
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("labels")
		} else if ce, ok := err.(*errors.CompositeError); ok {
			return ce.ValidateName("labels")
		}
		return err
	}

	return nil
}

// MarshalBinary interface implementation
func (m *TraceTo) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary interface implementation
func (m *TraceTo) UnmarshalBinary(b []byte) error {
	var res TraceTo
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// WireguardInterface Status of a WireGuard interface
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardInterface
type WireguardInterface struct {

	// Port on which the WireGuard endpoint is exposed
	ListenPort int64 `json:"listen-port,omitempty"`

	// Name of the interface
	Name string `json:"name,omitempty"`

	// Number of peers configured on this interface
	PeerCount int64 `json:"peer-count,omitempty"`

	// Optional list of WireGuard peers
	Peers []*WireguardPeer `json:"peers"`

	// Public key of this interface
	PublicKey string `json:"public-key,omitempty"`
}

// Validate validates this wireguard interface.
// Validates Peers, wrapping any failures in a CompositeError.
func (m *WireguardInterface) Validate(formats strfmt.Registry) error {
	var res []error

	if err := m.validatePeers(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validatePeers validates each non-nil, non-zero entry of Peers,
// re-rooting errors at "peers.<index>"; zero entries are skipped with
// continue so the remaining entries are still checked.
func (m *WireguardInterface) validatePeers(formats strfmt.Registry) error {
	if swag.IsZero(m.Peers) { // not required
		return nil
	}

	for i := 0; i < len(m.Peers); i++ {
		if swag.IsZero(m.Peers[i]) { // not required
			continue
		}

		if m.Peers[i] != nil {
			if err := m.Peers[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("peers" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("peers" + "." + strconv.Itoa(i))
				}
				return err
			}
		}

	}

	return nil
}

// ContextValidate validate this wireguard interface based on the context it is used
func (m *WireguardInterface) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error

	if err := m.contextValidatePeers(ctx, formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
func (m *WireguardInterface) contextValidatePeers(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Peers); i++ {
if m.Peers[i] != nil {
if swag.IsZero(m.Peers[i]) { // not required
return nil
}
if err := m.Peers[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("peers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("peers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *WireguardInterface) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *WireguardInterface) UnmarshalBinary(b []byte) error {
var res WireguardInterface
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// WireguardPeer Status of a WireGuard peer
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardPeer
type WireguardPeer struct {
	// List of IPs which may be routed through this peer
	AllowedIps []string `json:"allowed-ips"`
	// Endpoint on which we are connected to this peer
	Endpoint string `json:"endpoint,omitempty"`
	// Timestamp of the last handshake with this peer
	// Format: date-time
	LastHandshakeTime strfmt.DateTime `json:"last-handshake-time,omitempty"`
	// Public key of this peer
	PublicKey string `json:"public-key,omitempty"`
	// Number of received bytes
	TransferRx int64 `json:"transfer-rx,omitempty"`
	// Number of sent bytes
	TransferTx int64 `json:"transfer-tx,omitempty"`
}

// Validate validates this wireguard peer.
func (m *WireguardPeer) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateLastHandshakeTime(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}

// validateLastHandshakeTime checks that LastHandshakeTime, when set,
// is a well-formed date-time value.
func (m *WireguardPeer) validateLastHandshakeTime(formats strfmt.Registry) error {
	if swag.IsZero(m.LastHandshakeTime) { // not required
		return nil
	}
	return validate.FormatOf("last-handshake-time", "body", "date-time", m.LastHandshakeTime.String(), formats)
}

// ContextValidate validates this wireguard peer based on context it is used.
// No fields are context-dependent, so this is a no-op.
func (m *WireguardPeer) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *WireguardPeer) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *WireguardPeer) UnmarshalBinary(b []byte) error {
	var decoded WireguardPeer
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// WireguardStatus Status of the WireGuard agent
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardStatus
type WireguardStatus struct {
	// WireGuard interfaces managed by this Cilium instance
	Interfaces []*WireguardInterface `json:"interfaces"`
	// Label selector for nodes which will opt-out of node-to-node encryption
	NodeEncryptOptOutLabels string `json:"node-encrypt-opt-out-labels,omitempty"`
	// Node Encryption status
	NodeEncryption string `json:"node-encryption,omitempty"`
}

// Validate validates this wireguard status.
func (m *WireguardStatus) Validate(formats strfmt.Registry) error {
	var res []error
	if err := m.validateInterfaces(formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// validateInterfaces validates every non-zero entry of the optional Interfaces slice.
func (m *WireguardStatus) validateInterfaces(formats strfmt.Registry) error {
	if swag.IsZero(m.Interfaces) { // not required
		return nil
	}
	for i := 0; i < len(m.Interfaces); i++ {
		if swag.IsZero(m.Interfaces[i]) { // not required
			continue
		}
		if m.Interfaces[i] != nil {
			if err := m.Interfaces[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("interfaces" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("interfaces" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}

// ContextValidate validate this wireguard status based on the context it is used
func (m *WireguardStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
	var res []error
	if err := m.contextValidateInterfaces(ctx, formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

// contextValidateInterfaces context-validates every entry of the Interfaces slice.
//
// Fix: a zero-valued interface previously caused an early `return nil`,
// skipping context validation of all remaining entries; use `continue`
// instead, matching the skipping behavior of validateInterfaces above.
func (m *WireguardStatus) contextValidateInterfaces(ctx context.Context, formats strfmt.Registry) error {
	for i := 0; i < len(m.Interfaces); i++ {
		if m.Interfaces[i] != nil {
			if swag.IsZero(m.Interfaces[i]) { // not required
				continue
			}
			if err := m.Interfaces[i].ContextValidate(ctx, formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("interfaces" + "." + strconv.Itoa(i))
				} else if ce, ok := err.(*errors.CompositeError); ok {
					return ce.ValidateName("interfaces" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *WireguardStatus) MarshalBinary() ([]byte, error) {
	if m == nil {
		return nil, nil
	}
	return swag.WriteJSON(m)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *WireguardStatus) UnmarshalBinary(b []byte) error {
	var res WireguardStatus
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*m = res
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package models
import (
strfmt "github.com/go-openapi/strfmt"
)
// DeepCopyInto copies the receiver into out; out must be non-nil.
// Slice elements are shallow struct copies placed behind fresh pointers.
func (in *BPFMapStatus) DeepCopyInto(out *BPFMapStatus) {
	*out = *in
	if in.Maps != nil {
		out.Maps = make([]*BPFMapProperties, len(in.Maps))
		for i, p := range in.Maps {
			if p != nil {
				cp := *p
				out.Maps[i] = &cp
			}
		}
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *BPFMapStatus) DeepCopy() *BPFMapStatus {
	if in == nil {
		return nil
	}
	clone := new(BPFMapStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *BandwidthManager) DeepCopyInto(out *BandwidthManager) {
	*out = *in
	if in.Devices != nil {
		out.Devices = make([]string, len(in.Devices))
		copy(out.Devices, in.Devices)
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *BandwidthManager) DeepCopy() *BandwidthManager {
	if in == nil {
		return nil
	}
	clone := new(BandwidthManager)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
// Pointer fields and slice elements are shallow struct copies behind fresh pointers.
func (in *BgpPeer) DeepCopyInto(out *BgpPeer) {
	*out = *in
	if in.Families != nil {
		out.Families = make([]*BgpPeerFamilies, len(in.Families))
		for i, f := range in.Families {
			if f != nil {
				cp := *f
				out.Families[i] = &cp
			}
		}
	}
	if in.GracefulRestart != nil {
		gr := *in.GracefulRestart
		out.GracefulRestart = &gr
	}
	if in.LocalCapabilities != nil {
		out.LocalCapabilities = make([]*BgpCapabilities, len(in.LocalCapabilities))
		for i, c := range in.LocalCapabilities {
			if c != nil {
				cp := *c
				out.LocalCapabilities[i] = &cp
			}
		}
	}
	if in.RemoteCapabilities != nil {
		out.RemoteCapabilities = make([]*BgpCapabilities, len(in.RemoteCapabilities))
		for i, c := range in.RemoteCapabilities {
			if c != nil {
				cp := *c
				out.RemoteCapabilities[i] = &cp
			}
		}
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *BgpPeer) DeepCopy() *BgpPeer {
	if in == nil {
		return nil
	}
	clone := new(BgpPeer)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
// Cluster entries are deep-copied via RemoteCluster.DeepCopy.
func (in *ClusterMeshStatus) DeepCopyInto(out *ClusterMeshStatus) {
	*out = *in
	if in.Clusters != nil {
		out.Clusters = make([]*RemoteCluster, len(in.Clusters))
		for i, c := range in.Clusters {
			if c != nil {
				out.Clusters[i] = c.DeepCopy()
			}
		}
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *ClusterMeshStatus) DeepCopy() *ClusterMeshStatus {
	if in == nil {
		return nil
	}
	clone := new(ClusterMeshStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
	*out = *in
	if in.CiliumHealth != nil {
		s := *in.CiliumHealth
		out.CiliumHealth = &s
	}
	if in.Nodes != nil {
		out.Nodes = make([]*NodeElement, len(in.Nodes))
		for i, n := range in.Nodes {
			if n != nil {
				out.Nodes[i] = n.DeepCopy()
			}
		}
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *ClusterStatus) DeepCopy() *ClusterStatus {
	if in == nil {
		return nil
	}
	clone := new(ClusterStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *ControllerStatus) DeepCopyInto(out *ControllerStatus) {
	*out = *in
	if in.Configuration != nil {
		cfg := *in.Configuration
		out.Configuration = &cfg
	}
	if in.Status != nil {
		out.Status = in.Status.DeepCopy()
	}
	in.UUID.DeepCopyInto(&out.UUID)
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *ControllerStatus) DeepCopy() *ControllerStatus {
	if in == nil {
		return nil
	}
	clone := new(ControllerStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
// The duration-typed fields carry their own deepcopy helpers.
func (in *ControllerStatusConfiguration) DeepCopyInto(out *ControllerStatusConfiguration) {
	*out = *in
	in.ErrorRetryBase.DeepCopyInto(&out.ErrorRetryBase)
	in.Interval.DeepCopyInto(&out.Interval)
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *ControllerStatusConfiguration) DeepCopy() *ControllerStatusConfiguration {
	if in == nil {
		return nil
	}
	clone := new(ControllerStatusConfiguration)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *ControllerStatusStatus) DeepCopyInto(out *ControllerStatusStatus) {
	*out = *in
	in.LastFailureTimestamp.DeepCopyInto(&out.LastFailureTimestamp)
	in.LastSuccessTimestamp.DeepCopyInto(&out.LastSuccessTimestamp)
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *ControllerStatusStatus) DeepCopy() *ControllerStatusStatus {
	if in == nil {
		return nil
	}
	clone := new(ControllerStatusStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *EncryptionStatus) DeepCopyInto(out *EncryptionStatus) {
	*out = *in
	if in.Ipsec != nil {
		out.Ipsec = in.Ipsec.DeepCopy()
	}
	if in.Wireguard != nil {
		out.Wireguard = in.Wireguard.DeepCopy()
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *EncryptionStatus) DeepCopy() *EncryptionStatus {
	if in == nil {
		return nil
	}
	clone := new(EncryptionStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *HostFirewall) DeepCopyInto(out *HostFirewall) {
	*out = *in
	if in.Devices != nil {
		out.Devices = make([]string, len(in.Devices))
		copy(out.Devices, in.Devices)
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *HostFirewall) DeepCopy() *HostFirewall {
	if in == nil {
		return nil
	}
	clone := new(HostFirewall)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
// The type has no reference fields, so a plain assignment suffices.
func (in *HubbleMetricsStatus) DeepCopyInto(out *HubbleMetricsStatus) {
	*out = *in
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *HubbleMetricsStatus) DeepCopy() *HubbleMetricsStatus {
	if in == nil {
		return nil
	}
	clone := new(HubbleMetricsStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *HubbleStatus) DeepCopyInto(out *HubbleStatus) {
	*out = *in
	if in.Observer != nil {
		obs := *in.Observer
		out.Observer = &obs
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *HubbleStatus) DeepCopy() *HubbleStatus {
	if in == nil {
		return nil
	}
	clone := new(HubbleStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *HubbleStatusObserver) DeepCopyInto(out *HubbleStatusObserver) {
	*out = *in
	in.Uptime.DeepCopyInto(&out.Uptime)
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *HubbleStatusObserver) DeepCopy() *HubbleStatusObserver {
	if in == nil {
		return nil
	}
	clone := new(HubbleStatusObserver)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *IPAMStatus) DeepCopyInto(out *IPAMStatus) {
	*out = *in
	if in.Allocations != nil {
		out.Allocations = make(AllocationMap, len(in.Allocations))
		for k, v := range in.Allocations {
			out.Allocations[k] = v
		}
	}
	if in.IPV4 != nil {
		out.IPV4 = make([]string, len(in.IPV4))
		copy(out.IPV4, in.IPV4)
	}
	if in.IPV6 != nil {
		out.IPV6 = make([]string, len(in.IPV6))
		copy(out.IPV6, in.IPV6)
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *IPAMStatus) DeepCopy() *IPAMStatus {
	if in == nil {
		return nil
	}
	clone := new(IPAMStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *IPsecStatus) DeepCopyInto(out *IPsecStatus) {
	*out = *in
	if in.DecryptInterfaces != nil {
		out.DecryptInterfaces = make([]string, len(in.DecryptInterfaces))
		copy(out.DecryptInterfaces, in.DecryptInterfaces)
	}
	if in.XfrmErrors != nil {
		out.XfrmErrors = make(map[string]int64, len(in.XfrmErrors))
		for k, v := range in.XfrmErrors {
			out.XfrmErrors[k] = v
		}
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *IPsecStatus) DeepCopy() *IPsecStatus {
	if in == nil {
		return nil
	}
	clone := new(IPsecStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *K8sStatus) DeepCopyInto(out *K8sStatus) {
	*out = *in
	if in.K8sAPIVersions != nil {
		out.K8sAPIVersions = make([]string, len(in.K8sAPIVersions))
		copy(out.K8sAPIVersions, in.K8sAPIVersions)
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *K8sStatus) DeepCopy() *K8sStatus {
	if in == nil {
		return nil
	}
	clone := new(K8sStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *KubeProxyReplacement) DeepCopyInto(out *KubeProxyReplacement) {
	*out = *in
	if in.DeviceList != nil {
		out.DeviceList = make([]*KubeProxyReplacementDeviceListItems0, len(in.DeviceList))
		for i, d := range in.DeviceList {
			if d != nil {
				out.DeviceList[i] = d.DeepCopy()
			}
		}
	}
	if in.Devices != nil {
		out.Devices = make([]string, len(in.Devices))
		copy(out.Devices, in.Devices)
	}
	if in.Features != nil {
		out.Features = in.Features.DeepCopy()
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *KubeProxyReplacement) DeepCopy() *KubeProxyReplacement {
	if in == nil {
		return nil
	}
	clone := new(KubeProxyReplacement)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *KubeProxyReplacementDeviceListItems0) DeepCopyInto(out *KubeProxyReplacementDeviceListItems0) {
	*out = *in
	if in.IP != nil {
		out.IP = make([]string, len(in.IP))
		copy(out.IP, in.IP)
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *KubeProxyReplacementDeviceListItems0) DeepCopy() *KubeProxyReplacementDeviceListItems0 {
	if in == nil {
		return nil
	}
	clone := new(KubeProxyReplacementDeviceListItems0)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
// Leaf feature structs are shallow copies behind fresh pointers; the two
// sub-structs with reference fields delegate to their own DeepCopy.
func (in *KubeProxyReplacementFeatures) DeepCopyInto(out *KubeProxyReplacementFeatures) {
	*out = *in
	if in.Annotations != nil {
		out.Annotations = make([]string, len(in.Annotations))
		copy(out.Annotations, in.Annotations)
	}
	if in.ExternalIPs != nil {
		v := *in.ExternalIPs
		out.ExternalIPs = &v
	}
	if in.GracefulTermination != nil {
		v := *in.GracefulTermination
		out.GracefulTermination = &v
	}
	if in.HostPort != nil {
		v := *in.HostPort
		out.HostPort = &v
	}
	if in.HostReachableServices != nil {
		out.HostReachableServices = in.HostReachableServices.DeepCopy()
	}
	if in.Nat46X64 != nil {
		out.Nat46X64 = in.Nat46X64.DeepCopy()
	}
	if in.NodePort != nil {
		v := *in.NodePort
		out.NodePort = &v
	}
	if in.SessionAffinity != nil {
		v := *in.SessionAffinity
		out.SessionAffinity = &v
	}
	if in.SocketLB != nil {
		v := *in.SocketLB
		out.SocketLB = &v
	}
	if in.SocketLBTracing != nil {
		v := *in.SocketLBTracing
		out.SocketLBTracing = &v
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *KubeProxyReplacementFeatures) DeepCopy() *KubeProxyReplacementFeatures {
	if in == nil {
		return nil
	}
	clone := new(KubeProxyReplacementFeatures)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *KubeProxyReplacementFeaturesHostReachableServices) DeepCopyInto(out *KubeProxyReplacementFeaturesHostReachableServices) {
	*out = *in
	if in.Protocols != nil {
		out.Protocols = make([]string, len(in.Protocols))
		copy(out.Protocols, in.Protocols)
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *KubeProxyReplacementFeaturesHostReachableServices) DeepCopy() *KubeProxyReplacementFeaturesHostReachableServices {
	if in == nil {
		return nil
	}
	clone := new(KubeProxyReplacementFeaturesHostReachableServices)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *KubeProxyReplacementFeaturesNat46X64) DeepCopyInto(out *KubeProxyReplacementFeaturesNat46X64) {
	*out = *in
	if in.Gateway != nil {
		out.Gateway = in.Gateway.DeepCopy()
	}
	if in.Service != nil {
		svc := *in.Service
		out.Service = &svc
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *KubeProxyReplacementFeaturesNat46X64) DeepCopy() *KubeProxyReplacementFeaturesNat46X64 {
	if in == nil {
		return nil
	}
	clone := new(KubeProxyReplacementFeaturesNat46X64)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *KubeProxyReplacementFeaturesNat46X64Gateway) DeepCopyInto(out *KubeProxyReplacementFeaturesNat46X64Gateway) {
	*out = *in
	if in.Prefixes != nil {
		out.Prefixes = make([]string, len(in.Prefixes))
		copy(out.Prefixes, in.Prefixes)
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *KubeProxyReplacementFeaturesNat46X64Gateway) DeepCopy() *KubeProxyReplacementFeaturesNat46X64Gateway {
	if in == nil {
		return nil
	}
	clone := new(KubeProxyReplacementFeaturesNat46X64Gateway)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *Masquerading) DeepCopyInto(out *Masquerading) {
	*out = *in
	if in.EnabledProtocols != nil {
		p := *in.EnabledProtocols
		out.EnabledProtocols = &p
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *Masquerading) DeepCopy() *Masquerading {
	if in == nil {
		return nil
	}
	clone := new(Masquerading)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
// Elements are shallow Port copies placed behind fresh pointers.
func (in NamedPorts) DeepCopyInto(out *NamedPorts) {
	*out = make(NamedPorts, len(in))
	for i, p := range in {
		if p != nil {
			cp := *p
			(*out)[i] = &cp
		}
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in NamedPorts) DeepCopy() NamedPorts {
	if in == nil {
		return nil
	}
	clone := new(NamedPorts)
	in.DeepCopyInto(clone)
	return *clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *NodeAddressing) DeepCopyInto(out *NodeAddressing) {
	*out = *in
	if in.IPV4 != nil {
		v4 := *in.IPV4
		out.IPV4 = &v4
	}
	if in.IPV6 != nil {
		v6 := *in.IPV6
		out.IPV6 = &v6
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *NodeAddressing) DeepCopy() *NodeAddressing {
	if in == nil {
		return nil
	}
	clone := new(NodeAddressing)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *NodeElement) DeepCopyInto(out *NodeElement) {
	*out = *in
	if in.HealthEndpointAddress != nil {
		out.HealthEndpointAddress = in.HealthEndpointAddress.DeepCopy()
	}
	if in.IngressAddress != nil {
		out.IngressAddress = in.IngressAddress.DeepCopy()
	}
	if in.PrimaryAddress != nil {
		out.PrimaryAddress = in.PrimaryAddress.DeepCopy()
	}
	if in.SecondaryAddresses != nil {
		out.SecondaryAddresses = make([]*NodeAddressingElement, len(in.SecondaryAddresses))
		for i, a := range in.SecondaryAddresses {
			if a != nil {
				cp := *a
				out.SecondaryAddresses[i] = &cp
			}
		}
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *NodeElement) DeepCopy() *NodeElement {
	if in == nil {
		return nil
	}
	clone := new(NodeElement)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *ProxyStatistics) DeepCopyInto(out *ProxyStatistics) {
	*out = *in
	if in.Statistics != nil {
		out.Statistics = in.Statistics.DeepCopy()
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *ProxyStatistics) DeepCopy() *ProxyStatistics {
	if in == nil {
		return nil
	}
	clone := new(ProxyStatistics)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
	*out = *in
	if in.Redirects != nil {
		out.Redirects = make([]*ProxyRedirect, len(in.Redirects))
		for i, r := range in.Redirects {
			if r != nil {
				cp := *r
				out.Redirects[i] = &cp
			}
		}
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *ProxyStatus) DeepCopy() *ProxyStatus {
	if in == nil {
		return nil
	}
	clone := new(ProxyStatus)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *RemoteCluster) DeepCopyInto(out *RemoteCluster) {
	*out = *in
	if in.Config != nil {
		out.Config = in.Config.DeepCopy()
	}
	in.LastFailure.DeepCopyInto(&out.LastFailure)
	if in.Synced != nil {
		out.Synced = in.Synced.DeepCopy()
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *RemoteCluster) DeepCopy() *RemoteCluster {
	if in == nil {
		return nil
	}
	clone := new(RemoteCluster)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *RemoteClusterConfig) DeepCopyInto(out *RemoteClusterConfig) {
	*out = *in
	if in.ServiceExportsEnabled != nil {
		b := *in.ServiceExportsEnabled
		out.ServiceExportsEnabled = &b
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *RemoteClusterConfig) DeepCopy() *RemoteClusterConfig {
	if in == nil {
		return nil
	}
	clone := new(RemoteClusterConfig)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *RemoteClusterSynced) DeepCopyInto(out *RemoteClusterSynced) {
	*out = *in
	if in.ServiceExports != nil {
		b := *in.ServiceExports
		out.ServiceExports = &b
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *RemoteClusterSynced) DeepCopy() *RemoteClusterSynced {
	if in == nil {
		return nil
	}
	clone := new(RemoteClusterSynced)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *RequestResponseStatistics) DeepCopyInto(out *RequestResponseStatistics) {
	*out = *in
	if in.Requests != nil {
		req := *in.Requests
		out.Requests = &req
	}
	if in.Responses != nil {
		resp := *in.Responses
		out.Responses = &resp
	}
}

// DeepCopy returns an independently mutable copy of the receiver; nil in yields nil.
func (in *RequestResponseStatistics) DeepCopy() *RequestResponseStatistics {
	if in == nil {
		return nil
	}
	clone := new(RequestResponseStatistics)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Each non-nil pointer field gets a freshly allocated element. Element types
// that themselves contain pointers/slices/maps delegate to their own
// DeepCopyInto; flat element types are copied by plain value assignment.
func (in *StatusResponse) DeepCopyInto(out *StatusResponse) {
	*out = *in
	if in.AuthCertificateProvider != nil {
		in, out := &in.AuthCertificateProvider, &out.AuthCertificateProvider
		*out = new(Status)
		**out = **in
	}
	if in.BandwidthManager != nil {
		in, out := &in.BandwidthManager, &out.BandwidthManager
		*out = new(BandwidthManager)
		(*in).DeepCopyInto(*out)
	}
	if in.BpfMaps != nil {
		in, out := &in.BpfMaps, &out.BpfMaps
		*out = new(BPFMapStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.Cilium != nil {
		in, out := &in.Cilium, &out.Cilium
		*out = new(Status)
		**out = **in
	}
	if in.ClockSource != nil {
		in, out := &in.ClockSource, &out.ClockSource
		*out = new(ClockSource)
		**out = **in
	}
	if in.Cluster != nil {
		in, out := &in.Cluster, &out.Cluster
		*out = new(ClusterStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.ClusterMesh != nil {
		in, out := &in.ClusterMesh, &out.ClusterMesh
		*out = new(ClusterMeshStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.CniChaining != nil {
		in, out := &in.CniChaining, &out.CniChaining
		*out = new(CNIChainingStatus)
		**out = **in
	}
	if in.CniFile != nil {
		in, out := &in.CniFile, &out.CniFile
		*out = new(Status)
		**out = **in
	}
	if in.ContainerRuntime != nil {
		in, out := &in.ContainerRuntime, &out.ContainerRuntime
		*out = new(Status)
		**out = **in
	}
	if in.Controllers != nil {
		in, out := &in.Controllers, &out.Controllers
		*out = make(ControllerStatuses, len(*in))
		for i := range *in {
			// nil slice entries stay nil in the copy.
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(ControllerStatus)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	if in.Encryption != nil {
		in, out := &in.Encryption, &out.Encryption
		*out = new(EncryptionStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.HostFirewall != nil {
		in, out := &in.HostFirewall, &out.HostFirewall
		*out = new(HostFirewall)
		(*in).DeepCopyInto(*out)
	}
	if in.Hubble != nil {
		in, out := &in.Hubble, &out.Hubble
		*out = new(HubbleStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.HubbleMetrics != nil {
		in, out := &in.HubbleMetrics, &out.HubbleMetrics
		*out = new(HubbleMetricsStatus)
		**out = **in
	}
	if in.IdentityRange != nil {
		in, out := &in.IdentityRange, &out.IdentityRange
		*out = new(IdentityRange)
		**out = **in
	}
	if in.Ipam != nil {
		in, out := &in.Ipam, &out.Ipam
		*out = new(IPAMStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.IPV4BigTCP != nil {
		in, out := &in.IPV4BigTCP, &out.IPV4BigTCP
		*out = new(IPV4BigTCP)
		**out = **in
	}
	if in.IPV6BigTCP != nil {
		in, out := &in.IPV6BigTCP, &out.IPV6BigTCP
		*out = new(IPV6BigTCP)
		**out = **in
	}
	if in.KubeProxyReplacement != nil {
		in, out := &in.KubeProxyReplacement, &out.KubeProxyReplacement
		*out = new(KubeProxyReplacement)
		(*in).DeepCopyInto(*out)
	}
	if in.Kubernetes != nil {
		in, out := &in.Kubernetes, &out.Kubernetes
		*out = new(K8sStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.Kvstore != nil {
		in, out := &in.Kvstore, &out.Kvstore
		*out = new(Status)
		**out = **in
	}
	if in.Masquerading != nil {
		in, out := &in.Masquerading, &out.Masquerading
		*out = new(Masquerading)
		(*in).DeepCopyInto(*out)
	}
	if in.NodeMonitor != nil {
		in, out := &in.NodeMonitor, &out.NodeMonitor
		*out = new(MonitorStatus)
		**out = **in
	}
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyStatus)
		(*in).DeepCopyInto(*out)
	}
	if in.Routing != nil {
		in, out := &in.Routing, &out.Routing
		*out = new(Routing)
		**out = **in
	}
	if in.Srv6 != nil {
		in, out := &in.Srv6, &out.Srv6
		*out = new(Srv6)
		**out = **in
	}
	if in.Stale != nil {
		in, out := &in.Stale, &out.Stale
		*out = make(map[string]strfmt.DateTime, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	// NOTE: redundant bare `return` at the end of a result-less function removed (staticcheck S1023).
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusResponse.
// A nil receiver yields a nil result.
func (in *StatusResponse) DeepCopy() *StatusResponse {
	if in == nil {
		return nil
	}
	clone := new(StatusResponse)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardInterface) DeepCopyInto(out *WireguardInterface) {
	*out = *in
	if in.Peers != nil {
		in, out := &in.Peers, &out.Peers
		*out = make([]*WireguardPeer, len(*in))
		for i := range *in {
			// nil slice entries stay nil in the copy.
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(WireguardPeer)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	// NOTE: redundant bare `return` at the end of a result-less function removed (staticcheck S1023).
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardInterface.
// A nil receiver yields a nil result.
func (in *WireguardInterface) DeepCopy() *WireguardInterface {
	if in == nil {
		return nil
	}
	clone := new(WireguardInterface)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardPeer) DeepCopyInto(out *WireguardPeer) {
	*out = *in
	if in.AllowedIps != nil {
		// []string elements are values, so copy() is a deep copy here.
		in, out := &in.AllowedIps, &out.AllowedIps
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.LastHandshakeTime.DeepCopyInto(&out.LastHandshakeTime)
	// NOTE: redundant bare `return` at the end of a result-less function removed (staticcheck S1023).
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardPeer.
// A nil receiver yields a nil result.
func (in *WireguardPeer) DeepCopy() *WireguardPeer {
	if in == nil {
		return nil
	}
	clone := new(WireguardPeer)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardStatus) DeepCopyInto(out *WireguardStatus) {
	*out = *in
	if in.Interfaces != nil {
		in, out := &in.Interfaces, &out.Interfaces
		*out = make([]*WireguardInterface, len(*in))
		for i := range *in {
			// nil slice entries stay nil in the copy.
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(WireguardInterface)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	// NOTE: redundant bare `return` at the end of a result-less function removed (staticcheck S1023).
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardStatus.
// A nil receiver yields a nil result.
func (in *WireguardStatus) DeepCopy() *WireguardStatus {
	if in == nil {
		return nil
	}
	clone := new(WireguardStatus)
	in.DeepCopyInto(clone)
	return clone
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package models
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerStatusConfiguration) DeepEqual(other *ControllerStatusConfiguration) bool {
	if other == nil {
		return false
	}
	// All fields are directly comparable, so a short-circuiting
	// conjunction is equivalent to the field-by-field checks.
	return in.ErrorRetry == other.ErrorRetry &&
		in.ErrorRetryBase == other.ErrorRetryBase &&
		in.Interval == other.Interval
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointHealth) DeepEqual(other *EndpointHealth) bool {
	if other == nil {
		return false
	}
	// All fields are directly comparable, so a short-circuiting
	// conjunction is equivalent to the field-by-field checks.
	return in.Bpf == other.Bpf &&
		in.Connected == other.Connected &&
		in.OverallHealth == other.OverallHealth &&
		in.Policy == other.Policy
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointIdentifiers) DeepEqual(other *EndpointIdentifiers) bool {
	if other == nil {
		return false
	}
	// All fields are directly comparable, so a short-circuiting
	// conjunction is equivalent to the field-by-field checks.
	return in.CniAttachmentID == other.CniAttachmentID &&
		in.ContainerID == other.ContainerID &&
		in.ContainerName == other.ContainerName &&
		in.DockerEndpointID == other.DockerEndpointID &&
		in.DockerNetworkID == other.DockerNetworkID &&
		in.K8sNamespace == other.K8sNamespace &&
		in.K8sPodName == other.K8sPodName &&
		in.PodName == other.PodName
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointStatusChange) DeepEqual(other *EndpointStatusChange) bool {
	if other == nil {
		return false
	}
	// All fields are directly comparable, so a short-circuiting
	// conjunction is equivalent to the field-by-field checks.
	return in.Code == other.Code &&
		in.Message == other.Message &&
		in.State == other.State &&
		in.Timestamp == other.Timestamp
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NamedPorts) DeepEqual(other *NamedPorts) bool {
	if other == nil {
		return false
	}
	// Lengths must match before comparing element-wise.
	// (Removed `else` after a terminating return — Go Code Review Comments:
	// keep the happy path left-aligned.)
	if len(*in) != len(*other) {
		return false
	}
	for i, inElement := range *in {
		if !inElement.DeepEqual((*other)[i]) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Port) DeepEqual(other *Port) bool {
	if other == nil {
		return false
	}
	// All fields are directly comparable, so a short-circuiting
	// conjunction is equivalent to the field-by-field checks.
	return in.Name == other.Name &&
		in.Port == other.Port &&
		in.Protocol == other.Protocol
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.7
// protoc v6.32.0
// source: observer/observer.proto
package observer
import (
flow "github.com/cilium/cilium/api/v1/flow"
relay "github.com/cilium/cilium/api/v1/relay"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: these constant expressions fail to build if the
// protoc-gen-go version used to generate this file and the linked
// protoimpl runtime drift outside each other's supported range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Symbols defined in public import of flow/flow.proto.
type FlowType = flow.FlowType
const FlowType_UNKNOWN_TYPE = flow.FlowType_UNKNOWN_TYPE
const FlowType_L3_L4 = flow.FlowType_L3_L4
const FlowType_L7 = flow.FlowType_L7
const FlowType_SOCK = flow.FlowType_SOCK
var FlowType_name = flow.FlowType_name
var FlowType_value = flow.FlowType_value
type AuthType = flow.AuthType
const AuthType_DISABLED = flow.AuthType_DISABLED
const AuthType_SPIRE = flow.AuthType_SPIRE
const AuthType_TEST_ALWAYS_FAIL = flow.AuthType_TEST_ALWAYS_FAIL
var AuthType_name = flow.AuthType_name
var AuthType_value = flow.AuthType_value
type TraceObservationPoint = flow.TraceObservationPoint
const TraceObservationPoint_UNKNOWN_POINT = flow.TraceObservationPoint_UNKNOWN_POINT
const TraceObservationPoint_TO_PROXY = flow.TraceObservationPoint_TO_PROXY
const TraceObservationPoint_TO_HOST = flow.TraceObservationPoint_TO_HOST
const TraceObservationPoint_TO_STACK = flow.TraceObservationPoint_TO_STACK
const TraceObservationPoint_TO_OVERLAY = flow.TraceObservationPoint_TO_OVERLAY
const TraceObservationPoint_TO_ENDPOINT = flow.TraceObservationPoint_TO_ENDPOINT
const TraceObservationPoint_FROM_ENDPOINT = flow.TraceObservationPoint_FROM_ENDPOINT
const TraceObservationPoint_FROM_PROXY = flow.TraceObservationPoint_FROM_PROXY
const TraceObservationPoint_FROM_HOST = flow.TraceObservationPoint_FROM_HOST
const TraceObservationPoint_FROM_STACK = flow.TraceObservationPoint_FROM_STACK
const TraceObservationPoint_FROM_OVERLAY = flow.TraceObservationPoint_FROM_OVERLAY
const TraceObservationPoint_FROM_NETWORK = flow.TraceObservationPoint_FROM_NETWORK
const TraceObservationPoint_TO_NETWORK = flow.TraceObservationPoint_TO_NETWORK
const TraceObservationPoint_FROM_CRYPTO = flow.TraceObservationPoint_FROM_CRYPTO
const TraceObservationPoint_TO_CRYPTO = flow.TraceObservationPoint_TO_CRYPTO
var TraceObservationPoint_name = flow.TraceObservationPoint_name
var TraceObservationPoint_value = flow.TraceObservationPoint_value
type TraceReason = flow.TraceReason
const TraceReason_TRACE_REASON_UNKNOWN = flow.TraceReason_TRACE_REASON_UNKNOWN
const TraceReason_NEW = flow.TraceReason_NEW
const TraceReason_ESTABLISHED = flow.TraceReason_ESTABLISHED
const TraceReason_REPLY = flow.TraceReason_REPLY
const TraceReason_RELATED = flow.TraceReason_RELATED
const TraceReason_REOPENED = flow.TraceReason_REOPENED
const TraceReason_SRV6_ENCAP = flow.TraceReason_SRV6_ENCAP
const TraceReason_SRV6_DECAP = flow.TraceReason_SRV6_DECAP
const TraceReason_ENCRYPT_OVERLAY = flow.TraceReason_ENCRYPT_OVERLAY
var TraceReason_name = flow.TraceReason_name
var TraceReason_value = flow.TraceReason_value
type L7FlowType = flow.L7FlowType
const L7FlowType_UNKNOWN_L7_TYPE = flow.L7FlowType_UNKNOWN_L7_TYPE
const L7FlowType_REQUEST = flow.L7FlowType_REQUEST
const L7FlowType_RESPONSE = flow.L7FlowType_RESPONSE
const L7FlowType_SAMPLE = flow.L7FlowType_SAMPLE
var L7FlowType_name = flow.L7FlowType_name
var L7FlowType_value = flow.L7FlowType_value
type IPVersion = flow.IPVersion
const IPVersion_IP_NOT_USED = flow.IPVersion_IP_NOT_USED
const IPVersion_IPv4 = flow.IPVersion_IPv4
const IPVersion_IPv6 = flow.IPVersion_IPv6
var IPVersion_name = flow.IPVersion_name
var IPVersion_value = flow.IPVersion_value
type Verdict = flow.Verdict
const Verdict_VERDICT_UNKNOWN = flow.Verdict_VERDICT_UNKNOWN
const Verdict_FORWARDED = flow.Verdict_FORWARDED
const Verdict_DROPPED = flow.Verdict_DROPPED
const Verdict_ERROR = flow.Verdict_ERROR
const Verdict_AUDIT = flow.Verdict_AUDIT
const Verdict_REDIRECTED = flow.Verdict_REDIRECTED
const Verdict_TRACED = flow.Verdict_TRACED
const Verdict_TRANSLATED = flow.Verdict_TRANSLATED
var Verdict_name = flow.Verdict_name
var Verdict_value = flow.Verdict_value
type DropReason = flow.DropReason
const DropReason_DROP_REASON_UNKNOWN = flow.DropReason_DROP_REASON_UNKNOWN
const DropReason_INVALID_SOURCE_MAC = flow.DropReason_INVALID_SOURCE_MAC
const DropReason_INVALID_DESTINATION_MAC = flow.DropReason_INVALID_DESTINATION_MAC
const DropReason_INVALID_SOURCE_IP = flow.DropReason_INVALID_SOURCE_IP
const DropReason_POLICY_DENIED = flow.DropReason_POLICY_DENIED
const DropReason_INVALID_PACKET_DROPPED = flow.DropReason_INVALID_PACKET_DROPPED
const DropReason_CT_TRUNCATED_OR_INVALID_HEADER = flow.DropReason_CT_TRUNCATED_OR_INVALID_HEADER
const DropReason_CT_MISSING_TCP_ACK_FLAG = flow.DropReason_CT_MISSING_TCP_ACK_FLAG
const DropReason_CT_UNKNOWN_L4_PROTOCOL = flow.DropReason_CT_UNKNOWN_L4_PROTOCOL
const DropReason_CT_CANNOT_CREATE_ENTRY_FROM_PACKET = flow.DropReason_CT_CANNOT_CREATE_ENTRY_FROM_PACKET
const DropReason_UNSUPPORTED_L3_PROTOCOL = flow.DropReason_UNSUPPORTED_L3_PROTOCOL
const DropReason_MISSED_TAIL_CALL = flow.DropReason_MISSED_TAIL_CALL
const DropReason_ERROR_WRITING_TO_PACKET = flow.DropReason_ERROR_WRITING_TO_PACKET
const DropReason_UNKNOWN_L4_PROTOCOL = flow.DropReason_UNKNOWN_L4_PROTOCOL
const DropReason_UNKNOWN_ICMPV4_CODE = flow.DropReason_UNKNOWN_ICMPV4_CODE
const DropReason_UNKNOWN_ICMPV4_TYPE = flow.DropReason_UNKNOWN_ICMPV4_TYPE
const DropReason_UNKNOWN_ICMPV6_CODE = flow.DropReason_UNKNOWN_ICMPV6_CODE
const DropReason_UNKNOWN_ICMPV6_TYPE = flow.DropReason_UNKNOWN_ICMPV6_TYPE
const DropReason_ERROR_RETRIEVING_TUNNEL_KEY = flow.DropReason_ERROR_RETRIEVING_TUNNEL_KEY
const DropReason_ERROR_RETRIEVING_TUNNEL_OPTIONS = flow.DropReason_ERROR_RETRIEVING_TUNNEL_OPTIONS
const DropReason_INVALID_GENEVE_OPTION = flow.DropReason_INVALID_GENEVE_OPTION
const DropReason_UNKNOWN_L3_TARGET_ADDRESS = flow.DropReason_UNKNOWN_L3_TARGET_ADDRESS
const DropReason_STALE_OR_UNROUTABLE_IP = flow.DropReason_STALE_OR_UNROUTABLE_IP
const DropReason_NO_MATCHING_LOCAL_CONTAINER_FOUND = flow.DropReason_NO_MATCHING_LOCAL_CONTAINER_FOUND
const DropReason_ERROR_WHILE_CORRECTING_L3_CHECKSUM = flow.DropReason_ERROR_WHILE_CORRECTING_L3_CHECKSUM
const DropReason_ERROR_WHILE_CORRECTING_L4_CHECKSUM = flow.DropReason_ERROR_WHILE_CORRECTING_L4_CHECKSUM
const DropReason_CT_MAP_INSERTION_FAILED = flow.DropReason_CT_MAP_INSERTION_FAILED
const DropReason_INVALID_IPV6_EXTENSION_HEADER = flow.DropReason_INVALID_IPV6_EXTENSION_HEADER
const DropReason_IP_FRAGMENTATION_NOT_SUPPORTED = flow.DropReason_IP_FRAGMENTATION_NOT_SUPPORTED
const DropReason_SERVICE_BACKEND_NOT_FOUND = flow.DropReason_SERVICE_BACKEND_NOT_FOUND
const DropReason_NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT = flow.DropReason_NO_TUNNEL_OR_ENCAPSULATION_ENDPOINT
const DropReason_FAILED_TO_INSERT_INTO_PROXYMAP = flow.DropReason_FAILED_TO_INSERT_INTO_PROXYMAP
const DropReason_REACHED_EDT_RATE_LIMITING_DROP_HORIZON = flow.DropReason_REACHED_EDT_RATE_LIMITING_DROP_HORIZON
const DropReason_UNKNOWN_CONNECTION_TRACKING_STATE = flow.DropReason_UNKNOWN_CONNECTION_TRACKING_STATE
const DropReason_LOCAL_HOST_IS_UNREACHABLE = flow.DropReason_LOCAL_HOST_IS_UNREACHABLE
const DropReason_NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION = flow.DropReason_NO_CONFIGURATION_AVAILABLE_TO_PERFORM_POLICY_DECISION
const DropReason_UNSUPPORTED_L2_PROTOCOL = flow.DropReason_UNSUPPORTED_L2_PROTOCOL
const DropReason_NO_MAPPING_FOR_NAT_MASQUERADE = flow.DropReason_NO_MAPPING_FOR_NAT_MASQUERADE
const DropReason_UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE = flow.DropReason_UNSUPPORTED_PROTOCOL_FOR_NAT_MASQUERADE
const DropReason_FIB_LOOKUP_FAILED = flow.DropReason_FIB_LOOKUP_FAILED
const DropReason_ENCAPSULATION_TRAFFIC_IS_PROHIBITED = flow.DropReason_ENCAPSULATION_TRAFFIC_IS_PROHIBITED
const DropReason_INVALID_IDENTITY = flow.DropReason_INVALID_IDENTITY
const DropReason_UNKNOWN_SENDER = flow.DropReason_UNKNOWN_SENDER
const DropReason_NAT_NOT_NEEDED = flow.DropReason_NAT_NOT_NEEDED
const DropReason_IS_A_CLUSTERIP = flow.DropReason_IS_A_CLUSTERIP
const DropReason_FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND = flow.DropReason_FIRST_LOGICAL_DATAGRAM_FRAGMENT_NOT_FOUND
const DropReason_FORBIDDEN_ICMPV6_MESSAGE = flow.DropReason_FORBIDDEN_ICMPV6_MESSAGE
const DropReason_DENIED_BY_LB_SRC_RANGE_CHECK = flow.DropReason_DENIED_BY_LB_SRC_RANGE_CHECK
const DropReason_SOCKET_LOOKUP_FAILED = flow.DropReason_SOCKET_LOOKUP_FAILED
const DropReason_SOCKET_ASSIGN_FAILED = flow.DropReason_SOCKET_ASSIGN_FAILED
const DropReason_PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL = flow.DropReason_PROXY_REDIRECTION_NOT_SUPPORTED_FOR_PROTOCOL
const DropReason_POLICY_DENY = flow.DropReason_POLICY_DENY
const DropReason_VLAN_FILTERED = flow.DropReason_VLAN_FILTERED
const DropReason_INVALID_VNI = flow.DropReason_INVALID_VNI
const DropReason_INVALID_TC_BUFFER = flow.DropReason_INVALID_TC_BUFFER
const DropReason_NO_SID = flow.DropReason_NO_SID
const DropReason_MISSING_SRV6_STATE = flow.DropReason_MISSING_SRV6_STATE
const DropReason_NAT46 = flow.DropReason_NAT46
const DropReason_NAT64 = flow.DropReason_NAT64
const DropReason_AUTH_REQUIRED = flow.DropReason_AUTH_REQUIRED
const DropReason_CT_NO_MAP_FOUND = flow.DropReason_CT_NO_MAP_FOUND
const DropReason_SNAT_NO_MAP_FOUND = flow.DropReason_SNAT_NO_MAP_FOUND
const DropReason_INVALID_CLUSTER_ID = flow.DropReason_INVALID_CLUSTER_ID
const DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP = flow.DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP
const DropReason_NO_EGRESS_GATEWAY = flow.DropReason_NO_EGRESS_GATEWAY
const DropReason_UNENCRYPTED_TRAFFIC = flow.DropReason_UNENCRYPTED_TRAFFIC
const DropReason_TTL_EXCEEDED = flow.DropReason_TTL_EXCEEDED
const DropReason_NO_NODE_ID = flow.DropReason_NO_NODE_ID
const DropReason_DROP_RATE_LIMITED = flow.DropReason_DROP_RATE_LIMITED
const DropReason_IGMP_HANDLED = flow.DropReason_IGMP_HANDLED
const DropReason_IGMP_SUBSCRIBED = flow.DropReason_IGMP_SUBSCRIBED
const DropReason_MULTICAST_HANDLED = flow.DropReason_MULTICAST_HANDLED
const DropReason_DROP_HOST_NOT_READY = flow.DropReason_DROP_HOST_NOT_READY
const DropReason_DROP_EP_NOT_READY = flow.DropReason_DROP_EP_NOT_READY
const DropReason_DROP_NO_EGRESS_IP = flow.DropReason_DROP_NO_EGRESS_IP
const DropReason_DROP_PUNT_PROXY = flow.DropReason_DROP_PUNT_PROXY
var DropReason_name = flow.DropReason_name
var DropReason_value = flow.DropReason_value
type TrafficDirection = flow.TrafficDirection
const TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN = flow.TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN
const TrafficDirection_INGRESS = flow.TrafficDirection_INGRESS
const TrafficDirection_EGRESS = flow.TrafficDirection_EGRESS
var TrafficDirection_name = flow.TrafficDirection_name
var TrafficDirection_value = flow.TrafficDirection_value
type DebugCapturePoint = flow.DebugCapturePoint
const DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN = flow.DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN
const DebugCapturePoint_DBG_CAPTURE_DELIVERY = flow.DebugCapturePoint_DBG_CAPTURE_DELIVERY
const DebugCapturePoint_DBG_CAPTURE_FROM_LB = flow.DebugCapturePoint_DBG_CAPTURE_FROM_LB
const DebugCapturePoint_DBG_CAPTURE_AFTER_V46 = flow.DebugCapturePoint_DBG_CAPTURE_AFTER_V46
const DebugCapturePoint_DBG_CAPTURE_AFTER_V64 = flow.DebugCapturePoint_DBG_CAPTURE_AFTER_V64
const DebugCapturePoint_DBG_CAPTURE_PROXY_PRE = flow.DebugCapturePoint_DBG_CAPTURE_PROXY_PRE
const DebugCapturePoint_DBG_CAPTURE_PROXY_POST = flow.DebugCapturePoint_DBG_CAPTURE_PROXY_POST
const DebugCapturePoint_DBG_CAPTURE_SNAT_PRE = flow.DebugCapturePoint_DBG_CAPTURE_SNAT_PRE
const DebugCapturePoint_DBG_CAPTURE_SNAT_POST = flow.DebugCapturePoint_DBG_CAPTURE_SNAT_POST
var DebugCapturePoint_name = flow.DebugCapturePoint_name
var DebugCapturePoint_value = flow.DebugCapturePoint_value
type EventType = flow.EventType
const EventType_UNKNOWN = flow.EventType_UNKNOWN
const EventType_EventSample = flow.EventType_EventSample
const EventType_RecordLost = flow.EventType_RecordLost
var EventType_name = flow.EventType_name
var EventType_value = flow.EventType_value
type LostEventSource = flow.LostEventSource
const LostEventSource_UNKNOWN_LOST_EVENT_SOURCE = flow.LostEventSource_UNKNOWN_LOST_EVENT_SOURCE
const LostEventSource_PERF_EVENT_RING_BUFFER = flow.LostEventSource_PERF_EVENT_RING_BUFFER
const LostEventSource_OBSERVER_EVENTS_QUEUE = flow.LostEventSource_OBSERVER_EVENTS_QUEUE
const LostEventSource_HUBBLE_RING_BUFFER = flow.LostEventSource_HUBBLE_RING_BUFFER
var LostEventSource_name = flow.LostEventSource_name
var LostEventSource_value = flow.LostEventSource_value
type AgentEventType = flow.AgentEventType
const AgentEventType_AGENT_EVENT_UNKNOWN = flow.AgentEventType_AGENT_EVENT_UNKNOWN
const AgentEventType_AGENT_STARTED = flow.AgentEventType_AGENT_STARTED
const AgentEventType_POLICY_UPDATED = flow.AgentEventType_POLICY_UPDATED
const AgentEventType_POLICY_DELETED = flow.AgentEventType_POLICY_DELETED
const AgentEventType_ENDPOINT_REGENERATE_SUCCESS = flow.AgentEventType_ENDPOINT_REGENERATE_SUCCESS
const AgentEventType_ENDPOINT_REGENERATE_FAILURE = flow.AgentEventType_ENDPOINT_REGENERATE_FAILURE
const AgentEventType_ENDPOINT_CREATED = flow.AgentEventType_ENDPOINT_CREATED
const AgentEventType_ENDPOINT_DELETED = flow.AgentEventType_ENDPOINT_DELETED
const AgentEventType_IPCACHE_UPSERTED = flow.AgentEventType_IPCACHE_UPSERTED
const AgentEventType_IPCACHE_DELETED = flow.AgentEventType_IPCACHE_DELETED
const AgentEventType_SERVICE_UPSERTED = flow.AgentEventType_SERVICE_UPSERTED
const AgentEventType_SERVICE_DELETED = flow.AgentEventType_SERVICE_DELETED
var AgentEventType_name = flow.AgentEventType_name
var AgentEventType_value = flow.AgentEventType_value
type SocketTranslationPoint = flow.SocketTranslationPoint
const SocketTranslationPoint_SOCK_XLATE_POINT_UNKNOWN = flow.SocketTranslationPoint_SOCK_XLATE_POINT_UNKNOWN
const SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_FWD = flow.SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_FWD
const SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_FWD = flow.SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_FWD
const SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_REV = flow.SocketTranslationPoint_SOCK_XLATE_POINT_PRE_DIRECTION_REV
const SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_REV = flow.SocketTranslationPoint_SOCK_XLATE_POINT_POST_DIRECTION_REV
var SocketTranslationPoint_name = flow.SocketTranslationPoint_name
var SocketTranslationPoint_value = flow.SocketTranslationPoint_value
type DebugEventType = flow.DebugEventType
const DebugEventType_DBG_EVENT_UNKNOWN = flow.DebugEventType_DBG_EVENT_UNKNOWN
const DebugEventType_DBG_GENERIC = flow.DebugEventType_DBG_GENERIC
const DebugEventType_DBG_LOCAL_DELIVERY = flow.DebugEventType_DBG_LOCAL_DELIVERY
const DebugEventType_DBG_ENCAP = flow.DebugEventType_DBG_ENCAP
const DebugEventType_DBG_LXC_FOUND = flow.DebugEventType_DBG_LXC_FOUND
const DebugEventType_DBG_POLICY_DENIED = flow.DebugEventType_DBG_POLICY_DENIED
const DebugEventType_DBG_CT_LOOKUP = flow.DebugEventType_DBG_CT_LOOKUP
const DebugEventType_DBG_CT_LOOKUP_REV = flow.DebugEventType_DBG_CT_LOOKUP_REV
const DebugEventType_DBG_CT_MATCH = flow.DebugEventType_DBG_CT_MATCH
const DebugEventType_DBG_CT_CREATED = flow.DebugEventType_DBG_CT_CREATED
const DebugEventType_DBG_CT_CREATED2 = flow.DebugEventType_DBG_CT_CREATED2
const DebugEventType_DBG_ICMP6_HANDLE = flow.DebugEventType_DBG_ICMP6_HANDLE
const DebugEventType_DBG_ICMP6_REQUEST = flow.DebugEventType_DBG_ICMP6_REQUEST
const DebugEventType_DBG_ICMP6_NS = flow.DebugEventType_DBG_ICMP6_NS
const DebugEventType_DBG_ICMP6_TIME_EXCEEDED = flow.DebugEventType_DBG_ICMP6_TIME_EXCEEDED
const DebugEventType_DBG_CT_VERDICT = flow.DebugEventType_DBG_CT_VERDICT
const DebugEventType_DBG_DECAP = flow.DebugEventType_DBG_DECAP
const DebugEventType_DBG_PORT_MAP = flow.DebugEventType_DBG_PORT_MAP
const DebugEventType_DBG_ERROR_RET = flow.DebugEventType_DBG_ERROR_RET
const DebugEventType_DBG_TO_HOST = flow.DebugEventType_DBG_TO_HOST
const DebugEventType_DBG_TO_STACK = flow.DebugEventType_DBG_TO_STACK
const DebugEventType_DBG_PKT_HASH = flow.DebugEventType_DBG_PKT_HASH
const DebugEventType_DBG_LB6_LOOKUP_FRONTEND = flow.DebugEventType_DBG_LB6_LOOKUP_FRONTEND
const DebugEventType_DBG_LB6_LOOKUP_FRONTEND_FAIL = flow.DebugEventType_DBG_LB6_LOOKUP_FRONTEND_FAIL
const DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT = flow.DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT
const DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS = flow.DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_SUCCESS
const DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL = flow.DebugEventType_DBG_LB6_LOOKUP_BACKEND_SLOT_V2_FAIL
const DebugEventType_DBG_LB6_LOOKUP_BACKEND_FAIL = flow.DebugEventType_DBG_LB6_LOOKUP_BACKEND_FAIL
const DebugEventType_DBG_LB6_REVERSE_NAT_LOOKUP = flow.DebugEventType_DBG_LB6_REVERSE_NAT_LOOKUP
const DebugEventType_DBG_LB6_REVERSE_NAT = flow.DebugEventType_DBG_LB6_REVERSE_NAT
const DebugEventType_DBG_LB4_LOOKUP_FRONTEND = flow.DebugEventType_DBG_LB4_LOOKUP_FRONTEND
const DebugEventType_DBG_LB4_LOOKUP_FRONTEND_FAIL = flow.DebugEventType_DBG_LB4_LOOKUP_FRONTEND_FAIL
const DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT = flow.DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT
const DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS = flow.DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_SUCCESS
const DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL = flow.DebugEventType_DBG_LB4_LOOKUP_BACKEND_SLOT_V2_FAIL
const DebugEventType_DBG_LB4_LOOKUP_BACKEND_FAIL = flow.DebugEventType_DBG_LB4_LOOKUP_BACKEND_FAIL
const DebugEventType_DBG_LB4_REVERSE_NAT_LOOKUP = flow.DebugEventType_DBG_LB4_REVERSE_NAT_LOOKUP
const DebugEventType_DBG_LB4_REVERSE_NAT = flow.DebugEventType_DBG_LB4_REVERSE_NAT
const DebugEventType_DBG_LB4_LOOPBACK_SNAT = flow.DebugEventType_DBG_LB4_LOOPBACK_SNAT
const DebugEventType_DBG_LB4_LOOPBACK_SNAT_REV = flow.DebugEventType_DBG_LB4_LOOPBACK_SNAT_REV
const DebugEventType_DBG_CT_LOOKUP4 = flow.DebugEventType_DBG_CT_LOOKUP4
const DebugEventType_DBG_RR_BACKEND_SLOT_SEL = flow.DebugEventType_DBG_RR_BACKEND_SLOT_SEL
const DebugEventType_DBG_REV_PROXY_LOOKUP = flow.DebugEventType_DBG_REV_PROXY_LOOKUP
const DebugEventType_DBG_REV_PROXY_FOUND = flow.DebugEventType_DBG_REV_PROXY_FOUND
const DebugEventType_DBG_REV_PROXY_UPDATE = flow.DebugEventType_DBG_REV_PROXY_UPDATE
const DebugEventType_DBG_L4_POLICY = flow.DebugEventType_DBG_L4_POLICY
const DebugEventType_DBG_NETDEV_IN_CLUSTER = flow.DebugEventType_DBG_NETDEV_IN_CLUSTER
const DebugEventType_DBG_NETDEV_ENCAP4 = flow.DebugEventType_DBG_NETDEV_ENCAP4
const DebugEventType_DBG_CT_LOOKUP4_1 = flow.DebugEventType_DBG_CT_LOOKUP4_1
const DebugEventType_DBG_CT_LOOKUP4_2 = flow.DebugEventType_DBG_CT_LOOKUP4_2
const DebugEventType_DBG_CT_CREATED4 = flow.DebugEventType_DBG_CT_CREATED4
const DebugEventType_DBG_CT_LOOKUP6_1 = flow.DebugEventType_DBG_CT_LOOKUP6_1
const DebugEventType_DBG_CT_LOOKUP6_2 = flow.DebugEventType_DBG_CT_LOOKUP6_2
const DebugEventType_DBG_CT_CREATED6 = flow.DebugEventType_DBG_CT_CREATED6
const DebugEventType_DBG_SKIP_PROXY = flow.DebugEventType_DBG_SKIP_PROXY
const DebugEventType_DBG_L4_CREATE = flow.DebugEventType_DBG_L4_CREATE
const DebugEventType_DBG_IP_ID_MAP_FAILED4 = flow.DebugEventType_DBG_IP_ID_MAP_FAILED4
const DebugEventType_DBG_IP_ID_MAP_FAILED6 = flow.DebugEventType_DBG_IP_ID_MAP_FAILED6
const DebugEventType_DBG_IP_ID_MAP_SUCCEED4 = flow.DebugEventType_DBG_IP_ID_MAP_SUCCEED4
const DebugEventType_DBG_IP_ID_MAP_SUCCEED6 = flow.DebugEventType_DBG_IP_ID_MAP_SUCCEED6
const DebugEventType_DBG_LB_STALE_CT = flow.DebugEventType_DBG_LB_STALE_CT
const DebugEventType_DBG_INHERIT_IDENTITY = flow.DebugEventType_DBG_INHERIT_IDENTITY
const DebugEventType_DBG_SK_LOOKUP4 = flow.DebugEventType_DBG_SK_LOOKUP4
const DebugEventType_DBG_SK_LOOKUP6 = flow.DebugEventType_DBG_SK_LOOKUP6
const DebugEventType_DBG_SK_ASSIGN = flow.DebugEventType_DBG_SK_ASSIGN
const DebugEventType_DBG_L7_LB = flow.DebugEventType_DBG_L7_LB
const DebugEventType_DBG_SKIP_POLICY = flow.DebugEventType_DBG_SKIP_POLICY
var DebugEventType_name = flow.DebugEventType_name
var DebugEventType_value = flow.DebugEventType_value
type Tunnel_Protocol = flow.Tunnel_Protocol
const Tunnel_UNKNOWN = flow.Tunnel_UNKNOWN
const Tunnel_VXLAN = flow.Tunnel_VXLAN
const Tunnel_GENEVE = flow.Tunnel_GENEVE
var Tunnel_Protocol_name = flow.Tunnel_Protocol_name
var Tunnel_Protocol_value = flow.Tunnel_Protocol_value
type Flow = flow.Flow
type FileInfo = flow.FileInfo
type Layer4 = flow.Layer4
type Layer4_TCP = flow.Layer4_TCP
type Layer4_UDP = flow.Layer4_UDP
type Layer4_ICMPv4 = flow.Layer4_ICMPv4
type Layer4_ICMPv6 = flow.Layer4_ICMPv6
type Layer4_SCTP = flow.Layer4_SCTP
type Layer7 = flow.Layer7
type Layer7_Dns = flow.Layer7_Dns
type Layer7_Http = flow.Layer7_Http
type Layer7_Kafka = flow.Layer7_Kafka
type TraceContext = flow.TraceContext
type TraceParent = flow.TraceParent
type Endpoint = flow.Endpoint
type Workload = flow.Workload
type TCP = flow.TCP
type IP = flow.IP
type Ethernet = flow.Ethernet
type TCPFlags = flow.TCPFlags
type UDP = flow.UDP
type SCTP = flow.SCTP
type ICMPv4 = flow.ICMPv4
type ICMPv6 = flow.ICMPv6
type Tunnel = flow.Tunnel
type Policy = flow.Policy
type EventTypeFilter = flow.EventTypeFilter
type CiliumEventType = flow.CiliumEventType
type FlowFilter = flow.FlowFilter
type DNS = flow.DNS
type HTTPHeader = flow.HTTPHeader
type HTTP = flow.HTTP
type Kafka = flow.Kafka
type Service = flow.Service
type LostEvent = flow.LostEvent
type AgentEvent = flow.AgentEvent
type AgentEvent_Unknown = flow.AgentEvent_Unknown
type AgentEvent_AgentStart = flow.AgentEvent_AgentStart
type AgentEvent_PolicyUpdate = flow.AgentEvent_PolicyUpdate
type AgentEvent_EndpointRegenerate = flow.AgentEvent_EndpointRegenerate
type AgentEvent_EndpointUpdate = flow.AgentEvent_EndpointUpdate
type AgentEvent_IpcacheUpdate = flow.AgentEvent_IpcacheUpdate
type AgentEvent_ServiceUpsert = flow.AgentEvent_ServiceUpsert
type AgentEvent_ServiceDelete = flow.AgentEvent_ServiceDelete
type AgentEventUnknown = flow.AgentEventUnknown
type TimeNotification = flow.TimeNotification
type PolicyUpdateNotification = flow.PolicyUpdateNotification
type EndpointRegenNotification = flow.EndpointRegenNotification
type EndpointUpdateNotification = flow.EndpointUpdateNotification
type IPCacheNotification = flow.IPCacheNotification
type ServiceUpsertNotificationAddr = flow.ServiceUpsertNotificationAddr
type ServiceUpsertNotification = flow.ServiceUpsertNotification
type ServiceDeleteNotification = flow.ServiceDeleteNotification
type NetworkInterface = flow.NetworkInterface
type DebugEvent = flow.DebugEvent
type FlowFilter_Experimental = flow.FlowFilter_Experimental
// ServerStatusRequest is the (empty) request message for the observer's
// ServerStatus RPC. It carries no fields beyond the protobuf runtime
// bookkeeping state.
type ServerStatusRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *ServerStatusRequest) Reset() {
	*x = ServerStatusRequest{}
	mi := &file_observer_observer_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *ServerStatusRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ServerStatusRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *ServerStatusRequest) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerStatusRequest.ProtoReflect.Descriptor instead.
func (*ServerStatusRequest) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{0}
}
// ServerStatusResponse reports observer (Hubble) status: flow counters,
// uptime, connected/unavailable node counts, version, and flow rate.
type ServerStatusResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// number of currently captured flows
	// In a multi-node context, this is the cumulative count of all captured
	// flows.
	NumFlows uint64 `protobuf:"varint,1,opt,name=num_flows,json=numFlows,proto3" json:"num_flows,omitempty"`
	// maximum capacity of the ring buffer
	// In a multi-node context, this is the aggregation of all ring buffers
	// capacities.
	MaxFlows uint64 `protobuf:"varint,2,opt,name=max_flows,json=maxFlows,proto3" json:"max_flows,omitempty"`
	// total amount of flows observed since the observer was started
	// In a multi-node context, this is the aggregation of all flows that have
	// been seen.
	SeenFlows uint64 `protobuf:"varint,3,opt,name=seen_flows,json=seenFlows,proto3" json:"seen_flows,omitempty"`
	// uptime of this observer instance in nanoseconds
	// In a multi-node context, this field corresponds to the uptime of the
	// longest living instance.
	UptimeNs uint64 `protobuf:"varint,4,opt,name=uptime_ns,json=uptimeNs,proto3" json:"uptime_ns,omitempty"`
	// number of nodes for which a connection is established
	NumConnectedNodes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=num_connected_nodes,json=numConnectedNodes,proto3" json:"num_connected_nodes,omitempty"`
	// number of nodes for which a connection cannot be established
	NumUnavailableNodes *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=num_unavailable_nodes,json=numUnavailableNodes,proto3" json:"num_unavailable_nodes,omitempty"`
	// list of nodes that are unavailable
	// This list may not be exhaustive.
	UnavailableNodes []string `protobuf:"bytes,7,rep,name=unavailable_nodes,json=unavailableNodes,proto3" json:"unavailable_nodes,omitempty"`
	// Version is the version of Cilium/Hubble.
	Version string `protobuf:"bytes,8,opt,name=version,proto3" json:"version,omitempty"`
	// Approximate rate of flows seen by Hubble per second over the last minute.
	// In a multi-node context, this is the sum of all flows rates.
	FlowsRate float64 `protobuf:"fixed64,9,opt,name=flows_rate,json=flowsRate,proto3" json:"flows_rate,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *ServerStatusResponse) Reset() {
	*x = ServerStatusResponse{}
	mi := &file_observer_observer_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *ServerStatusResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ServerStatusResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *ServerStatusResponse) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerStatusResponse.ProtoReflect.Descriptor instead.
func (*ServerStatusResponse) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{1}
}

// GetNumFlows returns NumFlows, or 0 if the receiver is nil.
func (x *ServerStatusResponse) GetNumFlows() uint64 {
	if x != nil {
		return x.NumFlows
	}
	return 0
}

// GetMaxFlows returns MaxFlows, or 0 if the receiver is nil.
func (x *ServerStatusResponse) GetMaxFlows() uint64 {
	if x != nil {
		return x.MaxFlows
	}
	return 0
}

// GetSeenFlows returns SeenFlows, or 0 if the receiver is nil.
func (x *ServerStatusResponse) GetSeenFlows() uint64 {
	if x != nil {
		return x.SeenFlows
	}
	return 0
}

// GetUptimeNs returns UptimeNs, or 0 if the receiver is nil.
func (x *ServerStatusResponse) GetUptimeNs() uint64 {
	if x != nil {
		return x.UptimeNs
	}
	return 0
}

// GetNumConnectedNodes returns NumConnectedNodes, or nil if the receiver is nil.
func (x *ServerStatusResponse) GetNumConnectedNodes() *wrapperspb.UInt32Value {
	if x != nil {
		return x.NumConnectedNodes
	}
	return nil
}

// GetNumUnavailableNodes returns NumUnavailableNodes, or nil if the receiver is nil.
func (x *ServerStatusResponse) GetNumUnavailableNodes() *wrapperspb.UInt32Value {
	if x != nil {
		return x.NumUnavailableNodes
	}
	return nil
}

// GetUnavailableNodes returns UnavailableNodes, or nil if the receiver is nil.
func (x *ServerStatusResponse) GetUnavailableNodes() []string {
	if x != nil {
		return x.UnavailableNodes
	}
	return nil
}

// GetVersion returns Version, or "" if the receiver is nil.
func (x *ServerStatusResponse) GetVersion() string {
	if x != nil {
		return x.Version
	}
	return ""
}

// GetFlowsRate returns FlowsRate, or 0 if the receiver is nil.
func (x *ServerStatusResponse) GetFlowsRate() float64 {
	if x != nil {
		return x.FlowsRate
	}
	return 0
}
// GetFlowsRequest configures a GetFlows call: how many flows to return,
// whether to follow the stream, white/blacklist filters, a time window,
// an optional field mask, and experimental/extension payloads.
type GetFlowsRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Number of flows that should be returned. Incompatible with `since/until`.
	// Defaults to the most recent (last) `number` flows, unless `first` is
	// true, then it will return the earliest `number` flows.
	Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"`
	// first specifies if we should look at the first `number` flows or the
	// last `number` of flows. Incompatible with `follow`.
	First bool `protobuf:"varint,9,opt,name=first,proto3" json:"first,omitempty"`
	// follow sets when the server should continue to stream flows after
	// printing the last N flows.
	Follow bool `protobuf:"varint,3,opt,name=follow,proto3" json:"follow,omitempty"`
	// blacklist defines a list of filters which have to match for a flow to be
	// excluded from the result.
	// If multiple blacklist filters are specified, only one of them has to
	// match for a flow to be excluded.
	Blacklist []*flow.FlowFilter `protobuf:"bytes,5,rep,name=blacklist,proto3" json:"blacklist,omitempty"`
	// whitelist defines a list of filters which have to match for a flow to be
	// included in the result.
	// If multiple whitelist filters are specified, only one of them has to
	// match for a flow to be included.
	// The whitelist and blacklist can both be specified. In such cases, the
	// set of the returned flows is the set difference `whitelist - blacklist`.
	// In other words, the result will contain all flows matched by the
	// whitelist that are not also simultaneously matched by the blacklist.
	Whitelist []*flow.FlowFilter `protobuf:"bytes,6,rep,name=whitelist,proto3" json:"whitelist,omitempty"`
	// Since this time for returned flows. Incompatible with `number`.
	Since *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=since,proto3" json:"since,omitempty"`
	// Until this time for returned flows. Incompatible with `number`.
	Until *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=until,proto3" json:"until,omitempty"`
	// FieldMask allows clients to limit flow's fields that will be returned.
	// For example, {paths: ["source.id", "destination.id"]} will return flows
	// with only these two fields set.
	FieldMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
	Experimental *GetFlowsRequest_Experimental `protobuf:"bytes,999,opt,name=experimental,proto3" json:"experimental,omitempty"`
	// extensions can be used to add arbitrary additional metadata to GetFlowsRequest.
	// This can be used to extend functionality for other Hubble compatible
	// APIs, or experiment with new functionality without needing to change the public API.
	Extensions *anypb.Any `protobuf:"bytes,150000,opt,name=extensions,proto3" json:"extensions,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetFlowsRequest) Reset() {
	*x = GetFlowsRequest{}
	mi := &file_observer_observer_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetFlowsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetFlowsRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetFlowsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetFlowsRequest.ProtoReflect.Descriptor instead.
func (*GetFlowsRequest) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{2}
}

// GetNumber returns Number, or 0 if the receiver is nil.
func (x *GetFlowsRequest) GetNumber() uint64 {
	if x != nil {
		return x.Number
	}
	return 0
}

// GetFirst returns First, or false if the receiver is nil.
func (x *GetFlowsRequest) GetFirst() bool {
	if x != nil {
		return x.First
	}
	return false
}

// GetFollow returns Follow, or false if the receiver is nil.
func (x *GetFlowsRequest) GetFollow() bool {
	if x != nil {
		return x.Follow
	}
	return false
}

// GetBlacklist returns Blacklist, or nil if the receiver is nil.
func (x *GetFlowsRequest) GetBlacklist() []*flow.FlowFilter {
	if x != nil {
		return x.Blacklist
	}
	return nil
}

// GetWhitelist returns Whitelist, or nil if the receiver is nil.
func (x *GetFlowsRequest) GetWhitelist() []*flow.FlowFilter {
	if x != nil {
		return x.Whitelist
	}
	return nil
}

// GetSince returns Since, or nil if the receiver is nil.
func (x *GetFlowsRequest) GetSince() *timestamppb.Timestamp {
	if x != nil {
		return x.Since
	}
	return nil
}

// GetUntil returns Until, or nil if the receiver is nil.
func (x *GetFlowsRequest) GetUntil() *timestamppb.Timestamp {
	if x != nil {
		return x.Until
	}
	return nil
}

// GetFieldMask returns FieldMask, or nil if the receiver is nil.
func (x *GetFlowsRequest) GetFieldMask() *fieldmaskpb.FieldMask {
	if x != nil {
		return x.FieldMask
	}
	return nil
}

// GetExperimental returns Experimental, or nil if the receiver is nil.
func (x *GetFlowsRequest) GetExperimental() *GetFlowsRequest_Experimental {
	if x != nil {
		return x.Experimental
	}
	return nil
}

// GetExtensions returns Extensions, or nil if the receiver is nil.
func (x *GetFlowsRequest) GetExtensions() *anypb.Any {
	if x != nil {
		return x.Extensions
	}
	return nil
}
// GetFlowsResponse contains either a flow or a protocol message.
type GetFlowsResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to ResponseTypes:
	//
	//	*GetFlowsResponse_Flow
	//	*GetFlowsResponse_NodeStatus
	//	*GetFlowsResponse_LostEvents
	ResponseTypes isGetFlowsResponse_ResponseTypes `protobuf_oneof:"response_types"`
	// Name of the node where this event was observed.
	NodeName string `protobuf:"bytes,1000,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// Timestamp at which this event was observed.
	Time *timestamppb.Timestamp `protobuf:"bytes,1001,opt,name=time,proto3" json:"time,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetFlowsResponse) Reset() {
	*x = GetFlowsResponse{}
	mi := &file_observer_observer_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetFlowsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetFlowsResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetFlowsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetFlowsResponse.ProtoReflect.Descriptor instead.
func (*GetFlowsResponse) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{3}
}

// GetResponseTypes returns the oneof wrapper currently set, or nil if the
// receiver is nil or no variant is set.
func (x *GetFlowsResponse) GetResponseTypes() isGetFlowsResponse_ResponseTypes {
	if x != nil {
		return x.ResponseTypes
	}
	return nil
}

// GetFlow returns the flow variant of the oneof, or nil if another
// variant (or none) is set.
func (x *GetFlowsResponse) GetFlow() *flow.Flow {
	if x != nil {
		if x, ok := x.ResponseTypes.(*GetFlowsResponse_Flow); ok {
			return x.Flow
		}
	}
	return nil
}

// GetNodeStatus returns the node_status variant of the oneof, or nil if
// another variant (or none) is set.
func (x *GetFlowsResponse) GetNodeStatus() *relay.NodeStatusEvent {
	if x != nil {
		if x, ok := x.ResponseTypes.(*GetFlowsResponse_NodeStatus); ok {
			return x.NodeStatus
		}
	}
	return nil
}

// GetLostEvents returns the lost_events variant of the oneof, or nil if
// another variant (or none) is set.
func (x *GetFlowsResponse) GetLostEvents() *flow.LostEvent {
	if x != nil {
		if x, ok := x.ResponseTypes.(*GetFlowsResponse_LostEvents); ok {
			return x.LostEvents
		}
	}
	return nil
}

// GetNodeName returns NodeName, or "" if the receiver is nil.
func (x *GetFlowsResponse) GetNodeName() string {
	if x != nil {
		return x.NodeName
	}
	return ""
}

// GetTime returns Time, or nil if the receiver is nil.
func (x *GetFlowsResponse) GetTime() *timestamppb.Timestamp {
	if x != nil {
		return x.Time
	}
	return nil
}
// isGetFlowsResponse_ResponseTypes is the sealed interface implemented by
// the oneof wrapper types for GetFlowsResponse.response_types.
type isGetFlowsResponse_ResponseTypes interface {
	isGetFlowsResponse_ResponseTypes()
}

// GetFlowsResponse_Flow wraps a flow event for the response_types oneof.
type GetFlowsResponse_Flow struct {
	Flow *flow.Flow `protobuf:"bytes,1,opt,name=flow,proto3,oneof"`
}

// GetFlowsResponse_NodeStatus wraps a node status event for the
// response_types oneof.
type GetFlowsResponse_NodeStatus struct {
	// node_status informs clients about the state of the nodes
	// participating in this particular GetFlows request.
	NodeStatus *relay.NodeStatusEvent `protobuf:"bytes,2,opt,name=node_status,json=nodeStatus,proto3,oneof"`
}

// GetFlowsResponse_LostEvents wraps a lost-events notification for the
// response_types oneof.
type GetFlowsResponse_LostEvents struct {
	// lost_events informs clients about events which got dropped due to
	// a Hubble component being unavailable
	LostEvents *flow.LostEvent `protobuf:"bytes,3,opt,name=lost_events,json=lostEvents,proto3,oneof"`
}

func (*GetFlowsResponse_Flow) isGetFlowsResponse_ResponseTypes() {}

func (*GetFlowsResponse_NodeStatus) isGetFlowsResponse_ResponseTypes() {}

func (*GetFlowsResponse_LostEvents) isGetFlowsResponse_ResponseTypes() {}
// GetAgentEventsRequest configures a GetAgentEvents call: number of
// events, first/follow semantics, and an optional since/until window.
type GetAgentEventsRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Number of flows that should be returned. Incompatible with `since/until`.
	// Defaults to the most recent (last) `number` events, unless `first` is
	// true, then it will return the earliest `number` events.
	Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"`
	// first specifies if we should look at the first `number` events or the
	// last `number` of events. Incompatible with `follow`.
	First bool `protobuf:"varint,9,opt,name=first,proto3" json:"first,omitempty"`
	// follow sets when the server should continue to stream agent events after
	// printing the last N agent events.
	Follow bool `protobuf:"varint,2,opt,name=follow,proto3" json:"follow,omitempty"`
	// Since this time for returned agent events. Incompatible with `number`.
	Since *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=since,proto3" json:"since,omitempty"`
	// Until this time for returned agent events. Incompatible with `number`.
	Until *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=until,proto3" json:"until,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetAgentEventsRequest) Reset() {
	*x = GetAgentEventsRequest{}
	mi := &file_observer_observer_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetAgentEventsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetAgentEventsRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetAgentEventsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetAgentEventsRequest.ProtoReflect.Descriptor instead.
func (*GetAgentEventsRequest) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{4}
}

// GetNumber returns Number, or 0 if the receiver is nil.
func (x *GetAgentEventsRequest) GetNumber() uint64 {
	if x != nil {
		return x.Number
	}
	return 0
}

// GetFirst returns First, or false if the receiver is nil.
func (x *GetAgentEventsRequest) GetFirst() bool {
	if x != nil {
		return x.First
	}
	return false
}

// GetFollow returns Follow, or false if the receiver is nil.
func (x *GetAgentEventsRequest) GetFollow() bool {
	if x != nil {
		return x.Follow
	}
	return false
}

// GetSince returns Since, or nil if the receiver is nil.
func (x *GetAgentEventsRequest) GetSince() *timestamppb.Timestamp {
	if x != nil {
		return x.Since
	}
	return nil
}

// GetUntil returns Until, or nil if the receiver is nil.
func (x *GetAgentEventsRequest) GetUntil() *timestamppb.Timestamp {
	if x != nil {
		return x.Until
	}
	return nil
}
// GetAgentEventsResponse contains an event received from the Cilium agent.
type GetAgentEventsResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	AgentEvent *flow.AgentEvent `protobuf:"bytes,1,opt,name=agent_event,json=agentEvent,proto3" json:"agent_event,omitempty"`
	// Name of the node where this event was observed.
	NodeName string `protobuf:"bytes,1000,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// Timestamp at which this event was observed.
	Time *timestamppb.Timestamp `protobuf:"bytes,1001,opt,name=time,proto3" json:"time,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetAgentEventsResponse) Reset() {
	*x = GetAgentEventsResponse{}
	mi := &file_observer_observer_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetAgentEventsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetAgentEventsResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetAgentEventsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetAgentEventsResponse.ProtoReflect.Descriptor instead.
func (*GetAgentEventsResponse) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{5}
}

// GetAgentEvent returns AgentEvent, or nil if the receiver is nil.
func (x *GetAgentEventsResponse) GetAgentEvent() *flow.AgentEvent {
	if x != nil {
		return x.AgentEvent
	}
	return nil
}

// GetNodeName returns NodeName, or "" if the receiver is nil.
func (x *GetAgentEventsResponse) GetNodeName() string {
	if x != nil {
		return x.NodeName
	}
	return ""
}

// GetTime returns Time, or nil if the receiver is nil.
func (x *GetAgentEventsResponse) GetTime() *timestamppb.Timestamp {
	if x != nil {
		return x.Time
	}
	return nil
}
// GetDebugEventsRequest configures a GetDebugEvents call: number of
// events, first/follow semantics, and an optional since/until window.
type GetDebugEventsRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Number of events that should be returned. Incompatible with `since/until`.
	// Defaults to the most recent (last) `number` events, unless `first` is
	// true, then it will return the earliest `number` events.
	Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"`
	// first specifies if we should look at the first `number` events or the
	// last `number` of events. Incompatible with `follow`.
	First bool `protobuf:"varint,9,opt,name=first,proto3" json:"first,omitempty"`
	// follow sets when the server should continue to stream debug events after
	// printing the last N debug events.
	Follow bool `protobuf:"varint,2,opt,name=follow,proto3" json:"follow,omitempty"`
	// Since this time for returned debug events. Incompatible with `number`.
	Since *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=since,proto3" json:"since,omitempty"`
	// Until this time for returned debug events. Incompatible with `number`.
	Until *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=until,proto3" json:"until,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetDebugEventsRequest) Reset() {
	*x = GetDebugEventsRequest{}
	mi := &file_observer_observer_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetDebugEventsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetDebugEventsRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetDebugEventsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetDebugEventsRequest.ProtoReflect.Descriptor instead.
func (*GetDebugEventsRequest) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{6}
}

// GetNumber returns Number, or 0 if the receiver is nil.
func (x *GetDebugEventsRequest) GetNumber() uint64 {
	if x != nil {
		return x.Number
	}
	return 0
}

// GetFirst returns First, or false if the receiver is nil.
func (x *GetDebugEventsRequest) GetFirst() bool {
	if x != nil {
		return x.First
	}
	return false
}

// GetFollow returns Follow, or false if the receiver is nil.
func (x *GetDebugEventsRequest) GetFollow() bool {
	if x != nil {
		return x.Follow
	}
	return false
}

// GetSince returns Since, or nil if the receiver is nil.
func (x *GetDebugEventsRequest) GetSince() *timestamppb.Timestamp {
	if x != nil {
		return x.Since
	}
	return nil
}

// GetUntil returns Until, or nil if the receiver is nil.
func (x *GetDebugEventsRequest) GetUntil() *timestamppb.Timestamp {
	if x != nil {
		return x.Until
	}
	return nil
}
// GetDebugEventsResponse contains a Cilium datapath debug events.
type GetDebugEventsResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	DebugEvent *flow.DebugEvent `protobuf:"bytes,1,opt,name=debug_event,json=debugEvent,proto3" json:"debug_event,omitempty"`
	// Name of the node where this event was observed.
	NodeName string `protobuf:"bytes,1000,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// Timestamp at which this event was observed.
	Time *timestamppb.Timestamp `protobuf:"bytes,1001,opt,name=time,proto3" json:"time,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetDebugEventsResponse) Reset() {
	*x = GetDebugEventsResponse{}
	mi := &file_observer_observer_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetDebugEventsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetDebugEventsResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetDebugEventsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetDebugEventsResponse.ProtoReflect.Descriptor instead.
func (*GetDebugEventsResponse) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{7}
}

// GetDebugEvent returns DebugEvent, or nil if the receiver is nil.
func (x *GetDebugEventsResponse) GetDebugEvent() *flow.DebugEvent {
	if x != nil {
		return x.DebugEvent
	}
	return nil
}

// GetNodeName returns NodeName, or "" if the receiver is nil.
func (x *GetDebugEventsResponse) GetNodeName() string {
	if x != nil {
		return x.NodeName
	}
	return ""
}

// GetTime returns Time, or nil if the receiver is nil.
func (x *GetDebugEventsResponse) GetTime() *timestamppb.Timestamp {
	if x != nil {
		return x.Time
	}
	return nil
}
// GetNodesRequest is the (empty) request message for the observer's
// GetNodes RPC.
type GetNodesRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetNodesRequest) Reset() {
	*x = GetNodesRequest{}
	mi := &file_observer_observer_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetNodesRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetNodesRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetNodesRequest) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNodesRequest.ProtoReflect.Descriptor instead.
func (*GetNodesRequest) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{8}
}
// GetNodesResponse contains the list of nodes.
type GetNodesResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Nodes is an exhaustive list of nodes.
	Nodes []*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetNodesResponse) Reset() {
	*x = GetNodesResponse{}
	mi := &file_observer_observer_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetNodesResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetNodesResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetNodesResponse) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNodesResponse.ProtoReflect.Descriptor instead.
func (*GetNodesResponse) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{9}
}

// GetNodes returns Nodes, or nil if the receiver is nil.
func (x *GetNodesResponse) GetNodes() []*Node {
	if x != nil {
		return x.Nodes
	}
	return nil
}
// Node represents a cluster node.
type Node struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Name is the name of the node.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Version is the version of Cilium/Hubble as reported by the node.
	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
	// Address is the network address of the API endpoint.
	Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
	// State represents the known state of the node.
	State relay.NodeState `protobuf:"varint,4,opt,name=state,proto3,enum=relay.NodeState" json:"state,omitempty"`
	// TLS reports TLS related information.
	Tls *TLS `protobuf:"bytes,5,opt,name=tls,proto3" json:"tls,omitempty"`
	// UptimeNS is the uptime of this instance in nanoseconds
	UptimeNs uint64 `protobuf:"varint,6,opt,name=uptime_ns,json=uptimeNs,proto3" json:"uptime_ns,omitempty"`
	// number of currently captured flows
	NumFlows uint64 `protobuf:"varint,7,opt,name=num_flows,json=numFlows,proto3" json:"num_flows,omitempty"`
	// maximum capacity of the ring buffer
	MaxFlows uint64 `protobuf:"varint,8,opt,name=max_flows,json=maxFlows,proto3" json:"max_flows,omitempty"`
	// total amount of flows observed since the observer was started
	SeenFlows uint64 `protobuf:"varint,9,opt,name=seen_flows,json=seenFlows,proto3" json:"seen_flows,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *Node) Reset() {
	*x = Node{}
	mi := &file_observer_observer_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *Node) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*Node) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *Node) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Node.ProtoReflect.Descriptor instead.
func (*Node) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{10}
}

// GetName returns Name, or "" if the receiver is nil.
func (x *Node) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetVersion returns Version, or "" if the receiver is nil.
func (x *Node) GetVersion() string {
	if x != nil {
		return x.Version
	}
	return ""
}

// GetAddress returns Address, or "" if the receiver is nil.
func (x *Node) GetAddress() string {
	if x != nil {
		return x.Address
	}
	return ""
}

// GetState returns State, or the zero enum value if the receiver is nil.
func (x *Node) GetState() relay.NodeState {
	if x != nil {
		return x.State
	}
	return relay.NodeState(0)
}

// GetTls returns Tls, or nil if the receiver is nil.
func (x *Node) GetTls() *TLS {
	if x != nil {
		return x.Tls
	}
	return nil
}

// GetUptimeNs returns UptimeNs, or 0 if the receiver is nil.
func (x *Node) GetUptimeNs() uint64 {
	if x != nil {
		return x.UptimeNs
	}
	return 0
}

// GetNumFlows returns NumFlows, or 0 if the receiver is nil.
func (x *Node) GetNumFlows() uint64 {
	if x != nil {
		return x.NumFlows
	}
	return 0
}

// GetMaxFlows returns MaxFlows, or 0 if the receiver is nil.
func (x *Node) GetMaxFlows() uint64 {
	if x != nil {
		return x.MaxFlows
	}
	return 0
}

// GetSeenFlows returns SeenFlows, or 0 if the receiver is nil.
func (x *Node) GetSeenFlows() uint64 {
	if x != nil {
		return x.SeenFlows
	}
	return 0
}
// TLS represents TLS information.
type TLS struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Enabled reports whether TLS is enabled or not.
	Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// ServerName is the TLS server name that can be used as part of the TLS
	// cert validation process.
	ServerName string `protobuf:"bytes,2,opt,name=server_name,json=serverName,proto3" json:"server_name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *TLS) Reset() {
	*x = TLS{}
	mi := &file_observer_observer_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *TLS) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*TLS) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *TLS) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TLS.ProtoReflect.Descriptor instead.
func (*TLS) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{11}
}

// GetEnabled returns Enabled, or false if the receiver is nil.
func (x *TLS) GetEnabled() bool {
	if x != nil {
		return x.Enabled
	}
	return false
}

// GetServerName returns ServerName, or "" if the receiver is nil.
func (x *TLS) GetServerName() string {
	if x != nil {
		return x.ServerName
	}
	return ""
}
// GetNamespacesRequest is the (empty) request message for the observer's
// GetNamespaces RPC.
type GetNamespacesRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetNamespacesRequest) Reset() {
	*x = GetNamespacesRequest{}
	mi := &file_observer_observer_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetNamespacesRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetNamespacesRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetNamespacesRequest) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNamespacesRequest.ProtoReflect.Descriptor instead.
func (*GetNamespacesRequest) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{12}
}
// GetNamespacesResponse contains the list of namespaces.
type GetNamespacesResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Namespaces is a list of namespaces with flows
	Namespaces []*Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *GetNamespacesResponse) Reset() {
	*x = GetNamespacesResponse{}
	mi := &file_observer_observer_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *GetNamespacesResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*GetNamespacesResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *GetNamespacesResponse) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNamespacesResponse.ProtoReflect.Descriptor instead.
func (*GetNamespacesResponse) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{13}
}

// GetNamespaces returns Namespaces, or nil if the receiver is nil.
func (x *GetNamespacesResponse) GetNamespaces() []*Namespace {
	if x != nil {
		return x.Namespaces
	}
	return nil
}
// Namespace identifies a Kubernetes namespace within a cluster.
type Namespace struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"`
	Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *Namespace) Reset() {
	*x = Namespace{}
	mi := &file_observer_observer_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *Namespace) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*Namespace) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *Namespace) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Namespace.ProtoReflect.Descriptor instead.
func (*Namespace) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{14}
}

// GetCluster returns Cluster, or "" if the receiver is nil.
func (x *Namespace) GetCluster() string {
	if x != nil {
		return x.Cluster
	}
	return ""
}

// GetNamespace returns Namespace, or "" if the receiver is nil.
func (x *Namespace) GetNamespace() string {
	if x != nil {
		return x.Namespace
	}
	return ""
}
// ExportEvent contains an event to be exported. Not to be used outside of the
// exporter feature.
type ExportEvent struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to ResponseTypes:
	//
	//	*ExportEvent_Flow
	//	*ExportEvent_NodeStatus
	//	*ExportEvent_LostEvents
	//	*ExportEvent_AgentEvent
	//	*ExportEvent_DebugEvent
	ResponseTypes isExportEvent_ResponseTypes `protobuf_oneof:"response_types"`
	// Name of the node where this event was observed.
	NodeName string `protobuf:"bytes,1000,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// Timestamp at which this event was observed.
	Time *timestamppb.Timestamp `protobuf:"bytes,1001,opt,name=time,proto3" json:"time,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-registers its
// message info with the protobuf runtime.
func (x *ExportEvent) Reset() {
	*x = ExportEvent{}
	mi := &file_observer_observer_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the message rendered via the protobuf runtime's
// text formatter.
func (x *ExportEvent) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ExportEvent) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing
// the message info on first use.
func (x *ExportEvent) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ExportEvent.ProtoReflect.Descriptor instead.
func (*ExportEvent) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{15}
}

// GetResponseTypes returns the oneof wrapper currently set, or nil if the
// receiver is nil or no variant is set.
func (x *ExportEvent) GetResponseTypes() isExportEvent_ResponseTypes {
	if x != nil {
		return x.ResponseTypes
	}
	return nil
}

// GetFlow returns the flow variant of the oneof, or nil if another
// variant (or none) is set.
func (x *ExportEvent) GetFlow() *flow.Flow {
	if x != nil {
		if x, ok := x.ResponseTypes.(*ExportEvent_Flow); ok {
			return x.Flow
		}
	}
	return nil
}

// GetNodeStatus returns the node_status variant of the oneof, or nil if
// another variant (or none) is set.
func (x *ExportEvent) GetNodeStatus() *relay.NodeStatusEvent {
	if x != nil {
		if x, ok := x.ResponseTypes.(*ExportEvent_NodeStatus); ok {
			return x.NodeStatus
		}
	}
	return nil
}

// GetLostEvents returns the lost_events variant of the oneof, or nil if
// another variant (or none) is set.
func (x *ExportEvent) GetLostEvents() *flow.LostEvent {
	if x != nil {
		if x, ok := x.ResponseTypes.(*ExportEvent_LostEvents); ok {
			return x.LostEvents
		}
	}
	return nil
}

// GetAgentEvent returns the agent_event variant of the oneof, or nil if
// another variant (or none) is set.
func (x *ExportEvent) GetAgentEvent() *flow.AgentEvent {
	if x != nil {
		if x, ok := x.ResponseTypes.(*ExportEvent_AgentEvent); ok {
			return x.AgentEvent
		}
	}
	return nil
}

// GetDebugEvent returns the debug_event variant of the oneof, or nil if
// another variant (or none) is set.
func (x *ExportEvent) GetDebugEvent() *flow.DebugEvent {
	if x != nil {
		if x, ok := x.ResponseTypes.(*ExportEvent_DebugEvent); ok {
			return x.DebugEvent
		}
	}
	return nil
}

// GetNodeName returns NodeName, or "" if the receiver is nil.
func (x *ExportEvent) GetNodeName() string {
	if x != nil {
		return x.NodeName
	}
	return ""
}
func (x *ExportEvent) GetTime() *timestamppb.Timestamp {
if x != nil {
return x.Time
}
return nil
}
// isExportEvent_ResponseTypes is implemented by every valid wrapper type of
// the ExportEvent response_types oneof.
type isExportEvent_ResponseTypes interface {
	isExportEvent_ResponseTypes()
}

type ExportEvent_Flow struct {
	Flow *flow.Flow `protobuf:"bytes,1,opt,name=flow,proto3,oneof"`
}

type ExportEvent_NodeStatus struct {
	// node_status informs clients about the state of the nodes
	// participating in this particular GetFlows request.
	NodeStatus *relay.NodeStatusEvent `protobuf:"bytes,2,opt,name=node_status,json=nodeStatus,proto3,oneof"`
}

type ExportEvent_LostEvents struct {
	// lost_events informs clients about events which got dropped due to
	// a Hubble component being unavailable
	LostEvents *flow.LostEvent `protobuf:"bytes,3,opt,name=lost_events,json=lostEvents,proto3,oneof"`
}

type ExportEvent_AgentEvent struct {
	// agent_event informs clients about an event received from the Cilium
	// agent.
	AgentEvent *flow.AgentEvent `protobuf:"bytes,4,opt,name=agent_event,json=agentEvent,proto3,oneof"`
}

type ExportEvent_DebugEvent struct {
	// debug_event contains Cilium datapath debug events
	DebugEvent *flow.DebugEvent `protobuf:"bytes,5,opt,name=debug_event,json=debugEvent,proto3,oneof"`
}

func (*ExportEvent_Flow) isExportEvent_ResponseTypes() {}

func (*ExportEvent_NodeStatus) isExportEvent_ResponseTypes() {}

func (*ExportEvent_LostEvents) isExportEvent_ResponseTypes() {}

func (*ExportEvent_AgentEvent) isExportEvent_ResponseTypes() {}

func (*ExportEvent_DebugEvent) isExportEvent_ResponseTypes() {}
// Experimental contains fields that are not stable yet. Support for
// experimental features is always optional and subject to change.
type GetFlowsRequest_Experimental struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero state and rebinds its cached message info.
func (x *GetFlowsRequest_Experimental) Reset() {
	*x = GetFlowsRequest_Experimental{}
	mi := &file_observer_observer_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *GetFlowsRequest_Experimental) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GetFlowsRequest_Experimental as a protobuf message.
func (*GetFlowsRequest_Experimental) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message info on first use.
func (x *GetFlowsRequest_Experimental) ProtoReflect() protoreflect.Message {
	mi := &file_observer_observer_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetFlowsRequest_Experimental.ProtoReflect.Descriptor instead.
func (*GetFlowsRequest_Experimental) Descriptor() ([]byte, []int) {
	return file_observer_observer_proto_rawDescGZIP(), []int{2, 0}
}
// File_observer_observer_proto is the compiled file descriptor for
// observer/observer.proto; it is populated by file_observer_observer_proto_init.
var File_observer_observer_proto protoreflect.FileDescriptor
const file_observer_observer_proto_rawDesc = "" +
"\n" +
"\x17observer/observer.proto\x12\bobserver\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a google/protobuf/field_mask.proto\x1a\x0fflow/flow.proto\x1a\x11relay/relay.proto\"\x15\n" +
"\x13ServerStatusRequest\"\x92\x03\n" +
"\x14ServerStatusResponse\x12\x1b\n" +
"\tnum_flows\x18\x01 \x01(\x04R\bnumFlows\x12\x1b\n" +
"\tmax_flows\x18\x02 \x01(\x04R\bmaxFlows\x12\x1d\n" +
"\n" +
"seen_flows\x18\x03 \x01(\x04R\tseenFlows\x12\x1b\n" +
"\tuptime_ns\x18\x04 \x01(\x04R\buptimeNs\x12L\n" +
"\x13num_connected_nodes\x18\x05 \x01(\v2\x1c.google.protobuf.UInt32ValueR\x11numConnectedNodes\x12P\n" +
"\x15num_unavailable_nodes\x18\x06 \x01(\v2\x1c.google.protobuf.UInt32ValueR\x13numUnavailableNodes\x12+\n" +
"\x11unavailable_nodes\x18\a \x03(\tR\x10unavailableNodes\x12\x18\n" +
"\aversion\x18\b \x01(\tR\aversion\x12\x1d\n" +
"\n" +
"flows_rate\x18\t \x01(\x01R\tflowsRate\"\xf7\x03\n" +
"\x0fGetFlowsRequest\x12\x16\n" +
"\x06number\x18\x01 \x01(\x04R\x06number\x12\x14\n" +
"\x05first\x18\t \x01(\bR\x05first\x12\x16\n" +
"\x06follow\x18\x03 \x01(\bR\x06follow\x12.\n" +
"\tblacklist\x18\x05 \x03(\v2\x10.flow.FlowFilterR\tblacklist\x12.\n" +
"\twhitelist\x18\x06 \x03(\v2\x10.flow.FlowFilterR\twhitelist\x120\n" +
"\x05since\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x05since\x120\n" +
"\x05until\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\x05until\x129\n" +
"\n" +
"field_mask\x18\n" +
" \x01(\v2\x1a.google.protobuf.FieldMaskR\tfieldMask\x12K\n" +
"\fexperimental\x18\xe7\a \x01(\v2&.observer.GetFlowsRequest.ExperimentalR\fexperimental\x126\n" +
"\n" +
"extensions\x18\xf0\x93\t \x01(\v2\x14.google.protobuf.AnyR\n" +
"extensions\x1a\x14\n" +
"\fExperimentalJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03\"\x84\x02\n" +
"\x10GetFlowsResponse\x12 \n" +
"\x04flow\x18\x01 \x01(\v2\n" +
".flow.FlowH\x00R\x04flow\x129\n" +
"\vnode_status\x18\x02 \x01(\v2\x16.relay.NodeStatusEventH\x00R\n" +
"nodeStatus\x122\n" +
"\vlost_events\x18\x03 \x01(\v2\x0f.flow.LostEventH\x00R\n" +
"lostEvents\x12\x1c\n" +
"\tnode_name\x18\xe8\a \x01(\tR\bnodeName\x12/\n" +
"\x04time\x18\xe9\a \x01(\v2\x1a.google.protobuf.TimestampR\x04timeB\x10\n" +
"\x0eresponse_types\"\xc1\x01\n" +
"\x15GetAgentEventsRequest\x12\x16\n" +
"\x06number\x18\x01 \x01(\x04R\x06number\x12\x14\n" +
"\x05first\x18\t \x01(\bR\x05first\x12\x16\n" +
"\x06follow\x18\x02 \x01(\bR\x06follow\x120\n" +
"\x05since\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x05since\x120\n" +
"\x05until\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\x05until\"\x9a\x01\n" +
"\x16GetAgentEventsResponse\x121\n" +
"\vagent_event\x18\x01 \x01(\v2\x10.flow.AgentEventR\n" +
"agentEvent\x12\x1c\n" +
"\tnode_name\x18\xe8\a \x01(\tR\bnodeName\x12/\n" +
"\x04time\x18\xe9\a \x01(\v2\x1a.google.protobuf.TimestampR\x04time\"\xc1\x01\n" +
"\x15GetDebugEventsRequest\x12\x16\n" +
"\x06number\x18\x01 \x01(\x04R\x06number\x12\x14\n" +
"\x05first\x18\t \x01(\bR\x05first\x12\x16\n" +
"\x06follow\x18\x02 \x01(\bR\x06follow\x120\n" +
"\x05since\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x05since\x120\n" +
"\x05until\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\x05until\"\x9a\x01\n" +
"\x16GetDebugEventsResponse\x121\n" +
"\vdebug_event\x18\x01 \x01(\v2\x10.flow.DebugEventR\n" +
"debugEvent\x12\x1c\n" +
"\tnode_name\x18\xe8\a \x01(\tR\bnodeName\x12/\n" +
"\x04time\x18\xe9\a \x01(\v2\x1a.google.protobuf.TimestampR\x04time\"\x11\n" +
"\x0fGetNodesRequest\"8\n" +
"\x10GetNodesResponse\x12$\n" +
"\x05nodes\x18\x01 \x03(\v2\x0e.observer.NodeR\x05nodes\"\x8d\x02\n" +
"\x04Node\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
"\aversion\x18\x02 \x01(\tR\aversion\x12\x18\n" +
"\aaddress\x18\x03 \x01(\tR\aaddress\x12&\n" +
"\x05state\x18\x04 \x01(\x0e2\x10.relay.NodeStateR\x05state\x12\x1f\n" +
"\x03tls\x18\x05 \x01(\v2\r.observer.TLSR\x03tls\x12\x1b\n" +
"\tuptime_ns\x18\x06 \x01(\x04R\buptimeNs\x12\x1b\n" +
"\tnum_flows\x18\a \x01(\x04R\bnumFlows\x12\x1b\n" +
"\tmax_flows\x18\b \x01(\x04R\bmaxFlows\x12\x1d\n" +
"\n" +
"seen_flows\x18\t \x01(\x04R\tseenFlows\"@\n" +
"\x03TLS\x12\x18\n" +
"\aenabled\x18\x01 \x01(\bR\aenabled\x12\x1f\n" +
"\vserver_name\x18\x02 \x01(\tR\n" +
"serverName\"\x16\n" +
"\x14GetNamespacesRequest\"L\n" +
"\x15GetNamespacesResponse\x123\n" +
"\n" +
"namespaces\x18\x01 \x03(\v2\x13.observer.NamespaceR\n" +
"namespaces\"C\n" +
"\tNamespace\x12\x18\n" +
"\acluster\x18\x01 \x01(\tR\acluster\x12\x1c\n" +
"\tnamespace\x18\x02 \x01(\tR\tnamespace\"\xe9\x02\n" +
"\vExportEvent\x12 \n" +
"\x04flow\x18\x01 \x01(\v2\n" +
".flow.FlowH\x00R\x04flow\x129\n" +
"\vnode_status\x18\x02 \x01(\v2\x16.relay.NodeStatusEventH\x00R\n" +
"nodeStatus\x122\n" +
"\vlost_events\x18\x03 \x01(\v2\x0f.flow.LostEventH\x00R\n" +
"lostEvents\x123\n" +
"\vagent_event\x18\x04 \x01(\v2\x10.flow.AgentEventH\x00R\n" +
"agentEvent\x123\n" +
"\vdebug_event\x18\x05 \x01(\v2\x10.flow.DebugEventH\x00R\n" +
"debugEvent\x12\x1c\n" +
"\tnode_name\x18\xe8\a \x01(\tR\bnodeName\x12/\n" +
"\x04time\x18\xe9\a \x01(\v2\x1a.google.protobuf.TimestampR\x04timeB\x10\n" +
"\x0eresponse_types2\xed\x03\n" +
"\bObserver\x12E\n" +
"\bGetFlows\x12\x19.observer.GetFlowsRequest\x1a\x1a.observer.GetFlowsResponse\"\x000\x01\x12W\n" +
"\x0eGetAgentEvents\x12\x1f.observer.GetAgentEventsRequest\x1a .observer.GetAgentEventsResponse\"\x000\x01\x12W\n" +
"\x0eGetDebugEvents\x12\x1f.observer.GetDebugEventsRequest\x1a .observer.GetDebugEventsResponse\"\x000\x01\x12C\n" +
"\bGetNodes\x12\x19.observer.GetNodesRequest\x1a\x1a.observer.GetNodesResponse\"\x00\x12R\n" +
"\rGetNamespaces\x12\x1e.observer.GetNamespacesRequest\x1a\x1f.observer.GetNamespacesResponse\"\x00\x12O\n" +
"\fServerStatus\x12\x1d.observer.ServerStatusRequest\x1a\x1e.observer.ServerStatusResponse\"\x00B*Z(github.com/cilium/cilium/api/v1/observerP\x04b\x06proto3"
var (
	file_observer_observer_proto_rawDescOnce sync.Once
	file_observer_observer_proto_rawDescData []byte
)

// file_observer_observer_proto_rawDescGZIP gzip-compresses the raw file
// descriptor exactly once and returns the cached compressed bytes.
func file_observer_observer_proto_rawDescGZIP() []byte {
	file_observer_observer_proto_rawDescOnce.Do(func() {
		file_observer_observer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_observer_observer_proto_rawDesc), len(file_observer_observer_proto_rawDesc)))
	})
	return file_observer_observer_proto_rawDescData
}
// file_observer_observer_proto_msgTypes holds runtime message info for the
// 17 messages declared in this file, indexed as in goTypes below.
var file_observer_observer_proto_msgTypes = make([]protoimpl.MessageInfo, 17)

// file_observer_observer_proto_goTypes maps descriptor type indexes to Go types.
var file_observer_observer_proto_goTypes = []any{
	(*ServerStatusRequest)(nil),          // 0: observer.ServerStatusRequest
	(*ServerStatusResponse)(nil),         // 1: observer.ServerStatusResponse
	(*GetFlowsRequest)(nil),              // 2: observer.GetFlowsRequest
	(*GetFlowsResponse)(nil),             // 3: observer.GetFlowsResponse
	(*GetAgentEventsRequest)(nil),        // 4: observer.GetAgentEventsRequest
	(*GetAgentEventsResponse)(nil),       // 5: observer.GetAgentEventsResponse
	(*GetDebugEventsRequest)(nil),        // 6: observer.GetDebugEventsRequest
	(*GetDebugEventsResponse)(nil),       // 7: observer.GetDebugEventsResponse
	(*GetNodesRequest)(nil),              // 8: observer.GetNodesRequest
	(*GetNodesResponse)(nil),             // 9: observer.GetNodesResponse
	(*Node)(nil),                         // 10: observer.Node
	(*TLS)(nil),                          // 11: observer.TLS
	(*GetNamespacesRequest)(nil),         // 12: observer.GetNamespacesRequest
	(*GetNamespacesResponse)(nil),        // 13: observer.GetNamespacesResponse
	(*Namespace)(nil),                    // 14: observer.Namespace
	(*ExportEvent)(nil),                  // 15: observer.ExportEvent
	(*GetFlowsRequest_Experimental)(nil), // 16: observer.GetFlowsRequest.Experimental
	(*wrapperspb.UInt32Value)(nil),       // 17: google.protobuf.UInt32Value
	(*flow.FlowFilter)(nil),              // 18: flow.FlowFilter
	(*timestamppb.Timestamp)(nil),        // 19: google.protobuf.Timestamp
	(*fieldmaskpb.FieldMask)(nil),        // 20: google.protobuf.FieldMask
	(*anypb.Any)(nil),                    // 21: google.protobuf.Any
	(*flow.Flow)(nil),                    // 22: flow.Flow
	(*relay.NodeStatusEvent)(nil),        // 23: relay.NodeStatusEvent
	(*flow.LostEvent)(nil),               // 24: flow.LostEvent
	(*flow.AgentEvent)(nil),              // 25: flow.AgentEvent
	(*flow.DebugEvent)(nil),              // 26: flow.DebugEvent
	(relay.NodeState)(0),                 // 27: relay.NodeState
}

// file_observer_observer_proto_depIdxs lists, as indexes into goTypes, each
// field's type, then each method's input and output types; the trailing
// markers delimit the sub-lists.
var file_observer_observer_proto_depIdxs = []int32{
	17, // 0: observer.ServerStatusResponse.num_connected_nodes:type_name -> google.protobuf.UInt32Value
	17, // 1: observer.ServerStatusResponse.num_unavailable_nodes:type_name -> google.protobuf.UInt32Value
	18, // 2: observer.GetFlowsRequest.blacklist:type_name -> flow.FlowFilter
	18, // 3: observer.GetFlowsRequest.whitelist:type_name -> flow.FlowFilter
	19, // 4: observer.GetFlowsRequest.since:type_name -> google.protobuf.Timestamp
	19, // 5: observer.GetFlowsRequest.until:type_name -> google.protobuf.Timestamp
	20, // 6: observer.GetFlowsRequest.field_mask:type_name -> google.protobuf.FieldMask
	16, // 7: observer.GetFlowsRequest.experimental:type_name -> observer.GetFlowsRequest.Experimental
	21, // 8: observer.GetFlowsRequest.extensions:type_name -> google.protobuf.Any
	22, // 9: observer.GetFlowsResponse.flow:type_name -> flow.Flow
	23, // 10: observer.GetFlowsResponse.node_status:type_name -> relay.NodeStatusEvent
	24, // 11: observer.GetFlowsResponse.lost_events:type_name -> flow.LostEvent
	19, // 12: observer.GetFlowsResponse.time:type_name -> google.protobuf.Timestamp
	19, // 13: observer.GetAgentEventsRequest.since:type_name -> google.protobuf.Timestamp
	19, // 14: observer.GetAgentEventsRequest.until:type_name -> google.protobuf.Timestamp
	25, // 15: observer.GetAgentEventsResponse.agent_event:type_name -> flow.AgentEvent
	19, // 16: observer.GetAgentEventsResponse.time:type_name -> google.protobuf.Timestamp
	19, // 17: observer.GetDebugEventsRequest.since:type_name -> google.protobuf.Timestamp
	19, // 18: observer.GetDebugEventsRequest.until:type_name -> google.protobuf.Timestamp
	26, // 19: observer.GetDebugEventsResponse.debug_event:type_name -> flow.DebugEvent
	19, // 20: observer.GetDebugEventsResponse.time:type_name -> google.protobuf.Timestamp
	10, // 21: observer.GetNodesResponse.nodes:type_name -> observer.Node
	27, // 22: observer.Node.state:type_name -> relay.NodeState
	11, // 23: observer.Node.tls:type_name -> observer.TLS
	14, // 24: observer.GetNamespacesResponse.namespaces:type_name -> observer.Namespace
	22, // 25: observer.ExportEvent.flow:type_name -> flow.Flow
	23, // 26: observer.ExportEvent.node_status:type_name -> relay.NodeStatusEvent
	24, // 27: observer.ExportEvent.lost_events:type_name -> flow.LostEvent
	25, // 28: observer.ExportEvent.agent_event:type_name -> flow.AgentEvent
	26, // 29: observer.ExportEvent.debug_event:type_name -> flow.DebugEvent
	19, // 30: observer.ExportEvent.time:type_name -> google.protobuf.Timestamp
	2,  // 31: observer.Observer.GetFlows:input_type -> observer.GetFlowsRequest
	4,  // 32: observer.Observer.GetAgentEvents:input_type -> observer.GetAgentEventsRequest
	6,  // 33: observer.Observer.GetDebugEvents:input_type -> observer.GetDebugEventsRequest
	8,  // 34: observer.Observer.GetNodes:input_type -> observer.GetNodesRequest
	12, // 35: observer.Observer.GetNamespaces:input_type -> observer.GetNamespacesRequest
	0,  // 36: observer.Observer.ServerStatus:input_type -> observer.ServerStatusRequest
	3,  // 37: observer.Observer.GetFlows:output_type -> observer.GetFlowsResponse
	5,  // 38: observer.Observer.GetAgentEvents:output_type -> observer.GetAgentEventsResponse
	7,  // 39: observer.Observer.GetDebugEvents:output_type -> observer.GetDebugEventsResponse
	9,  // 40: observer.Observer.GetNodes:output_type -> observer.GetNodesResponse
	13, // 41: observer.Observer.GetNamespaces:output_type -> observer.GetNamespacesResponse
	1,  // 42: observer.Observer.ServerStatus:output_type -> observer.ServerStatusResponse
	37, // [37:43] is the sub-list for method output_type
	31, // [31:37] is the sub-list for method input_type
	31, // [31:31] is the sub-list for extension type_name
	31, // [31:31] is the sub-list for extension extendee
	0,  // [0:31] is the sub-list for field type_name
}
func init() { file_observer_observer_proto_init() }

// file_observer_observer_proto_init builds and registers the file's type
// information with the protobuf runtime. It is idempotent: subsequent calls
// return immediately once File_observer_observer_proto is set.
func file_observer_observer_proto_init() {
	if File_observer_observer_proto != nil {
		return
	}
	// Wire the oneof wrapper types: msgTypes[3] is GetFlowsResponse,
	// msgTypes[15] is ExportEvent (see goTypes ordering).
	file_observer_observer_proto_msgTypes[3].OneofWrappers = []any{
		(*GetFlowsResponse_Flow)(nil),
		(*GetFlowsResponse_NodeStatus)(nil),
		(*GetFlowsResponse_LostEvents)(nil),
	}
	file_observer_observer_proto_msgTypes[15].OneofWrappers = []any{
		(*ExportEvent_Flow)(nil),
		(*ExportEvent_NodeStatus)(nil),
		(*ExportEvent_LostEvents)(nil),
		(*ExportEvent_AgentEvent)(nil),
		(*ExportEvent_DebugEvent)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_observer_observer_proto_rawDesc), len(file_observer_observer_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   17,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_observer_observer_proto_goTypes,
		DependencyIndexes: file_observer_observer_proto_depIdxs,
		MessageInfos:      file_observer_observer_proto_msgTypes,
	}.Build()
	File_observer_observer_proto = out.File
	// Release the construction-time tables so they can be garbage collected.
	file_observer_observer_proto_goTypes = nil
	file_observer_observer_proto_depIdxs = nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go-json. DO NOT EDIT.
// source: observer/observer.proto
package observer
import (
"google.golang.org/protobuf/encoding/protojson"
)
// The methods below implement encoding/json's Marshaler and Unmarshaler for
// every message in this package via protojson. UseProtoNames makes marshaled
// output use the proto field names (e.g. node_name) instead of lowerCamelCase.

// MarshalJSON implements json.Marshaler
func (msg *ServerStatusRequest) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *ServerStatusRequest) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *ServerStatusResponse) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *ServerStatusResponse) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetFlowsRequest) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetFlowsRequest) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetFlowsRequest_Experimental) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetFlowsRequest_Experimental) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetFlowsResponse) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetFlowsResponse) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetAgentEventsRequest) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetAgentEventsRequest) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetAgentEventsResponse) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetAgentEventsResponse) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetDebugEventsRequest) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetDebugEventsRequest) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetDebugEventsResponse) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetDebugEventsResponse) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetNodesRequest) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetNodesRequest) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetNodesResponse) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetNodesResponse) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *Node) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *Node) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *TLS) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *TLS) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetNamespacesRequest) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetNamespacesRequest) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *GetNamespacesResponse) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *GetNamespacesResponse) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *Namespace) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *Namespace) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}

// MarshalJSON implements json.Marshaler
func (msg *ExportEvent) MarshalJSON() ([]byte, error) {
	return protojson.MarshalOptions{
		UseProtoNames: true,
	}.Marshal(msg)
}

// UnmarshalJSON implements json.Unmarshaler
func (msg *ExportEvent) UnmarshalJSON(b []byte) error {
	return protojson.UnmarshalOptions{}.Unmarshal(b, msg)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v6.32.0
// source: observer/observer.proto
package observer
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9

// Fully-qualified method names of the observer.Observer service, as used on
// the wire and when configuring interceptors.
const (
	Observer_GetFlows_FullMethodName       = "/observer.Observer/GetFlows"
	Observer_GetAgentEvents_FullMethodName = "/observer.Observer/GetAgentEvents"
	Observer_GetDebugEvents_FullMethodName = "/observer.Observer/GetDebugEvents"
	Observer_GetNodes_FullMethodName       = "/observer.Observer/GetNodes"
	Observer_GetNamespaces_FullMethodName  = "/observer.Observer/GetNamespaces"
	Observer_ServerStatus_FullMethodName   = "/observer.Observer/ServerStatus"
)
// ObserverClient is the client API for Observer service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// Observer returns a stream of Flows depending on which filter the user wants
// to observe.
type ObserverClient interface {
	// GetFlows returning structured data, meant to eventually obsolete GetLastNFlows.
	GetFlows(ctx context.Context, in *GetFlowsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetFlowsResponse], error)
	// GetAgentEvents returns Cilium agent events.
	GetAgentEvents(ctx context.Context, in *GetAgentEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetAgentEventsResponse], error)
	// GetDebugEvents returns Cilium datapath debug events.
	GetDebugEvents(ctx context.Context, in *GetDebugEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetDebugEventsResponse], error)
	// GetNodes returns information about nodes in a cluster.
	GetNodes(ctx context.Context, in *GetNodesRequest, opts ...grpc.CallOption) (*GetNodesResponse, error)
	// GetNamespaces returns information about namespaces in a cluster.
	// The namespaces returned are namespaces which have had network flows in
	// the last hour. The namespaces are returned sorted by cluster name and
	// namespace in ascending order.
	GetNamespaces(ctx context.Context, in *GetNamespacesRequest, opts ...grpc.CallOption) (*GetNamespacesResponse, error)
	// ServerStatus returns some details about the running hubble server.
	ServerStatus(ctx context.Context, in *ServerStatusRequest, opts ...grpc.CallOption) (*ServerStatusResponse, error)
}
// observerClient is the concrete ObserverClient implementation backed by a
// grpc.ClientConnInterface.
type observerClient struct {
	cc grpc.ClientConnInterface
}

// NewObserverClient constructs an ObserverClient that issues RPCs over cc.
func NewObserverClient(cc grpc.ClientConnInterface) ObserverClient {
	return &observerClient{cc}
}
// GetFlows opens the server-streaming GetFlows RPC, sends the request, and
// half-closes the send side before returning the receive stream.
func (c *observerClient) GetFlows(ctx context.Context, in *GetFlowsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetFlowsResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[0], Observer_GetFlows_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[GetFlowsRequest, GetFlowsResponse]{ClientStream: stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Observer_GetFlowsClient = grpc.ServerStreamingClient[GetFlowsResponse]

// GetAgentEvents opens the server-streaming GetAgentEvents RPC, sends the
// request, and half-closes the send side before returning the receive stream.
func (c *observerClient) GetAgentEvents(ctx context.Context, in *GetAgentEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetAgentEventsResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[1], Observer_GetAgentEvents_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[GetAgentEventsRequest, GetAgentEventsResponse]{ClientStream: stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Observer_GetAgentEventsClient = grpc.ServerStreamingClient[GetAgentEventsResponse]

// GetDebugEvents opens the server-streaming GetDebugEvents RPC, sends the
// request, and half-closes the send side before returning the receive stream.
func (c *observerClient) GetDebugEvents(ctx context.Context, in *GetDebugEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetDebugEventsResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[2], Observer_GetDebugEvents_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[GetDebugEventsRequest, GetDebugEventsResponse]{ClientStream: stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Observer_GetDebugEventsClient = grpc.ServerStreamingClient[GetDebugEventsResponse]

// GetNodes performs the unary GetNodes RPC.
func (c *observerClient) GetNodes(ctx context.Context, in *GetNodesRequest, opts ...grpc.CallOption) (*GetNodesResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(GetNodesResponse)
	err := c.cc.Invoke(ctx, Observer_GetNodes_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// GetNamespaces performs the unary GetNamespaces RPC.
func (c *observerClient) GetNamespaces(ctx context.Context, in *GetNamespacesRequest, opts ...grpc.CallOption) (*GetNamespacesResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(GetNamespacesResponse)
	err := c.cc.Invoke(ctx, Observer_GetNamespaces_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// ServerStatus performs the unary ServerStatus RPC.
func (c *observerClient) ServerStatus(ctx context.Context, in *ServerStatusRequest, opts ...grpc.CallOption) (*ServerStatusResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(ServerStatusResponse)
	err := c.cc.Invoke(ctx, Observer_ServerStatus_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// ObserverServer is the server API for Observer service.
// All implementations should embed UnimplementedObserverServer
// for forward compatibility.
//
// Observer returns a stream of Flows depending on which filter the user wants
// to observe.
type ObserverServer interface {
	// GetFlows returning structured data, meant to eventually obsolete GetLastNFlows.
	GetFlows(*GetFlowsRequest, grpc.ServerStreamingServer[GetFlowsResponse]) error
	// GetAgentEvents returns Cilium agent events.
	GetAgentEvents(*GetAgentEventsRequest, grpc.ServerStreamingServer[GetAgentEventsResponse]) error
	// GetDebugEvents returns Cilium datapath debug events.
	GetDebugEvents(*GetDebugEventsRequest, grpc.ServerStreamingServer[GetDebugEventsResponse]) error
	// GetNodes returns information about nodes in a cluster.
	GetNodes(context.Context, *GetNodesRequest) (*GetNodesResponse, error)
	// GetNamespaces returns information about namespaces in a cluster.
	// The namespaces returned are namespaces which have had network flows in
	// the last hour. The namespaces are returned sorted by cluster name and
	// namespace in ascending order.
	GetNamespaces(context.Context, *GetNamespacesRequest) (*GetNamespacesResponse, error)
	// ServerStatus returns some details about the running hubble server.
	ServerStatus(context.Context, *ServerStatusRequest) (*ServerStatusResponse, error)
}
// UnimplementedObserverServer should be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedObserverServer struct{}

// Each stub below returns codes.Unimplemented so embedders only need to
// override the RPCs they actually serve.

func (UnimplementedObserverServer) GetFlows(*GetFlowsRequest, grpc.ServerStreamingServer[GetFlowsResponse]) error {
	return status.Errorf(codes.Unimplemented, "method GetFlows not implemented")
}

func (UnimplementedObserverServer) GetAgentEvents(*GetAgentEventsRequest, grpc.ServerStreamingServer[GetAgentEventsResponse]) error {
	return status.Errorf(codes.Unimplemented, "method GetAgentEvents not implemented")
}

func (UnimplementedObserverServer) GetDebugEvents(*GetDebugEventsRequest, grpc.ServerStreamingServer[GetDebugEventsResponse]) error {
	return status.Errorf(codes.Unimplemented, "method GetDebugEvents not implemented")
}

func (UnimplementedObserverServer) GetNodes(context.Context, *GetNodesRequest) (*GetNodesResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetNodes not implemented")
}

func (UnimplementedObserverServer) GetNamespaces(context.Context, *GetNamespacesRequest) (*GetNamespacesResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetNamespaces not implemented")
}

func (UnimplementedObserverServer) ServerStatus(context.Context, *ServerStatusRequest) (*ServerStatusResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ServerStatus not implemented")
}

// testEmbeddedByValue lets RegisterObserverServer detect by-value embedding.
func (UnimplementedObserverServer) testEmbeddedByValue() {}
// UnsafeObserverServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ObserverServer will
// result in compilation errors.
type UnsafeObserverServer interface {
mustEmbedUnimplementedObserverServer()
}
func RegisterObserverServer(s grpc.ServiceRegistrar, srv ObserverServer) {
// If the following call pancis, it indicates UnimplementedObserverServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Observer_ServiceDesc, srv)
}
func _Observer_GetFlows_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetFlowsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(ObserverServer).GetFlows(m, &grpc.GenericServerStream[GetFlowsRequest, GetFlowsResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Observer_GetFlowsServer = grpc.ServerStreamingServer[GetFlowsResponse]
func _Observer_GetAgentEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetAgentEventsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(ObserverServer).GetAgentEvents(m, &grpc.GenericServerStream[GetAgentEventsRequest, GetAgentEventsResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Observer_GetAgentEventsServer = grpc.ServerStreamingServer[GetAgentEventsResponse]
func _Observer_GetDebugEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetDebugEventsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(ObserverServer).GetDebugEvents(m, &grpc.GenericServerStream[GetDebugEventsRequest, GetDebugEventsResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Observer_GetDebugEventsServer = grpc.ServerStreamingServer[GetDebugEventsResponse]
func _Observer_GetNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetNodesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ObserverServer).GetNodes(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Observer_GetNodes_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ObserverServer).GetNodes(ctx, req.(*GetNodesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Observer_GetNamespaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetNamespacesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ObserverServer).GetNamespaces(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Observer_GetNamespaces_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ObserverServer).GetNamespaces(ctx, req.(*GetNamespacesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Observer_ServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ServerStatusRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ObserverServer).ServerStatus(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Observer_ServerStatus_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ObserverServer).ServerStatus(ctx, req.(*ServerStatusRequest))
}
return interceptor(ctx, in, info, handler)
}
// Observer_ServiceDesc is the grpc.ServiceDesc for Observer service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Observer_ServiceDesc = grpc.ServiceDesc{
ServiceName: "observer.Observer",
HandlerType: (*ObserverServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetNodes",
Handler: _Observer_GetNodes_Handler,
},
{
MethodName: "GetNamespaces",
Handler: _Observer_GetNamespaces_Handler,
},
{
MethodName: "ServerStatus",
Handler: _Observer_ServerStatus_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "GetFlows",
Handler: _Observer_GetFlows_Handler,
ServerStreams: true,
},
{
StreamName: "GetAgentEvents",
Handler: _Observer_GetAgentEvents_Handler,
ServerStreams: true,
},
{
StreamName: "GetDebugEvents",
Handler: _Observer_GetDebugEvents_Handler,
ServerStreams: true,
},
},
Metadata: "observer/observer.proto",
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.7
// protoc v6.32.0
// source: peer/peer.proto
package peer
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// ChangeNotificationType defines the peer change notification type.
type ChangeNotificationType int32

const (
	ChangeNotificationType_UNKNOWN      ChangeNotificationType = 0
	ChangeNotificationType_PEER_ADDED   ChangeNotificationType = 1
	ChangeNotificationType_PEER_DELETED ChangeNotificationType = 2
	ChangeNotificationType_PEER_UPDATED ChangeNotificationType = 3
)

// Enum value maps for ChangeNotificationType.
var (
	ChangeNotificationType_name = map[int32]string{
		0: "UNKNOWN",
		1: "PEER_ADDED",
		2: "PEER_DELETED",
		3: "PEER_UPDATED",
	}
	ChangeNotificationType_value = map[string]int32{
		"UNKNOWN":      0,
		"PEER_ADDED":   1,
		"PEER_DELETED": 2,
		"PEER_UPDATED": 3,
	}
)

// Enum returns a pointer to a copy of x.
func (x ChangeNotificationType) Enum() *ChangeNotificationType {
	p := new(ChangeNotificationType)
	*p = x
	return p
}

// String returns the protobuf enum value name for x.
func (x ChangeNotificationType) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the protobuf descriptor for this enum type.
func (ChangeNotificationType) Descriptor() protoreflect.EnumDescriptor {
	return file_peer_peer_proto_enumTypes[0].Descriptor()
}

// Type returns the protoreflect enum type for ChangeNotificationType.
func (ChangeNotificationType) Type() protoreflect.EnumType {
	return &file_peer_peer_proto_enumTypes[0]
}

// Number returns x as a protoreflect enum number.
func (x ChangeNotificationType) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use ChangeNotificationType.Descriptor instead.
func (ChangeNotificationType) EnumDescriptor() ([]byte, []int) {
	return file_peer_peer_proto_rawDescGZIP(), []int{0}
}
// NotifyRequest is the (empty) request message for the Peer.Notify RPC.
type NotifyRequest struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state, re-binding its message info.
func (x *NotifyRequest) Reset() {
	*x = NotifyRequest{}
	mi := &file_peer_peer_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable form of the message.
func (x *NotifyRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NotifyRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NotifyRequest) ProtoReflect() protoreflect.Message {
	mi := &file_peer_peer_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NotifyRequest.ProtoReflect.Descriptor instead.
func (*NotifyRequest) Descriptor() ([]byte, []int) {
	return file_peer_peer_proto_rawDescGZIP(), []int{0}
}
// ChangeNotification indicates a change regarding a hubble peer.
type ChangeNotification struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Name is the name of the peer, typically the hostname. The name includes
	// the cluster name if a value other than default has been specified.
	// This value can be used to uniquely identify the host.
	// When the cluster name is not the default, the cluster name is prepended
	// to the peer name and a forward slash is added.
	//
	// Examples:
	//   - runtime1
	//   - testcluster/runtime1
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Address is the address of the peer's gRPC service.
	Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
	// ChangeNotificationType indicates the type of change, ie whether the peer
	// was added, deleted or updated.
	Type ChangeNotificationType `protobuf:"varint,3,opt,name=type,proto3,enum=peer.ChangeNotificationType" json:"type,omitempty"`
	// TLS provides information to connect to the Address with TLS enabled.
	// If not set, TLS shall be assumed to be disabled.
	Tls           *TLS `protobuf:"bytes,4,opt,name=tls,proto3" json:"tls,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state, re-binding its message info.
func (x *ChangeNotification) Reset() {
	*x = ChangeNotification{}
	mi := &file_peer_peer_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable form of the message.
func (x *ChangeNotification) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ChangeNotification) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *ChangeNotification) ProtoReflect() protoreflect.Message {
	mi := &file_peer_peer_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ChangeNotification.ProtoReflect.Descriptor instead.
func (*ChangeNotification) Descriptor() ([]byte, []int) {
	return file_peer_peer_proto_rawDescGZIP(), []int{1}
}
// GetName returns the Name field, or the empty string when the
// receiver is nil.
func (x *ChangeNotification) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// GetAddress returns the Address field, or the empty string when the
// receiver is nil.
func (x *ChangeNotification) GetAddress() string {
	if x == nil {
		return ""
	}
	return x.Address
}
// GetType returns the Type field, or ChangeNotificationType_UNKNOWN when
// the receiver is nil.
func (x *ChangeNotification) GetType() ChangeNotificationType {
	if x == nil {
		return ChangeNotificationType_UNKNOWN
	}
	return x.Type
}
// GetTls returns the Tls field, or nil when the receiver is nil.
func (x *ChangeNotification) GetTls() *TLS {
	if x == nil {
		return nil
	}
	return x.Tls
}
// TLS provides information to establish a TLS connection to the peer.
type TLS struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// ServerName is used to verify the hostname on the returned certificate.
	ServerName    string `protobuf:"bytes,1,opt,name=server_name,json=serverName,proto3" json:"server_name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state, re-binding its message info.
func (x *TLS) Reset() {
	*x = TLS{}
	mi := &file_peer_peer_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable form of the message.
func (x *TLS) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TLS) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *TLS) ProtoReflect() protoreflect.Message {
	mi := &file_peer_peer_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TLS.ProtoReflect.Descriptor instead.
func (*TLS) Descriptor() ([]byte, []int) {
	return file_peer_peer_proto_rawDescGZIP(), []int{2}
}

// GetServerName returns the ServerName field, or the empty string when the
// receiver is nil.
func (x *TLS) GetServerName() string {
	if x != nil {
		return x.ServerName
	}
	return ""
}
// File_peer_peer_proto is the file descriptor for peer/peer.proto,
// populated by file_peer_peer_proto_init.
var File_peer_peer_proto protoreflect.FileDescriptor

// file_peer_peer_proto_rawDesc is the wire-format FileDescriptorProto for
// peer/peer.proto. Do not modify: the bytes must match the generator output.
const file_peer_peer_proto_rawDesc = "" +
	"\n" +
	"\x0fpeer/peer.proto\x12\x04peer\"\x0f\n" +
	"\rNotifyRequest\"\x91\x01\n" +
	"\x12ChangeNotification\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
	"\aaddress\x18\x02 \x01(\tR\aaddress\x120\n" +
	"\x04type\x18\x03 \x01(\x0e2\x1c.peer.ChangeNotificationTypeR\x04type\x12\x1b\n" +
	"\x03tls\x18\x04 \x01(\v2\t.peer.TLSR\x03tls\"&\n" +
	"\x03TLS\x12\x1f\n" +
	"\vserver_name\x18\x01 \x01(\tR\n" +
	"serverName*Y\n" +
	"\x16ChangeNotificationType\x12\v\n" +
	"\aUNKNOWN\x10\x00\x12\x0e\n" +
	"\n" +
	"PEER_ADDED\x10\x01\x12\x10\n" +
	"\fPEER_DELETED\x10\x02\x12\x10\n" +
	"\fPEER_UPDATED\x10\x032C\n" +
	"\x04Peer\x12;\n" +
	"\x06Notify\x12\x13.peer.NotifyRequest\x1a\x18.peer.ChangeNotification\"\x000\x01B&Z$github.com/cilium/cilium/api/v1/peerb\x06proto3"

var (
	file_peer_peer_proto_rawDescOnce sync.Once
	file_peer_peer_proto_rawDescData []byte
)

// file_peer_peer_proto_rawDescGZIP lazily gzip-compresses the raw descriptor
// exactly once and returns the cached bytes.
func file_peer_peer_proto_rawDescGZIP() []byte {
	file_peer_peer_proto_rawDescOnce.Do(func() {
		file_peer_peer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_peer_peer_proto_rawDesc), len(file_peer_peer_proto_rawDesc)))
	})
	return file_peer_peer_proto_rawDescData
}
var file_peer_peer_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_peer_peer_proto_msgTypes = make([]protoimpl.MessageInfo, 3)

// file_peer_peer_proto_goTypes maps descriptor indexes to Go types; the
// order must match the indexes used in file_peer_peer_proto_depIdxs.
var file_peer_peer_proto_goTypes = []any{
	(ChangeNotificationType)(0), // 0: peer.ChangeNotificationType
	(*NotifyRequest)(nil),       // 1: peer.NotifyRequest
	(*ChangeNotification)(nil),  // 2: peer.ChangeNotification
	(*TLS)(nil),                 // 3: peer.TLS
}
var file_peer_peer_proto_depIdxs = []int32{
	0, // 0: peer.ChangeNotification.type:type_name -> peer.ChangeNotificationType
	3, // 1: peer.ChangeNotification.tls:type_name -> peer.TLS
	1, // 2: peer.Peer.Notify:input_type -> peer.NotifyRequest
	2, // 3: peer.Peer.Notify:output_type -> peer.ChangeNotification
	3, // [3:4] is the sub-list for method output_type
	2, // [2:3] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}

func init() { file_peer_peer_proto_init() }

// file_peer_peer_proto_init builds and registers the file descriptor and
// type information for peer/peer.proto. It is idempotent.
func file_peer_peer_proto_init() {
	if File_peer_peer_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_peer_peer_proto_rawDesc), len(file_peer_peer_proto_rawDesc)),
			NumEnums:      1,
			NumMessages:   3,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_peer_peer_proto_goTypes,
		DependencyIndexes: file_peer_peer_proto_depIdxs,
		EnumInfos:         file_peer_peer_proto_enumTypes,
		MessageInfos:      file_peer_peer_proto_msgTypes,
	}.Build()
	File_peer_peer_proto = out.File
	// Release references no longer needed after registration.
	file_peer_peer_proto_goTypes = nil
	file_peer_peer_proto_depIdxs = nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go-json. DO NOT EDIT.
// source: peer/peer.proto
package peer
import (
"google.golang.org/protobuf/encoding/protojson"
)
// MarshalJSON implements json.Marshaler
func (msg *NotifyRequest) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *NotifyRequest) UnmarshalJSON(b []byte) error {
	var opts protojson.UnmarshalOptions
	return opts.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *ChangeNotification) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *ChangeNotification) UnmarshalJSON(b []byte) error {
	var opts protojson.UnmarshalOptions
	return opts.Unmarshal(b, msg)
}
// MarshalJSON implements json.Marshaler
func (msg *TLS) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *TLS) UnmarshalJSON(b []byte) error {
	var opts protojson.UnmarshalOptions
	return opts.Unmarshal(b, msg)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v6.32.0
// source: peer/peer.proto
package peer
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9

const (
	Peer_Notify_FullMethodName = "/peer.Peer/Notify"
)

// PeerClient is the client API for Peer service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// Peer lists hubble peers and notifies of changes.
type PeerClient interface {
	// Notify sends information about hubble peers in the cluster.
	// When Notify is called, it sends information about all the peers that are
	// already part of the cluster (with the type as PEER_ADDED). It
	// subsequently notifies of any change.
	Notify(ctx context.Context, in *NotifyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ChangeNotification], error)
}

// peerClient is the generated implementation of PeerClient backed by a
// client connection.
type peerClient struct {
	cc grpc.ClientConnInterface
}

// NewPeerClient returns a PeerClient that issues RPCs over cc.
func NewPeerClient(cc grpc.ClientConnInterface) PeerClient {
	return &peerClient{cc}
}

// Notify opens the server-streaming Notify RPC, sends the request, and
// half-closes the send side before returning the stream to the caller.
func (c *peerClient) Notify(ctx context.Context, in *NotifyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ChangeNotification], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &Peer_ServiceDesc.Streams[0], Peer_Notify_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[NotifyRequest, ChangeNotification]{ClientStream: stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Peer_NotifyClient = grpc.ServerStreamingClient[ChangeNotification]

// PeerServer is the server API for Peer service.
// All implementations should embed UnimplementedPeerServer
// for forward compatibility.
//
// Peer lists hubble peers and notifies of changes.
type PeerServer interface {
	// Notify sends information about hubble peers in the cluster.
	// When Notify is called, it sends information about all the peers that are
	// already part of the cluster (with the type as PEER_ADDED). It
	// subsequently notifies of any change.
	Notify(*NotifyRequest, grpc.ServerStreamingServer[ChangeNotification]) error
}

// UnimplementedPeerServer should be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedPeerServer struct{}

func (UnimplementedPeerServer) Notify(*NotifyRequest, grpc.ServerStreamingServer[ChangeNotification]) error {
	return status.Errorf(codes.Unimplemented, "method Notify not implemented")
}

// testEmbeddedByValue is detected by RegisterPeerServer to verify that
// UnimplementedPeerServer was embedded by value, not by pointer.
func (UnimplementedPeerServer) testEmbeddedByValue() {}

// UnsafePeerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to PeerServer will
// result in compilation errors.
type UnsafePeerServer interface {
	mustEmbedUnimplementedPeerServer()
}

// RegisterPeerServer registers srv with the given gRPC service registrar.
func RegisterPeerServer(s grpc.ServiceRegistrar, srv PeerServer) {
	// If the following call panics, it indicates UnimplementedPeerServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&Peer_ServiceDesc, srv)
}

// _Peer_Notify_Handler adapts the raw server stream to the typed Notify
// server-streaming method.
func _Peer_Notify_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(NotifyRequest)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(PeerServer).Notify(m, &grpc.GenericServerStream[NotifyRequest, ChangeNotification]{ServerStream: stream})
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Peer_NotifyServer = grpc.ServerStreamingServer[ChangeNotification]

// Peer_ServiceDesc is the grpc.ServiceDesc for Peer service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Peer_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "peer.Peer",
	HandlerType: (*PeerServer)(nil),
	Methods:     []grpc.MethodDesc{},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "Notify",
			Handler:       _Peer_Notify_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "peer/peer.proto",
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.7
// protoc v6.32.0
// source: relay/relay.proto
package relay
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// NodeState describes the connection state of a relay peer node.
type NodeState int32

const (
	// UNKNOWN_NODE_STATE indicates that the state of this node is unknown.
	NodeState_UNKNOWN_NODE_STATE NodeState = 0
	// NODE_CONNECTED indicates that we have established a connection
	// to this node. The client can expect to observe flows from this node.
	NodeState_NODE_CONNECTED NodeState = 1
	// NODE_UNAVAILABLE indicates that the connection to this
	// node is currently unavailable. The client can expect to not see any
	// flows from this node until either the connection is re-established or
	// the node is gone.
	NodeState_NODE_UNAVAILABLE NodeState = 2
	// NODE_GONE indicates that a node has been removed from the
	// cluster. No reconnection attempts will be made.
	NodeState_NODE_GONE NodeState = 3
	// NODE_ERROR indicates that a node has reported an error while processing
	// the request. No reconnection attempts will be made.
	NodeState_NODE_ERROR NodeState = 4
)

// Enum value maps for NodeState.
var (
	NodeState_name = map[int32]string{
		0: "UNKNOWN_NODE_STATE",
		1: "NODE_CONNECTED",
		2: "NODE_UNAVAILABLE",
		3: "NODE_GONE",
		4: "NODE_ERROR",
	}
	NodeState_value = map[string]int32{
		"UNKNOWN_NODE_STATE": 0,
		"NODE_CONNECTED":     1,
		"NODE_UNAVAILABLE":   2,
		"NODE_GONE":          3,
		"NODE_ERROR":         4,
	}
)

// Enum returns a pointer to a copy of x.
func (x NodeState) Enum() *NodeState {
	p := new(NodeState)
	*p = x
	return p
}

// String returns the protobuf enum value name for x.
func (x NodeState) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the protobuf descriptor for this enum type.
func (NodeState) Descriptor() protoreflect.EnumDescriptor {
	return file_relay_relay_proto_enumTypes[0].Descriptor()
}

// Type returns the protoreflect enum type for NodeState.
func (NodeState) Type() protoreflect.EnumType {
	return &file_relay_relay_proto_enumTypes[0]
}

// Number returns x as a protoreflect enum number.
func (x NodeState) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use NodeState.Descriptor instead.
func (NodeState) EnumDescriptor() ([]byte, []int) {
	return file_relay_relay_proto_rawDescGZIP(), []int{0}
}

// NodeStatusEvent is a message sent by hubble-relay to inform clients about
// the state of a particular node.
type NodeStatusEvent struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// state_change contains the new node state
	StateChange NodeState `protobuf:"varint,1,opt,name=state_change,json=stateChange,proto3,enum=relay.NodeState" json:"state_change,omitempty"`
	// node_names is the list of nodes for which the above state changes applies
	NodeNames []string `protobuf:"bytes,2,rep,name=node_names,json=nodeNames,proto3" json:"node_names,omitempty"`
	// message is an optional message attached to the state change (e.g. an
	// error message). The message applies to all nodes in node_names.
	Message       string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state, re-binding its message info.
func (x *NodeStatusEvent) Reset() {
	*x = NodeStatusEvent{}
	mi := &file_relay_relay_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable form of the message.
func (x *NodeStatusEvent) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NodeStatusEvent) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NodeStatusEvent) ProtoReflect() protoreflect.Message {
	mi := &file_relay_relay_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NodeStatusEvent.ProtoReflect.Descriptor instead.
func (*NodeStatusEvent) Descriptor() ([]byte, []int) {
	return file_relay_relay_proto_rawDescGZIP(), []int{0}
}

// GetStateChange returns the StateChange field, or
// NodeState_UNKNOWN_NODE_STATE when the receiver is nil.
func (x *NodeStatusEvent) GetStateChange() NodeState {
	if x != nil {
		return x.StateChange
	}
	return NodeState_UNKNOWN_NODE_STATE
}

// GetNodeNames returns the NodeNames field, or nil when the receiver is nil.
func (x *NodeStatusEvent) GetNodeNames() []string {
	if x != nil {
		return x.NodeNames
	}
	return nil
}

// GetMessage returns the Message field, or the empty string when the
// receiver is nil.
func (x *NodeStatusEvent) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

// File_relay_relay_proto is the file descriptor for relay/relay.proto,
// populated by file_relay_relay_proto_init.
var File_relay_relay_proto protoreflect.FileDescriptor

// file_relay_relay_proto_rawDesc is the wire-format FileDescriptorProto for
// relay/relay.proto. Do not modify: the bytes must match the generator output.
const file_relay_relay_proto_rawDesc = "" +
	"\n" +
	"\x11relay/relay.proto\x12\x05relay\"\x7f\n" +
	"\x0fNodeStatusEvent\x123\n" +
	"\fstate_change\x18\x01 \x01(\x0e2\x10.relay.NodeStateR\vstateChange\x12\x1d\n" +
	"\n" +
	"node_names\x18\x02 \x03(\tR\tnodeNames\x12\x18\n" +
	"\amessage\x18\x03 \x01(\tR\amessage*l\n" +
	"\tNodeState\x12\x16\n" +
	"\x12UNKNOWN_NODE_STATE\x10\x00\x12\x12\n" +
	"\x0eNODE_CONNECTED\x10\x01\x12\x14\n" +
	"\x10NODE_UNAVAILABLE\x10\x02\x12\r\n" +
	"\tNODE_GONE\x10\x03\x12\x0e\n" +
	"\n" +
	"NODE_ERROR\x10\x04B'Z%github.com/cilium/cilium/api/v1/relayb\x06proto3"

var (
	file_relay_relay_proto_rawDescOnce sync.Once
	file_relay_relay_proto_rawDescData []byte
)

// file_relay_relay_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor exactly once and returns the cached bytes.
func file_relay_relay_proto_rawDescGZIP() []byte {
	file_relay_relay_proto_rawDescOnce.Do(func() {
		file_relay_relay_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_relay_relay_proto_rawDesc), len(file_relay_relay_proto_rawDesc)))
	})
	return file_relay_relay_proto_rawDescData
}

var file_relay_relay_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_relay_relay_proto_msgTypes = make([]protoimpl.MessageInfo, 1)

// file_relay_relay_proto_goTypes maps descriptor indexes to Go types; the
// order must match the indexes used in file_relay_relay_proto_depIdxs.
var file_relay_relay_proto_goTypes = []any{
	(NodeState)(0),          // 0: relay.NodeState
	(*NodeStatusEvent)(nil), // 1: relay.NodeStatusEvent
}
var file_relay_relay_proto_depIdxs = []int32{
	0, // 0: relay.NodeStatusEvent.state_change:type_name -> relay.NodeState
	1, // [1:1] is the sub-list for method output_type
	1, // [1:1] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}

func init() { file_relay_relay_proto_init() }

// file_relay_relay_proto_init builds and registers the file descriptor and
// type information for relay/relay.proto. It is idempotent.
func file_relay_relay_proto_init() {
	if File_relay_relay_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_relay_relay_proto_rawDesc), len(file_relay_relay_proto_rawDesc)),
			NumEnums:      1,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_relay_relay_proto_goTypes,
		DependencyIndexes: file_relay_relay_proto_depIdxs,
		EnumInfos:         file_relay_relay_proto_enumTypes,
		MessageInfos:      file_relay_relay_proto_msgTypes,
	}.Build()
	File_relay_relay_proto = out.File
	// Release references no longer needed after registration.
	file_relay_relay_proto_goTypes = nil
	file_relay_relay_proto_depIdxs = nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-go-json. DO NOT EDIT.
// source: relay/relay.proto
package relay
import (
"google.golang.org/protobuf/encoding/protojson"
)
// MarshalJSON implements json.Marshaler
func (msg *NodeStatusEvent) MarshalJSON() ([]byte, error) {
	opts := protojson.MarshalOptions{UseProtoNames: true}
	return opts.Marshal(msg)
}
// UnmarshalJSON implements json.Unmarshaler
func (msg *NodeStatusEvent) UnmarshalJSON(b []byte) error {
	var opts protojson.UnmarshalOptions
	return opts.Unmarshal(b, msg)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetCgroupDumpMetadataHandlerFunc turns a function with the right signature into a get cgroup dump metadata handler
type GetCgroupDumpMetadataHandlerFunc func(GetCgroupDumpMetadataParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetCgroupDumpMetadataHandlerFunc) Handle(params GetCgroupDumpMetadataParams) middleware.Responder {
	return fn(params)
}

// GetCgroupDumpMetadataHandler is the interface that can handle valid get cgroup dump metadata params
type GetCgroupDumpMetadataHandler interface {
	Handle(GetCgroupDumpMetadataParams) middleware.Responder
}

// NewGetCgroupDumpMetadata creates a new http.Handler for the get cgroup dump metadata operation
func NewGetCgroupDumpMetadata(ctx *middleware.Context, handler GetCgroupDumpMetadataHandler) *GetCgroupDumpMetadata {
	return &GetCgroupDumpMetadata{Context: ctx, Handler: handler}
}

/*
GetCgroupDumpMetadata swagger:route GET /cgroup-dump-metadata daemon getCgroupDumpMetadata

Retrieve cgroup metadata for all pods
*/
type GetCgroupDumpMetadata struct {
	Context *middleware.Context
	Handler GetCgroupDumpMetadataHandler
}

// ServeHTTP binds the incoming request to operation params and delegates
// to the configured handler, writing its response back to the client.
func (o *GetCgroupDumpMetadata) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	// RouteInfo may return an enriched request; adopt it so downstream
	// binding sees the matched-route context.
	if rCtx != nil {
		*r = *rCtx
	}
	var Params = NewGetCgroupDumpMetadataParams()
	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}
	res := o.Handler.Handle(Params) // actually handle the request
	o.Context.Respond(rw, r, route.Produces, route, res)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
)
// NewGetCgroupDumpMetadataParams creates a new GetCgroupDumpMetadataParams object
//
// There are no default values defined in the spec.
func NewGetCgroupDumpMetadataParams() GetCgroupDumpMetadataParams {
	return GetCgroupDumpMetadataParams{}
}

// GetCgroupDumpMetadataParams contains all the bound params for the get cgroup dump metadata operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetCgroupDumpMetadata
type GetCgroupDumpMetadataParams struct {
	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`
}

// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetCgroupDumpMetadataParams() beforehand.
func (o *GetCgroupDumpMetadataParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	// res stays empty here: this operation declares no parameters beyond
	// the raw request, so there is nothing to validate.
	var res []error
	o.HTTPRequest = r
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// GetCgroupDumpMetadataOKCode is the HTTP code returned for type GetCgroupDumpMetadataOK
const GetCgroupDumpMetadataOKCode int = 200

/*
GetCgroupDumpMetadataOK Success

swagger:response getCgroupDumpMetadataOK
*/
type GetCgroupDumpMetadataOK struct {

	/*
	  In: Body
	*/
	Payload *models.CgroupDumpMetadata `json:"body,omitempty"`
}

// NewGetCgroupDumpMetadataOK creates GetCgroupDumpMetadataOK with default headers values
func NewGetCgroupDumpMetadataOK() *GetCgroupDumpMetadataOK {
	return &GetCgroupDumpMetadataOK{}
}

// WithPayload adds the payload to the get cgroup dump metadata o k response
func (o *GetCgroupDumpMetadataOK) WithPayload(payload *models.CgroupDumpMetadata) *GetCgroupDumpMetadataOK {
	o.Payload = payload
	return o
}

// SetPayload sets the payload to the get cgroup dump metadata o k response
func (o *GetCgroupDumpMetadataOK) SetPayload(payload *models.CgroupDumpMetadata) {
	o.Payload = payload
}

// WriteResponse writes the 200 response (and the body, when present) to the client.
func (o *GetCgroupDumpMetadataOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Use the declared status-code constant rather than a magic number.
	rw.WriteHeader(GetCgroupDumpMetadataOKCode)
	if o.Payload != nil {
		payload := o.Payload
		if err := producer.Produce(rw, payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
// GetCgroupDumpMetadataFailureCode is the HTTP code returned for type GetCgroupDumpMetadataFailure
const GetCgroupDumpMetadataFailureCode int = 500

/*
GetCgroupDumpMetadataFailure CgroupDumpMetadata get failed

swagger:response getCgroupDumpMetadataFailure
*/
type GetCgroupDumpMetadataFailure struct {

	/*
	  In: Body
	*/
	Payload models.Error `json:"body,omitempty"`
}

// NewGetCgroupDumpMetadataFailure creates GetCgroupDumpMetadataFailure with default headers values
func NewGetCgroupDumpMetadataFailure() *GetCgroupDumpMetadataFailure {
	return &GetCgroupDumpMetadataFailure{}
}

// WithPayload adds the payload to the get cgroup dump metadata failure response
func (o *GetCgroupDumpMetadataFailure) WithPayload(payload models.Error) *GetCgroupDumpMetadataFailure {
	o.Payload = payload
	return o
}

// SetPayload sets the payload to the get cgroup dump metadata failure response
func (o *GetCgroupDumpMetadataFailure) SetPayload(payload models.Error) {
	o.Payload = payload
}

// WriteResponse writes the 500 response and its error payload to the client.
func (o *GetCgroupDumpMetadataFailure) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Use the declared status-code constant rather than a magic number.
	rw.WriteHeader(GetCgroupDumpMetadataFailureCode)
	// The payload is a value type, so it is always produced (no nil check).
	payload := o.Payload
	if err := producer.Produce(rw, payload); err != nil {
		panic(err) // let the recovery middleware deal with this
	}
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetClusterNodesHandlerFunc turns a function with the right signature into a get cluster nodes handler
type GetClusterNodesHandlerFunc func(GetClusterNodesParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetClusterNodesHandlerFunc) Handle(params GetClusterNodesParams) middleware.Responder {
	return fn(params)
}

// GetClusterNodesHandler is the interface a daemon implementation must
// satisfy to handle validated get cluster nodes requests.
type GetClusterNodesHandler interface {
	Handle(GetClusterNodesParams) middleware.Responder
}

// NewGetClusterNodes creates a new http.Handler for the get cluster nodes operation
func NewGetClusterNodes(ctx *middleware.Context, handler GetClusterNodesHandler) *GetClusterNodes {
	return &GetClusterNodes{Context: ctx, Handler: handler}
}
/*
GetClusterNodes swagger:route GET /cluster/nodes daemon getClusterNodes

Get nodes information stored in the cilium-agent
*/
type GetClusterNodes struct {
	Context *middleware.Context
	Handler GetClusterNodesHandler
}

// ServeHTTP dispatches an incoming request: it resolves the matched route,
// binds and validates the request parameters, and writes whatever responder
// the configured handler returns.
func (o *GetClusterNodes) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		// Adopt the enriched request produced during route matching.
		*r = *rCtx
	}

	params := NewGetClusterNodesParams()
	if err := o.Context.BindValidRequest(r, route, &params); err != nil {
		// Parameter binding/validation failed; report the error to the client.
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	// Delegate to the registered handler and write its responder.
	o.Context.Respond(rw, r, route.Produces, route, o.Handler.Handle(params))
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewGetClusterNodesParams creates a new GetClusterNodesParams object
//
// There are no default values defined in the spec.
func NewGetClusterNodesParams() GetClusterNodesParams {
	return GetClusterNodesParams{}
}

// GetClusterNodesParams contains all the bound params for the get cluster nodes operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetClusterNodes
type GetClusterNodesParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`

	/*Client UUID should be used when the client wants to request
	  a diff of nodes added and / or removed since the last time
	  that client has made a request.

	  In: header
	*/
	ClientID *int64
}

// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetClusterNodesParams() beforehand.
func (o *GetClusterNodesParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	var res []error

	o.HTTPRequest = r

	// Header names are canonicalized by net/http; look up the canonical form.
	if err := o.bindClientID(r.Header[http.CanonicalHeaderKey("client-id")], true, route.Formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// bindClientID binds and validates parameter ClientID from header.
func (o *GetClusterNodesParams) bindClientID(rawData []string, hasKey bool, formats strfmt.Registry) error {
	raw := ""
	if n := len(rawData); n > 0 {
		// Only the last value of a repeated header is significant.
		raw = rawData[n-1]
	}

	// Optional parameter: an absent or empty header leaves ClientID nil.
	if raw == "" {
		return nil
	}

	parsed, err := swag.ConvertInt64(raw)
	if err != nil {
		return errors.InvalidType("client-id", "header", "int64", raw)
	}
	o.ClientID = &parsed

	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// GetClusterNodesOKCode is the HTTP code returned for type GetClusterNodesOK
const GetClusterNodesOKCode int = 200

/*
GetClusterNodesOK Success

swagger:response getClusterNodesOK
*/
type GetClusterNodesOK struct {

	/*
	  In: Body
	*/
	Payload *models.ClusterNodeStatus `json:"body,omitempty"`
}

// NewGetClusterNodesOK creates GetClusterNodesOK with default headers values
func NewGetClusterNodesOK() *GetClusterNodesOK {
	return &GetClusterNodesOK{}
}

// WithPayload adds the payload to the get cluster nodes o k response
func (o *GetClusterNodesOK) WithPayload(payload *models.ClusterNodeStatus) *GetClusterNodesOK {
	o.Payload = payload
	return o
}

// SetPayload sets the payload to the get cluster nodes o k response
func (o *GetClusterNodesOK) SetPayload(payload *models.ClusterNodeStatus) {
	o.Payload = payload
}

// WriteResponse writes the 200 response (and the body, when present) to the client.
func (o *GetClusterNodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Use the declared status-code constant rather than a magic number.
	rw.WriteHeader(GetClusterNodesOKCode)
	if o.Payload != nil {
		payload := o.Payload
		if err := producer.Produce(rw, payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetConfigHandlerFunc turns a function with the right signature into a get config handler
type GetConfigHandlerFunc func(GetConfigParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetConfigHandlerFunc) Handle(params GetConfigParams) middleware.Responder {
	return fn(params)
}

// GetConfigHandler is the interface a daemon implementation must satisfy to
// handle validated get config requests.
type GetConfigHandler interface {
	Handle(GetConfigParams) middleware.Responder
}

// NewGetConfig creates a new http.Handler for the get config operation
func NewGetConfig(ctx *middleware.Context, handler GetConfigHandler) *GetConfig {
	return &GetConfig{Context: ctx, Handler: handler}
}
/*
GetConfig swagger:route GET /config daemon getConfig

# Get configuration of Cilium daemon

Returns the configuration of the Cilium daemon.
*/
type GetConfig struct {
	Context *middleware.Context
	Handler GetConfigHandler
}

// ServeHTTP resolves the matched route, binds and validates the request
// parameters, and delegates the request to the configured handler.
func (o *GetConfig) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		// RouteInfo may return an enriched request; adopt it in place.
		*r = *rCtx
	}
	var Params = NewGetConfigParams()
	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	res := o.Handler.Handle(Params) // actually handle the request
	o.Context.Respond(rw, r, route.Produces, route, res)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
)
// NewGetConfigParams creates a new GetConfigParams object
//
// There are no default values defined in the spec.
func NewGetConfigParams() GetConfigParams {
	return GetConfigParams{}
}

// GetConfigParams contains all the bound params for the get config operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetConfig
type GetConfigParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`
}

// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetConfigParams() beforehand.
func (o *GetConfigParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	// res collects binding errors; this operation defines no parameters, so the
	// slice is kept (empty) only for uniformity with other generated binders.
	var res []error

	o.HTTPRequest = r

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// GetConfigOKCode is the HTTP code returned for type GetConfigOK
const GetConfigOKCode int = 200

/*
GetConfigOK Success

swagger:response getConfigOK
*/
type GetConfigOK struct {

	/*
	  In: Body
	*/
	Payload *models.DaemonConfiguration `json:"body,omitempty"`
}

// NewGetConfigOK creates GetConfigOK with default headers values
func NewGetConfigOK() *GetConfigOK {
	return &GetConfigOK{}
}

// WithPayload adds the payload to the get config o k response
func (o *GetConfigOK) WithPayload(payload *models.DaemonConfiguration) *GetConfigOK {
	o.Payload = payload
	return o
}

// SetPayload sets the payload to the get config o k response
func (o *GetConfigOK) SetPayload(payload *models.DaemonConfiguration) {
	o.Payload = payload
}

// WriteResponse writes the 200 response (and the body, when present) to the client.
func (o *GetConfigOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Use the declared status-code constant rather than a magic number.
	rw.WriteHeader(GetConfigOKCode)
	if o.Payload != nil {
		payload := o.Payload
		if err := producer.Produce(rw, payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetDebuginfoHandlerFunc turns a function with the right signature into a get debuginfo handler
type GetDebuginfoHandlerFunc func(GetDebuginfoParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetDebuginfoHandlerFunc) Handle(params GetDebuginfoParams) middleware.Responder {
	return fn(params)
}

// GetDebuginfoHandler is the interface a daemon implementation must satisfy
// to handle validated get debuginfo requests.
type GetDebuginfoHandler interface {
	Handle(GetDebuginfoParams) middleware.Responder
}

// NewGetDebuginfo creates a new http.Handler for the get debuginfo operation
func NewGetDebuginfo(ctx *middleware.Context, handler GetDebuginfoHandler) *GetDebuginfo {
	return &GetDebuginfo{Context: ctx, Handler: handler}
}
/*
GetDebuginfo swagger:route GET /debuginfo daemon getDebuginfo

Retrieve information about the agent and environment for debugging
*/
type GetDebuginfo struct {
	Context *middleware.Context
	Handler GetDebuginfoHandler
}

// ServeHTTP resolves the matched route, binds and validates the request
// parameters, and delegates the request to the configured handler.
func (o *GetDebuginfo) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		// RouteInfo may return an enriched request; adopt it in place.
		*r = *rCtx
	}
	var Params = NewGetDebuginfoParams()
	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	res := o.Handler.Handle(Params) // actually handle the request
	o.Context.Respond(rw, r, route.Produces, route, res)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
)
// NewGetDebuginfoParams creates a new GetDebuginfoParams object
//
// There are no default values defined in the spec.
func NewGetDebuginfoParams() GetDebuginfoParams {
	return GetDebuginfoParams{}
}

// GetDebuginfoParams contains all the bound params for the get debuginfo operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetDebuginfo
type GetDebuginfoParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`
}

// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetDebuginfoParams() beforehand.
func (o *GetDebuginfoParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	// res collects binding errors; this operation defines no parameters, so the
	// slice is kept (empty) only for uniformity with other generated binders.
	var res []error

	o.HTTPRequest = r

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// GetDebuginfoOKCode is the HTTP code returned for type GetDebuginfoOK
const GetDebuginfoOKCode int = 200

/*
GetDebuginfoOK Success

swagger:response getDebuginfoOK
*/
type GetDebuginfoOK struct {

	/*
	  In: Body
	*/
	Payload *models.DebugInfo `json:"body,omitempty"`
}

// NewGetDebuginfoOK creates GetDebuginfoOK with default headers values
func NewGetDebuginfoOK() *GetDebuginfoOK {
	return &GetDebuginfoOK{}
}

// WithPayload adds the payload to the get debuginfo o k response
func (o *GetDebuginfoOK) WithPayload(payload *models.DebugInfo) *GetDebuginfoOK {
	o.Payload = payload
	return o
}

// SetPayload sets the payload to the get debuginfo o k response
func (o *GetDebuginfoOK) SetPayload(payload *models.DebugInfo) {
	o.Payload = payload
}

// WriteResponse writes the 200 response (and the body, when present) to the client.
func (o *GetDebuginfoOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Use the declared status-code constant rather than a magic number.
	rw.WriteHeader(GetDebuginfoOKCode)
	if o.Payload != nil {
		payload := o.Payload
		if err := producer.Produce(rw, payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
// GetDebuginfoFailureCode is the HTTP code returned for type GetDebuginfoFailure
const GetDebuginfoFailureCode int = 500

/*
GetDebuginfoFailure DebugInfo get failed

swagger:response getDebuginfoFailure
*/
type GetDebuginfoFailure struct {

	/*
	  In: Body
	*/
	Payload models.Error `json:"body,omitempty"`
}

// NewGetDebuginfoFailure creates GetDebuginfoFailure with default headers values
func NewGetDebuginfoFailure() *GetDebuginfoFailure {
	return &GetDebuginfoFailure{}
}

// WithPayload adds the payload to the get debuginfo failure response
func (o *GetDebuginfoFailure) WithPayload(payload models.Error) *GetDebuginfoFailure {
	o.Payload = payload
	return o
}

// SetPayload sets the payload to the get debuginfo failure response
func (o *GetDebuginfoFailure) SetPayload(payload models.Error) {
	o.Payload = payload
}

// WriteResponse writes the 500 response and its error payload to the client.
func (o *GetDebuginfoFailure) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Use the declared status-code constant rather than a magic number.
	rw.WriteHeader(GetDebuginfoFailureCode)
	// The payload is a value type, so it is always produced (no nil check).
	payload := o.Payload
	if err := producer.Produce(rw, payload); err != nil {
		panic(err) // let the recovery middleware deal with this
	}
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetHealthzHandlerFunc turns a function with the right signature into a get healthz handler
type GetHealthzHandlerFunc func(GetHealthzParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetHealthzHandlerFunc) Handle(params GetHealthzParams) middleware.Responder {
	return fn(params)
}

// GetHealthzHandler is the interface a daemon implementation must satisfy to
// handle validated get healthz requests.
type GetHealthzHandler interface {
	Handle(GetHealthzParams) middleware.Responder
}

// NewGetHealthz creates a new http.Handler for the get healthz operation
func NewGetHealthz(ctx *middleware.Context, handler GetHealthzHandler) *GetHealthz {
	return &GetHealthz{Context: ctx, Handler: handler}
}
/*
GetHealthz swagger:route GET /healthz daemon getHealthz

# Get health of Cilium daemon

Returns health and status information of the Cilium daemon and related
components such as the local container runtime, connected datastore,
Kubernetes integration and Hubble.
*/
type GetHealthz struct {
	Context *middleware.Context
	Handler GetHealthzHandler
}

// ServeHTTP resolves the matched route, binds and validates the request
// parameters, and delegates the request to the configured handler.
func (o *GetHealthz) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		// RouteInfo may return an enriched request; adopt it in place.
		*r = *rCtx
	}
	var Params = NewGetHealthzParams()
	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	res := o.Handler.Handle(Params) // actually handle the request
	o.Context.Respond(rw, r, route.Produces, route, res)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewGetHealthzParams creates a new GetHealthzParams object
// with the default values initialized.
func NewGetHealthzParams() GetHealthzParams {
	// Spec default: require-k8s-connectivity is true unless a header overrides it.
	// (Plain literal; the generated bool(true) conversion was redundant.)
	requireK8sConnectivityDefault := true

	return GetHealthzParams{
		RequireK8sConnectivity: &requireK8sConnectivityDefault,
	}
}
// GetHealthzParams contains all the bound params for the get healthz operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetHealthz
type GetHealthzParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`

	/*Brief will return a brief representation of the Cilium status.

	  In: header
	*/
	Brief *bool

	/*If set to true, failure of the agent to connect to the Kubernetes control plane will cause the agent's health status to also fail.

	  In: header
	  Default: true
	*/
	RequireK8sConnectivity *bool
}

// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetHealthzParams() beforehand.
func (o *GetHealthzParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	var res []error

	o.HTTPRequest = r

	// Header names are canonicalized by net/http; look up the canonical forms.
	if err := o.bindBrief(r.Header[http.CanonicalHeaderKey("brief")], true, route.Formats); err != nil {
		res = append(res, err)
	}
	if err := o.bindRequireK8sConnectivity(r.Header[http.CanonicalHeaderKey("require-k8s-connectivity")], true, route.Formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// bindBrief binds and validates parameter Brief from header.
func (o *GetHealthzParams) bindBrief(rawData []string, hasKey bool, formats strfmt.Registry) error {
	raw := ""
	if n := len(rawData); n > 0 {
		// Only the last value of a repeated header is significant.
		raw = rawData[n-1]
	}

	// Optional parameter: an absent or empty header leaves Brief nil.
	if raw == "" {
		return nil
	}

	parsed, err := swag.ConvertBool(raw)
	if err != nil {
		return errors.InvalidType("brief", "header", "bool", raw)
	}
	o.Brief = &parsed

	return nil
}
// bindRequireK8sConnectivity binds and validates parameter RequireK8sConnectivity from header.
func (o *GetHealthzParams) bindRequireK8sConnectivity(rawData []string, hasKey bool, formats strfmt.Registry) error {
	var raw string
	if len(rawData) > 0 {
		// Only the last value of a repeated header is significant.
		raw = rawData[len(rawData)-1]
	}

	// Required: false
	if raw == "" { // empty values pass all other validations
		// Default values have been previously initialized by NewGetHealthzParams()
		return nil
	}

	value, err := swag.ConvertBool(raw)
	if err != nil {
		return errors.InvalidType("require-k8s-connectivity", "header", "bool", raw)
	}
	o.RequireK8sConnectivity = &value

	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// GetHealthzOKCode is the HTTP code returned for type GetHealthzOK
const GetHealthzOKCode int = 200

/*
GetHealthzOK Success

swagger:response getHealthzOK
*/
type GetHealthzOK struct {

	/*
	  In: Body
	*/
	Payload *models.StatusResponse `json:"body,omitempty"`
}

// NewGetHealthzOK creates GetHealthzOK with default headers values
func NewGetHealthzOK() *GetHealthzOK {
	return &GetHealthzOK{}
}

// WithPayload adds the payload to the get healthz o k response
func (o *GetHealthzOK) WithPayload(payload *models.StatusResponse) *GetHealthzOK {
	o.Payload = payload
	return o
}

// SetPayload sets the payload to the get healthz o k response
func (o *GetHealthzOK) SetPayload(payload *models.StatusResponse) {
	o.Payload = payload
}

// WriteResponse writes the 200 response (and the body, when present) to the client.
func (o *GetHealthzOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Use the declared status-code constant rather than a magic number.
	rw.WriteHeader(GetHealthzOKCode)
	if o.Payload != nil {
		payload := o.Payload
		if err := producer.Produce(rw, payload); err != nil {
			panic(err) // let the recovery middleware deal with this
		}
	}
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetMapHandlerFunc turns a function with the right signature into a get map handler
type GetMapHandlerFunc func(GetMapParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetMapHandlerFunc) Handle(params GetMapParams) middleware.Responder {
	return fn(params)
}

// GetMapHandler is the interface a daemon implementation must satisfy to
// handle validated get map requests.
type GetMapHandler interface {
	Handle(GetMapParams) middleware.Responder
}

// NewGetMap creates a new http.Handler for the get map operation
func NewGetMap(ctx *middleware.Context, handler GetMapHandler) *GetMap {
	return &GetMap{Context: ctx, Handler: handler}
}
/*
GetMap swagger:route GET /map daemon getMap

List all open maps
*/
type GetMap struct {
	Context *middleware.Context
	Handler GetMapHandler
}

// ServeHTTP resolves the matched route, binds and validates the request
// parameters, and delegates the request to the configured handler.
func (o *GetMap) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		// RouteInfo may return an enriched request; adopt it in place.
		*r = *rCtx
	}
	var Params = NewGetMapParams()
	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	res := o.Handler.Handle(Params) // actually handle the request
	o.Context.Respond(rw, r, route.Produces, route, res)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetMapNameHandlerFunc turns a function with the right signature into a get map name handler
type GetMapNameHandlerFunc func(GetMapNameParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetMapNameHandlerFunc) Handle(params GetMapNameParams) middleware.Responder {
	return fn(params)
}

// GetMapNameHandler is the interface a daemon implementation must satisfy to
// handle validated get map name requests.
type GetMapNameHandler interface {
	Handle(GetMapNameParams) middleware.Responder
}

// NewGetMapName creates a new http.Handler for the get map name operation
func NewGetMapName(ctx *middleware.Context, handler GetMapNameHandler) *GetMapName {
	return &GetMapName{Context: ctx, Handler: handler}
}
/*
GetMapName swagger:route GET /map/{name} daemon getMapName

Retrieve contents of BPF map
*/
type GetMapName struct {
	Context *middleware.Context
	Handler GetMapNameHandler
}

// ServeHTTP resolves the matched route, binds and validates the request
// parameters (including the {name} path parameter), and delegates the
// request to the configured handler.
func (o *GetMapName) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		// RouteInfo may return an enriched request; adopt it in place.
		*r = *rCtx
	}
	var Params = NewGetMapNameParams()
	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	res := o.Handler.Handle(Params) // actually handle the request
	o.Context.Respond(rw, r, route.Produces, route, res)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetMapNameEventsHandlerFunc turns a function with the right signature into a get map name events handler
type GetMapNameEventsHandlerFunc func(GetMapNameEventsParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetMapNameEventsHandlerFunc) Handle(params GetMapNameEventsParams) middleware.Responder {
	return fn(params)
}

// GetMapNameEventsHandler is the interface a daemon implementation must
// satisfy to handle validated get map name events requests.
type GetMapNameEventsHandler interface {
	Handle(GetMapNameEventsParams) middleware.Responder
}

// NewGetMapNameEvents creates a new http.Handler for the get map name events operation
func NewGetMapNameEvents(ctx *middleware.Context, handler GetMapNameEventsHandler) *GetMapNameEvents {
	return &GetMapNameEvents{Context: ctx, Handler: handler}
}
/*
GetMapNameEvents swagger:route GET /map/{name}/events daemon getMapNameEvents

Retrieves the recent event logs associated with this endpoint.
*/
// NOTE(review): the spec description above says "endpoint" although the route
// addresses a BPF map ({name}); presumably it should read "map" — the text
// comes from the OpenAPI spec, so fix it there, not here.
type GetMapNameEvents struct {
	Context *middleware.Context
	Handler GetMapNameEventsHandler
}

// ServeHTTP resolves the matched route, binds and validates the request
// parameters ({name} path parameter and `follow` query flag), and delegates
// the request to the configured handler.
func (o *GetMapNameEvents) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		// RouteInfo may return an enriched request; adopt it in place.
		*r = *rCtx
	}
	var Params = NewGetMapNameEventsParams()
	if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	res := o.Handler.Handle(Params) // actually handle the request
	o.Context.Respond(rw, r, route.Produces, route, res)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewGetMapNameEventsParams creates a new GetMapNameEventsParams object
//
// There are no default values defined in the spec.
func NewGetMapNameEventsParams() GetMapNameEventsParams {
	return GetMapNameEventsParams{}
}

// GetMapNameEventsParams contains all the bound params for the get map name events operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetMapNameEvents
type GetMapNameEventsParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`

	/*Whether to follow streamed requests

	  In: query
	*/
	Follow *bool

	/*Name of map

	  Required: true
	  In: path
	*/
	Name string
}

// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetMapNameEventsParams() beforehand.
func (o *GetMapNameEventsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	var res []error

	o.HTTPRequest = r

	// Query parameters come from the request URL.
	qs := runtime.Values(r.URL.Query())

	qFollow, qhkFollow, _ := qs.GetOK("follow")
	if err := o.bindFollow(qFollow, qhkFollow, route.Formats); err != nil {
		res = append(res, err)
	}

	// Path parameters come from the matched route, not the raw URL.
	rName, rhkName, _ := route.Params.GetOK("name")
	if err := o.bindName(rName, rhkName, route.Formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// bindFollow extracts the optional "follow" query parameter and, when a
// non-empty value is present, parses it as a boolean into o.Follow.
func (o *GetMapNameEventsParams) bindFollow(rawData []string, hasKey bool, formats strfmt.Registry) error {
	// Query parameters may repeat; the last occurrence wins.
	var last string
	if n := len(rawData); n > 0 {
		last = rawData[n-1]
	}

	// Required: false
	// AllowEmptyValue: false
	if last == "" {
		// Absent or empty values pass all remaining validations.
		return nil
	}

	parsed, err := swag.ConvertBool(last)
	if err != nil {
		return errors.InvalidType("follow", "query", "bool", last)
	}
	o.Follow = &parsed

	return nil
}
// bindName extracts the required "name" path parameter into o.Name.
func (o *GetMapNameEventsParams) bindName(rawData []string, hasKey bool, formats strfmt.Registry) error {
	var value string
	if n := len(rawData); n > 0 {
		value = rawData[n-1]
	}

	// Required: true — presence is guaranteed by route construction,
	// so no explicit required-check is performed here.
	o.Name = value

	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"io"
"net/http"
"github.com/go-openapi/runtime"
)
// GetMapNameEventsOKCode is the HTTP code returned for type GetMapNameEventsOK
const GetMapNameEventsOKCode int = 200

/*
GetMapNameEventsOK Success

swagger:response getMapNameEventsOK
*/
type GetMapNameEventsOK struct {

	/*
	  In: Body
	*/
	Payload io.ReadCloser `json:"body,omitempty"`
}

// NewGetMapNameEventsOK creates GetMapNameEventsOK with default headers values
func NewGetMapNameEventsOK() *GetMapNameEventsOK {
	return &GetMapNameEventsOK{}
}

// WithPayload adds the payload to the get map name events o k response
func (o *GetMapNameEventsOK) WithPayload(payload io.ReadCloser) *GetMapNameEventsOK {
	o.SetPayload(payload)
	return o
}

// SetPayload sets the payload to the get map name events o k response
func (o *GetMapNameEventsOK) SetPayload(payload io.ReadCloser) {
	o.Payload = payload
}

// WriteResponse to the client
func (o *GetMapNameEventsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(GetMapNameEventsOKCode)
	// The payload is streamed through the negotiated producer; any
	// serialization failure is handed to the recovery middleware.
	if err := producer.Produce(rw, o.Payload); err != nil {
		panic(err)
	}
}
// GetMapNameEventsNotFoundCode is the HTTP code returned for type GetMapNameEventsNotFound
const GetMapNameEventsNotFoundCode int = 404

/*
GetMapNameEventsNotFound Map not found

swagger:response getMapNameEventsNotFound
*/
type GetMapNameEventsNotFound struct {
}

// NewGetMapNameEventsNotFound creates GetMapNameEventsNotFound with default headers values
func NewGetMapNameEventsNotFound() *GetMapNameEventsNotFound {
	return &GetMapNameEventsNotFound{}
}

// WriteResponse to the client
func (o *GetMapNameEventsNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Empty body: remove Content-Type before emitting the status line.
	rw.Header().Del(runtime.HeaderContentType)
	rw.WriteHeader(GetMapNameEventsNotFoundCode)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/strfmt"
)
// NewGetMapNameParams creates a new GetMapNameParams object
//
// There are no default values defined in the spec.
func NewGetMapNameParams() GetMapNameParams {
	return GetMapNameParams{}
}

// GetMapNameParams contains all the bound params for the get map name operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetMapName
type GetMapNameParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`

	/*Name of map
	  Required: true
	  In: path
	*/
	Name string
}
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetMapNameParams() beforehand.
func (o *GetMapNameParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	var res []error

	o.HTTPRequest = r

	// Required "name" path parameter, supplied by the matched route.
	rName, rhkName, _ := route.Params.GetOK("name")
	if err := o.bindName(rName, rhkName, route.Formats); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// bindName extracts the required "name" path parameter into o.Name.
func (o *GetMapNameParams) bindName(rawData []string, hasKey bool, formats strfmt.Registry) error {
	var value string
	if n := len(rawData); n > 0 {
		value = rawData[n-1]
	}

	// Required: true — presence is guaranteed by route construction,
	// so no explicit required-check is performed here.
	o.Name = value

	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// GetMapNameOKCode is the HTTP code returned for type GetMapNameOK
const GetMapNameOKCode int = 200

/*
GetMapNameOK Success

swagger:response getMapNameOK
*/
type GetMapNameOK struct {

	/*
	  In: Body
	*/
	Payload *models.BPFMap `json:"body,omitempty"`
}

// NewGetMapNameOK creates GetMapNameOK with default headers values
func NewGetMapNameOK() *GetMapNameOK {
	return &GetMapNameOK{}
}

// WithPayload adds the payload to the get map name o k response
func (o *GetMapNameOK) WithPayload(payload *models.BPFMap) *GetMapNameOK {
	o.SetPayload(payload)
	return o
}

// SetPayload sets the payload to the get map name o k response
func (o *GetMapNameOK) SetPayload(payload *models.BPFMap) {
	o.Payload = payload
}

// WriteResponse to the client
func (o *GetMapNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(GetMapNameOKCode)
	if o.Payload == nil {
		// Nothing to serialize when no payload was set.
		return
	}
	if err := producer.Produce(rw, o.Payload); err != nil {
		// Let the recovery middleware deal with producer failures.
		panic(err)
	}
}
// GetMapNameNotFoundCode is the HTTP code returned for type GetMapNameNotFound
const GetMapNameNotFoundCode int = 404

/*
GetMapNameNotFound Map not found

swagger:response getMapNameNotFound
*/
type GetMapNameNotFound struct {
}

// NewGetMapNameNotFound creates GetMapNameNotFound with default headers values
func NewGetMapNameNotFound() *GetMapNameNotFound {
	return &GetMapNameNotFound{}
}

// WriteResponse to the client
func (o *GetMapNameNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Empty body: remove Content-Type before emitting the status line.
	rw.Header().Del(runtime.HeaderContentType)
	rw.WriteHeader(GetMapNameNotFoundCode)
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
)
// NewGetMapParams creates a new GetMapParams object
//
// There are no default values defined in the spec.
func NewGetMapParams() GetMapParams {
	return GetMapParams{}
}

// GetMapParams contains all the bound params for the get map operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetMap
type GetMapParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`
}
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetMapParams() beforehand.
func (o *GetMapParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	var res []error

	o.HTTPRequest = r

	// This operation takes no parameters, so res is always empty; the
	// composite-error scaffolding is kept for uniformity with the other
	// generated BindRequest implementations.
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// GetMapOKCode is the HTTP code returned for type GetMapOK
const GetMapOKCode int = 200

/*
GetMapOK Success

swagger:response getMapOK
*/
type GetMapOK struct {

	/*
	  In: Body
	*/
	Payload *models.BPFMapList `json:"body,omitempty"`
}

// NewGetMapOK creates GetMapOK with default headers values
func NewGetMapOK() *GetMapOK {
	return &GetMapOK{}
}

// WithPayload adds the payload to the get map o k response
func (o *GetMapOK) WithPayload(payload *models.BPFMapList) *GetMapOK {
	o.SetPayload(payload)
	return o
}

// SetPayload sets the payload to the get map o k response
func (o *GetMapOK) SetPayload(payload *models.BPFMapList) {
	o.Payload = payload
}

// WriteResponse to the client
func (o *GetMapOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(GetMapOKCode)
	if o.Payload == nil {
		// Nothing to serialize when no payload was set.
		return
	}
	if err := producer.Produce(rw, o.Payload); err != nil {
		// Let the recovery middleware deal with producer failures.
		panic(err)
	}
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// GetNodeIdsHandlerFunc turns a function with the right signature into a get node ids handler
type GetNodeIdsHandlerFunc func(GetNodeIdsParams) middleware.Responder

// Handle executing the request and returning a response
func (fn GetNodeIdsHandlerFunc) Handle(params GetNodeIdsParams) middleware.Responder {
	return fn(params)
}

// GetNodeIdsHandler interface for that can handle valid get node ids params
type GetNodeIdsHandler interface {
	Handle(GetNodeIdsParams) middleware.Responder
}

// NewGetNodeIds creates a new http.Handler for the get node ids operation
func NewGetNodeIds(ctx *middleware.Context, handler GetNodeIdsHandler) *GetNodeIds {
	return &GetNodeIds{Context: ctx, Handler: handler}
}

/*
GetNodeIds swagger:route GET /node/ids daemon getNodeIds

# List information about known node IDs

Retrieves a list of node IDs allocated by the agent and their
associated node IP addresses.
*/
type GetNodeIds struct {
	// Context carries the runtime middleware context used for routing,
	// binding and responding.
	Context *middleware.Context
	// Handler implements the operation's business logic.
	Handler GetNodeIdsHandler
}
// ServeHTTP binds and validates the incoming request, dispatches it to the
// registered handler, and writes the handler's responder to the client.
func (o *GetNodeIds) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		*r = *rCtx
	}

	params := NewGetNodeIdsParams()
	if err := o.Context.BindValidRequest(r, route, &params); err != nil {
		// Binding/validation failed; report the error and stop.
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	// Invoke the handler and write whatever responder it returns.
	o.Context.Respond(rw, r, route.Produces, route, o.Handler.Handle(params))
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
)
// NewGetNodeIdsParams creates a new GetNodeIdsParams object
//
// There are no default values defined in the spec.
func NewGetNodeIdsParams() GetNodeIdsParams {
	return GetNodeIdsParams{}
}

// GetNodeIdsParams contains all the bound params for the get node ids operation
// typically these are obtained from a http.Request
//
// swagger:parameters GetNodeIds
type GetNodeIdsParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`
}
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetNodeIdsParams() beforehand.
func (o *GetNodeIdsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	var res []error

	o.HTTPRequest = r

	// This operation takes no parameters, so res is always empty; the
	// composite-error scaffolding is kept for uniformity with the other
	// generated BindRequest implementations.
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// GetNodeIdsOKCode is the HTTP code returned for type GetNodeIdsOK
const GetNodeIdsOKCode int = 200

/*
GetNodeIdsOK Success

swagger:response getNodeIdsOK
*/
type GetNodeIdsOK struct {

	/*
	  In: Body
	*/
	Payload []*models.NodeID `json:"body,omitempty"`
}

// NewGetNodeIdsOK creates GetNodeIdsOK with default headers values
func NewGetNodeIdsOK() *GetNodeIdsOK {
	return &GetNodeIdsOK{}
}

// WithPayload adds the payload to the get node ids o k response
func (o *GetNodeIdsOK) WithPayload(payload []*models.NodeID) *GetNodeIdsOK {
	o.SetPayload(payload)
	return o
}

// SetPayload sets the payload to the get node ids o k response
func (o *GetNodeIdsOK) SetPayload(payload []*models.NodeID) {
	o.Payload = payload
}

// WriteResponse to the client
func (o *GetNodeIdsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(GetNodeIdsOKCode)

	body := o.Payload
	if body == nil {
		// A nil slice would serialize as null; emit an empty array instead.
		body = make([]*models.NodeID, 0, 50)
	}
	if err := producer.Produce(rw, body); err != nil {
		// Let the recovery middleware deal with producer failures.
		panic(err)
	}
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// PatchConfigHandlerFunc turns a function with the right signature into a patch config handler
type PatchConfigHandlerFunc func(PatchConfigParams) middleware.Responder

// Handle executing the request and returning a response
func (fn PatchConfigHandlerFunc) Handle(params PatchConfigParams) middleware.Responder {
	return fn(params)
}

// PatchConfigHandler interface for that can handle valid patch config params
type PatchConfigHandler interface {
	Handle(PatchConfigParams) middleware.Responder
}

// NewPatchConfig creates a new http.Handler for the patch config operation
func NewPatchConfig(ctx *middleware.Context, handler PatchConfigHandler) *PatchConfig {
	return &PatchConfig{Context: ctx, Handler: handler}
}

/*
PatchConfig swagger:route PATCH /config daemon patchConfig

# Modify daemon configuration

Updates the daemon configuration by applying the provided
ConfigurationMap and regenerates & recompiles all required datapath
components.
*/
type PatchConfig struct {
	// Context carries the runtime middleware context used for routing,
	// binding and responding.
	Context *middleware.Context
	// Handler implements the operation's business logic.
	Handler PatchConfigHandler
}
// ServeHTTP binds and validates the incoming request, dispatches it to the
// registered handler, and writes the handler's responder to the client.
func (o *PatchConfig) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	route, rCtx, _ := o.Context.RouteInfo(r)
	if rCtx != nil {
		*r = *rCtx
	}

	params := NewPatchConfigParams()
	if err := o.Context.BindValidRequest(r, route, &params); err != nil {
		// Binding/validation failed; report the error and stop.
		o.Context.Respond(rw, r, route.Produces, route, err)
		return
	}

	// Invoke the handler and write whatever responder it returns.
	o.Context.Respond(rw, r, route.Produces, route, o.Handler.Handle(params))
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"io"
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/validate"
"github.com/cilium/cilium/api/v1/models"
)
// NewPatchConfigParams creates a new PatchConfigParams object
//
// There are no default values defined in the spec.
func NewPatchConfigParams() PatchConfigParams {
	return PatchConfigParams{}
}

// PatchConfigParams contains all the bound params for the patch config operation
// typically these are obtained from a http.Request
//
// swagger:parameters PatchConfig
type PatchConfigParams struct {

	// HTTP Request Object
	HTTPRequest *http.Request `json:"-"`

	/*
	  Required: true
	  In: body
	*/
	// Configuration is the decoded, validated request body; nil until
	// BindRequest succeeds.
	Configuration *models.DaemonConfigurationSpec
}
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewPatchConfigParams() beforehand.
func (o *PatchConfigParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
	var res []error

	o.HTTPRequest = r

	if runtime.HasBody(r) {
		defer r.Body.Close()
		var body models.DaemonConfigurationSpec
		if err := route.Consumer.Consume(r.Body, &body); err != nil {
			if err == io.EOF {
				// An empty body is treated the same as a missing body:
				// the required "configuration" parameter is absent.
				res = append(res, errors.Required("configuration", "body", ""))
			} else {
				res = append(res, errors.NewParseError("configuration", "body", "", err))
			}
		} else {
			// validate body object against the model's constraints
			if err := body.Validate(route.Formats); err != nil {
				res = append(res, err)
			}

			// context-sensitive (readOnly/writeOnly) validation
			ctx := validate.WithOperationRequest(r.Context())
			if err := body.ContextValidate(ctx, route.Formats); err != nil {
				res = append(res, err)
			}

			// Only assign the body once it has fully validated.
			if len(res) == 0 {
				o.Configuration = &body
			}
		}
	} else {
		res = append(res, errors.Required("configuration", "body", ""))
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package daemon
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// PatchConfigOKCode is the HTTP code returned for type PatchConfigOK
const PatchConfigOKCode int = 200

/*
PatchConfigOK Success

swagger:response patchConfigOK
*/
type PatchConfigOK struct {
}

// NewPatchConfigOK creates PatchConfigOK with default headers values
func NewPatchConfigOK() *PatchConfigOK {
	return &PatchConfigOK{}
}

// WriteResponse to the client
func (o *PatchConfigOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Empty body: remove Content-Type before emitting the status line.
	rw.Header().Del(runtime.HeaderContentType)
	rw.WriteHeader(PatchConfigOKCode)
}
// PatchConfigBadRequestCode is the HTTP code returned for type PatchConfigBadRequest
const PatchConfigBadRequestCode int = 400

/*
PatchConfigBadRequest Bad configuration parameters

swagger:response patchConfigBadRequest
*/
type PatchConfigBadRequest struct {

	/*
	  In: Body
	*/
	Payload models.Error `json:"body,omitempty"`
}

// NewPatchConfigBadRequest creates PatchConfigBadRequest with default headers values
func NewPatchConfigBadRequest() *PatchConfigBadRequest {
	return &PatchConfigBadRequest{}
}

// WithPayload adds the payload to the patch config bad request response
func (o *PatchConfigBadRequest) WithPayload(payload models.Error) *PatchConfigBadRequest {
	o.SetPayload(payload)
	return o
}

// SetPayload sets the payload to the patch config bad request response
func (o *PatchConfigBadRequest) SetPayload(payload models.Error) {
	o.Payload = payload
}

// WriteResponse to the client
func (o *PatchConfigBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(PatchConfigBadRequestCode)
	// models.Error is a value type, so it is always serialized,
	// even when left at its zero value.
	if err := producer.Produce(rw, o.Payload); err != nil {
		panic(err)
	}
}
// PatchConfigForbiddenCode is the HTTP code returned for type PatchConfigForbidden
const PatchConfigForbiddenCode int = 403

/*
PatchConfigForbidden Forbidden

swagger:response patchConfigForbidden
*/
type PatchConfigForbidden struct {
}

// NewPatchConfigForbidden creates PatchConfigForbidden with default headers values
func NewPatchConfigForbidden() *PatchConfigForbidden {
	return &PatchConfigForbidden{}
}

// WriteResponse to the client
func (o *PatchConfigForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	// Empty body: remove Content-Type before emitting the status line.
	rw.Header().Del(runtime.HeaderContentType)
	rw.WriteHeader(PatchConfigForbiddenCode)
}
// PatchConfigFailureCode is the HTTP code returned for type PatchConfigFailure
const PatchConfigFailureCode int = 500

/*
PatchConfigFailure Recompilation failed

swagger:response patchConfigFailure
*/
type PatchConfigFailure struct {

	/*
	  In: Body
	*/
	Payload models.Error `json:"body,omitempty"`
}

// NewPatchConfigFailure creates PatchConfigFailure with default headers values
func NewPatchConfigFailure() *PatchConfigFailure {
	return &PatchConfigFailure{}
}

// WithPayload adds the payload to the patch config failure response
func (o *PatchConfigFailure) WithPayload(payload models.Error) *PatchConfigFailure {
	o.SetPayload(payload)
	return o
}

// SetPayload sets the payload to the patch config failure response
func (o *PatchConfigFailure) SetPayload(payload models.Error) {
	o.Payload = payload
}

// WriteResponse to the client
func (o *PatchConfigFailure) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(PatchConfigFailureCode)
	// models.Error is a value type, so it is always serialized,
	// even when left at its zero value.
	if err := producer.Produce(rw, o.Payload); err != nil {
		panic(err)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"fmt"
"log/slog"
"time"
"github.com/spf13/viper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/pkg/command"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/option"
)
// Default values for operator configuration options.
const (
	// EndpointGCIntervalDefault is the default time for the CEP GC
	EndpointGCIntervalDefault = 5 * time.Minute

	// PprofAddressOperator is the default listen address for pprof in the operator
	PprofAddressOperator = "localhost"

	// PprofPortOperator is the default port for pprof in the operator
	PprofPortOperator = 6061

	// DefaultProxyIdleTimeoutSeconds is the default value, in seconds, for the proxy idle timeout
	DefaultProxyIdleTimeoutSeconds = 60

	// DefaultProxyStreamIdleTimeoutSeconds is the default value, in seconds, for the proxy stream idle timeout
	DefaultProxyStreamIdleTimeoutSeconds = 300
)
// Command-line flag names for the operator.
const (
	// EnableMetrics enables prometheus metrics.
	EnableMetrics = "enable-metrics"

	// EndpointGCInterval is the interval between attempts of the CEP GC
	// controller.
	// Note that only one node per cluster should run this, and most iterations
	// will simply return.
	EndpointGCInterval = "cilium-endpoint-gc-interval"

	// NodesGCInterval is the interval at which Cilium nodes are garbage collected.
	NodesGCInterval = "nodes-gc-interval"

	// SyncK8sServices synchronizes k8s services into the kvstore
	SyncK8sServices = "synchronize-k8s-services"

	// SyncK8sNodes synchronizes k8s nodes into the kvstore
	SyncK8sNodes = "synchronize-k8s-nodes"

	// UnmanagedPodWatcherInterval is the interval to check for unmanaged kube-dns pods (0 to disable)
	UnmanagedPodWatcherInterval = "unmanaged-pod-watcher-interval"

	// IPAM options

	// IPAMAPIBurst is the burst value allowed when accessing external IPAM APIs
	IPAMAPIBurst = "limit-ipam-api-burst"

	// IPAMAPIQPSLimit is the queries per second limit when accessing external IPAM APIs
	IPAMAPIQPSLimit = "limit-ipam-api-qps"

	// IPAMSubnetsIDs are optional subnets IDs used to filter subnets and interfaces listing
	IPAMSubnetsIDs = "subnet-ids-filter"

	// IPAMSubnetsTags are optional tags used to filter subnets, and interfaces within those subnets
	IPAMSubnetsTags = "subnet-tags-filter"

	// IPAMInstanceTags are optional tags used to filter instances for ENI discovery.
	// Only used with AWS and Alibabacloud IPAM mode for now
	IPAMInstanceTags = "instance-tags-filter"

	// IPAMAutoCreateCiliumPodIPPools contains pre-defined IP pools to be auto-created on startup.
	IPAMAutoCreateCiliumPodIPPools = "auto-create-cilium-pod-ip-pools"

	// ClusterPoolIPv4CIDR is the cluster's IPv4 CIDR to allocate
	// individual PodCIDR ranges from when using the ClusterPool ipam mode.
	ClusterPoolIPv4CIDR = "cluster-pool-ipv4-cidr"

	// ClusterPoolIPv6CIDR is the cluster's IPv6 CIDR to allocate
	// individual PodCIDR ranges from when using the ClusterPool ipam mode.
	ClusterPoolIPv6CIDR = "cluster-pool-ipv6-cidr"

	// NodeCIDRMaskSizeIPv4 is the IPv4 podCIDR mask size that will be used
	// per node.
	NodeCIDRMaskSizeIPv4 = "cluster-pool-ipv4-mask-size"

	// NodeCIDRMaskSizeIPv6 is the IPv6 podCIDR mask size that will be used
	// per node.
	NodeCIDRMaskSizeIPv6 = "cluster-pool-ipv6-mask-size"

	// AWS options

	// AWSReleaseExcessIPs allows releasing excess free IP addresses from ENI.
	// Enabling this option reduces waste of IP addresses but may increase
	// the number of API calls to AWS EC2 service.
	AWSReleaseExcessIPs = "aws-release-excess-ips"

	// ExcessIPReleaseDelay controls how long operator would wait before an IP previously marked as excess is released.
	// Defaults to 180 secs
	ExcessIPReleaseDelay = "excess-ip-release-delay"

	// AWSEnablePrefixDelegation allows operator to allocate prefixes to ENIs on nitro instances instead of individual
	// IP addresses. Allows for increased pod density on nodes.
	AWSEnablePrefixDelegation = "aws-enable-prefix-delegation"

	// ENITags are the tags that will be added to every ENI created by the
	// AWS ENI IPAM.
	ENITags = "eni-tags"

	// ENIGarbageCollectionTags are the tags that will be added to every ENI
	// created by the AWS ENI IPAM.
	// Any stale and unattached ENIs with these tags will be garbage
	// collected by the operator.
	ENIGarbageCollectionTags = "eni-gc-tags"

	// ENIGarbageCollectionInterval defines the interval of ENI GC
	ENIGarbageCollectionInterval = "eni-gc-interval"

	// ParallelAllocWorkers specifies the number of parallel workers to be used for IPAM allocation
	ParallelAllocWorkers = "parallel-alloc-workers"

	// EC2APIEndpoint is the custom API endpoint to use for the EC2 AWS service,
	// e.g. "ec2-fips.us-west-1.amazonaws.com" to use a FIPS endpoint in the us-west-1 region.
	EC2APIEndpoint = "ec2-api-endpoint"

	// AWSUsePrimaryAddress specifies whether an interface's primary address should be available for allocations on
	// node
	AWSUsePrimaryAddress = "aws-use-primary-address"

	// Azure options

	// AzureSubscriptionID is the subscription ID to use when accessing the Azure API
	AzureSubscriptionID = "azure-subscription-id"

	// AzureResourceGroup is the resource group of the nodes used for the cluster
	AzureResourceGroup = "azure-resource-group"

	// AzureUserAssignedIdentityID is the id of the user assigned identity used
	// for retrieving Azure API credentials
	AzureUserAssignedIdentityID = "azure-user-assigned-identity-id"

	// AzureUsePrimaryAddress specifies whether we should use or ignore the interface's
	// primary IPConfiguration
	AzureUsePrimaryAddress = "azure-use-primary-address"

	// LeaderElectionLeaseDuration is the duration that non-leader candidates will wait to
	// force acquire leadership
	LeaderElectionLeaseDuration = "leader-election-lease-duration"

	// LeaderElectionRenewDeadline is the duration that the current acting master in HA deployment
	// will retry refreshing leadership before giving up the lock.
	LeaderElectionRenewDeadline = "leader-election-renew-deadline"

	// LeaderElectionRetryPeriod is the duration the LeaderElector clients should wait between
	// tries of the actions in operator HA deployment.
	LeaderElectionRetryPeriod = "leader-election-retry-period"

	// AlibabaCloud options

	// AlibabaCloudVPCID allows the user to specify a VPC
	AlibabaCloudVPCID = "alibaba-cloud-vpc-id"

	// AlibabaCloudReleaseExcessIPs allows releasing excess free IP addresses from ENI.
	// Enabling this option reduces waste of IP addresses but may increase
	// the number of API calls to AlibabaCloud ECS service.
	AlibabaCloudReleaseExcessIPs = "alibaba-cloud-release-excess-ips"

	// ProxyIdleTimeoutSeconds is the idle timeout for proxy connections to upstream clusters
	ProxyIdleTimeoutSeconds = "proxy-idle-timeout-seconds"

	// ProxyStreamIdleTimeoutSeconds is the stream timeout for proxy connections to upstream clusters
	ProxyStreamIdleTimeoutSeconds = "proxy-stream-idle-timeout-seconds"

	// EnableGatewayAPI enables support of Gateway API
	// This must be enabled along with enable-envoy-config in cilium agent.
	EnableGatewayAPI = "enable-gateway-api"

	// KubeProxyReplacement is equivalent to the cilium-agent option, and
	// is used to provide hints for misconfiguration.
	KubeProxyReplacement = "kube-proxy-replacement"

	// EnableNodePort is equivalent to the cilium-agent option, and
	// is used to provide hints for misconfiguration.
	EnableNodePort = "enable-node-port"

	// CiliumK8sNamespace is the namespace where Cilium pods are running.
	CiliumK8sNamespace = "cilium-pod-namespace"

	// CiliumPodLabels specifies the pod labels that Cilium pods are running
	// with.
	CiliumPodLabels = "cilium-pod-labels"

	// TaintSyncWorkers is the number of workers used to synchronize
	// taints and conditions in Kubernetes nodes.
	TaintSyncWorkers = "taint-sync-workers"

	// RemoveCiliumNodeTaints is the flag to define if the Cilium node taint
	// should be removed in Kubernetes nodes.
	RemoveCiliumNodeTaints = "remove-cilium-node-taints"

	// SetCiliumNodeTaints is whether or not to taint nodes that do not have
	// a running Cilium instance.
	SetCiliumNodeTaints = "set-cilium-node-taints"

	// SetCiliumIsUpCondition sets the CiliumIsUp node condition in Kubernetes
	// nodes.
	SetCiliumIsUpCondition = "set-cilium-is-up-condition"

	// PodRestartSelector specifies the labels contained in the pod that needs to be restarted before the node can be untainted
	// default value: k8s-app=kube-dns
	PodRestartSelector = "pod-restart-selector"

	// AWSPaginationEnabled toggles pagination for AWS EC2 API requests
	AWSPaginationEnabled = "aws-pagination-enabled"
)
// OperatorConfig is the configuration used by the operator.
type OperatorConfig struct {
	// NodesGCInterval is the GC interval for CiliumNodes
	NodesGCInterval time.Duration

	// EnableMetrics enables prometheus metrics.
	EnableMetrics bool

	// EndpointGCInterval is the interval between attempts of the CEP GC
	// controller.
	// Note that only one node per cluster should run this, and most iterations
	// will simply return.
	EndpointGCInterval time.Duration

	// SyncK8sServices synchronizes k8s services into the kvstore
	SyncK8sServices bool

	// SyncK8sNodes synchronizes k8s nodes into the kvstore
	SyncK8sNodes bool

	// UnmanagedPodWatcherInterval is the interval to check for unmanaged kube-dns pods (0 to disable)
	UnmanagedPodWatcherInterval int

	// LeaderElectionLeaseDuration is the duration that non-leader candidates will wait to
	// force acquire leadership in Cilium Operator HA deployment.
	LeaderElectionLeaseDuration time.Duration

	// LeaderElectionRenewDeadline is the duration that the current acting master in HA deployment
	// will retry refreshing leadership in before giving up the lock.
	LeaderElectionRenewDeadline time.Duration

	// LeaderElectionRetryPeriod is the duration that LeaderElector clients should wait between
	// retries of the actions in operator HA deployment.
	LeaderElectionRetryPeriod time.Duration

	// IPAM options

	// IPAMAPIBurst is the burst value allowed when accessing external IPAM APIs
	IPAMAPIBurst int

	// IPAMAPIQPSLimit is the queries per second limit when accessing external IPAM APIs
	IPAMAPIQPSLimit float64

	// IPAMSubnetsIDs are optional subnets IDs used to filter subnets and interfaces listing
	IPAMSubnetsIDs []string

	// IPAMSubnetsTags are optional tags used to filter subnets, and interfaces within those subnets
	IPAMSubnetsTags map[string]string

	// IPAMInstanceTags are optional tags used to filter instances for ENI discovery.
	// Only used with AWS and Alibabacloud IPAM mode for now
	IPAMInstanceTags map[string]string

	// IPAM Operator options

	// ClusterPoolIPv4CIDR is the cluster IPv4 podCIDR that should be used to
	// allocate pods in the node.
	ClusterPoolIPv4CIDR []string

	// ClusterPoolIPv6CIDR is the cluster IPv6 podCIDR that should be used to
	// allocate pods in the node.
	ClusterPoolIPv6CIDR []string

	// NodeCIDRMaskSizeIPv4 is the IPv4 podCIDR mask size that will be used
	// per node.
	NodeCIDRMaskSizeIPv4 int

	// NodeCIDRMaskSizeIPv6 is the IPv6 podCIDR mask size that will be used
	// per node.
	NodeCIDRMaskSizeIPv6 int

	// IPAMAutoCreateCiliumPodIPPools contains pre-defined IP pools to be auto-created on startup.
	IPAMAutoCreateCiliumPodIPPools map[string]string

	// KubeProxyReplacement or NodePort are required to implement cluster
	// Ingress (or equivalent Gateway API functionality)
	KubeProxyReplacement string

	// EnableNodePort enables NodePort support; see KubeProxyReplacement above.
	EnableNodePort bool

	// AWS options

	// ENITags are the tags that will be added to every ENI created by the AWS ENI IPAM
	ENITags map[string]string

	// ENIGarbageCollectionTags is a tag that will be added to every ENI
	// created by the AWS ENI IPAM.
	// Any stale and unattached ENIs with this tag will be garbage
	// collected by the operator.
	ENIGarbageCollectionTags map[string]string

	// ENIGarbageCollectionInterval defines the interval of ENI GC
	ENIGarbageCollectionInterval time.Duration

	// ParallelAllocWorkers specifies the number of parallel workers to be used for accessing cloud provider APIs.
	ParallelAllocWorkers int64

	// AWSReleaseExcessIPs allows releasing excess free IP addresses from ENI.
	// Enabling this option reduces waste of IP addresses but may increase
	// the number of API calls to AWS EC2 service.
	AWSReleaseExcessIPs bool

	// AWSEnablePrefixDelegation allows operator to allocate prefixes to ENIs on nitro instances instead of individual
	// IP addresses. Allows for increased pod density on nodes.
	AWSEnablePrefixDelegation bool

	// AWSUsePrimaryAddress specifies whether an interface's primary address should be available for allocations on
	// node
	AWSUsePrimaryAddress bool

	// ExcessIPReleaseDelay controls how long operator would wait before an IP previously marked as excess is released.
	// Defaults to 180 secs
	ExcessIPReleaseDelay int

	// EC2APIEndpoint is the custom API endpoint to use for the EC2 AWS service,
	// e.g. "ec2-fips.us-west-1.amazonaws.com" to use a FIPS endpoint in the us-west-1 region.
	EC2APIEndpoint string

	// Azure options

	// AzureSubscriptionID is the subscription ID to use when accessing the Azure API
	AzureSubscriptionID string

	// AzureResourceGroup is the resource group of the nodes used for the cluster
	AzureResourceGroup string

	// AzureUserAssignedIdentityID is the id of the user assigned identity used
	// for retrieving Azure API credentials
	AzureUserAssignedIdentityID string

	// AzureUsePrimaryAddress specifies whether we should use or ignore the interface's
	// primary IPConfiguration
	AzureUsePrimaryAddress bool

	// AlibabaCloud options

	// AlibabaCloudVPCID allows the user to specify the VPC ID to use
	AlibabaCloudVPCID string

	// AlibabaCloudReleaseExcessIPs allows releasing excess free IP addresses from ENI.
	// Enabling this option reduces waste of IP addresses but may increase
	// the number of API calls to AlibabaCloud ECS service.
	AlibabaCloudReleaseExcessIPs bool

	// EnableGatewayAPI enables support of Gateway API
	EnableGatewayAPI bool

	// ProxyIdleTimeoutSeconds is the idle timeout for the proxy to upstream cluster
	ProxyIdleTimeoutSeconds int

	// ProxyStreamIdleTimeoutSeconds is the stream idle timeout for the proxy to upstream cluster
	ProxyStreamIdleTimeoutSeconds int

	// CiliumK8sNamespace is the namespace where Cilium pods are running.
	CiliumK8sNamespace string

	// CiliumPodLabels specifies the pod labels that Cilium pods are running
	// with.
	CiliumPodLabels string

	// TaintSyncWorkers is the number of workers used to synchronize
	// taints and conditions in Kubernetes nodes.
	TaintSyncWorkers int

	// RemoveCiliumNodeTaints is the flag to define if the Cilium node taint
	// should be removed in Kubernetes nodes.
	RemoveCiliumNodeTaints bool

	// SetCiliumNodeTaints is whether or not to set taints on nodes that do not
	// have a running Cilium pod.
	SetCiliumNodeTaints bool

	// SetCiliumIsUpCondition sets the CiliumIsUp node condition in Kubernetes
	// nodes.
	SetCiliumIsUpCondition bool

	// PodRestartSelector specifies the labels of pods that need to be
	// restarted before the node taint can be removed
	PodRestartSelector string

	// AWSPaginationEnabled toggles pagination for AWS EC2 API requests
	AWSPaginationEnabled bool
}
// Populate sets all options with the values from viper.
func (c *OperatorConfig) Populate(logger *slog.Logger, vp *viper.Viper) {
	// parseStringMap reads a string-map option from viper and aborts the
	// process on malformed input; every map option below is handled this way.
	parseStringMap := func(key string) map[string]string {
		m, err := command.GetStringMapStringE(vp, key)
		if err != nil {
			logging.Fatal(logger, fmt.Sprintf("unable to parse %s: %s", key, err))
		}
		return m
	}

	c.NodesGCInterval = vp.GetDuration(NodesGCInterval)
	c.EnableMetrics = vp.GetBool(EnableMetrics)
	c.EndpointGCInterval = vp.GetDuration(EndpointGCInterval)
	c.SyncK8sServices = vp.GetBool(SyncK8sServices)
	c.SyncK8sNodes = vp.GetBool(SyncK8sNodes)
	c.UnmanagedPodWatcherInterval = vp.GetInt(UnmanagedPodWatcherInterval)
	c.NodeCIDRMaskSizeIPv4 = vp.GetInt(NodeCIDRMaskSizeIPv4)
	c.NodeCIDRMaskSizeIPv6 = vp.GetInt(NodeCIDRMaskSizeIPv6)
	c.ClusterPoolIPv4CIDR = vp.GetStringSlice(ClusterPoolIPv4CIDR)
	c.ClusterPoolIPv6CIDR = vp.GetStringSlice(ClusterPoolIPv6CIDR)
	c.LeaderElectionLeaseDuration = vp.GetDuration(LeaderElectionLeaseDuration)
	c.LeaderElectionRenewDeadline = vp.GetDuration(LeaderElectionRenewDeadline)
	c.LeaderElectionRetryPeriod = vp.GetDuration(LeaderElectionRetryPeriod)
	c.EnableGatewayAPI = vp.GetBool(EnableGatewayAPI)

	// Proxy timeouts fall back to their defaults when left unset (zero).
	if c.ProxyIdleTimeoutSeconds = vp.GetInt(ProxyIdleTimeoutSeconds); c.ProxyIdleTimeoutSeconds == 0 {
		c.ProxyIdleTimeoutSeconds = DefaultProxyIdleTimeoutSeconds
	}
	if c.ProxyStreamIdleTimeoutSeconds = vp.GetInt(ProxyStreamIdleTimeoutSeconds); c.ProxyStreamIdleTimeoutSeconds == 0 {
		c.ProxyStreamIdleTimeoutSeconds = DefaultProxyStreamIdleTimeoutSeconds
	}

	c.CiliumPodLabels = vp.GetString(CiliumPodLabels)
	c.TaintSyncWorkers = vp.GetInt(TaintSyncWorkers)
	c.RemoveCiliumNodeTaints = vp.GetBool(RemoveCiliumNodeTaints)
	c.SetCiliumNodeTaints = vp.GetBool(SetCiliumNodeTaints)
	c.SetCiliumIsUpCondition = vp.GetBool(SetCiliumIsUpCondition)
	c.PodRestartSelector = vp.GetString(PodRestartSelector)

	// When no namespace is configured, fall back to the agent's namespace
	// and, failing that, to the Kubernetes "default" namespace.
	if c.CiliumK8sNamespace = vp.GetString(CiliumK8sNamespace); c.CiliumK8sNamespace == "" {
		if ns := option.Config.K8sNamespace; ns != "" {
			c.CiliumK8sNamespace = ns
		} else {
			c.CiliumK8sNamespace = metav1.NamespaceDefault
		}
	}

	// IPAM options
	c.IPAMAPIQPSLimit = vp.GetFloat64(IPAMAPIQPSLimit)
	c.IPAMAPIBurst = vp.GetInt(IPAMAPIBurst)
	c.ParallelAllocWorkers = vp.GetInt64(ParallelAllocWorkers)

	// Gateways and Ingress
	c.KubeProxyReplacement = vp.GetString(KubeProxyReplacement)
	c.EnableNodePort = vp.GetBool(EnableNodePort)

	// AWS options
	c.AWSReleaseExcessIPs = vp.GetBool(AWSReleaseExcessIPs)
	c.AWSEnablePrefixDelegation = vp.GetBool(AWSEnablePrefixDelegation)
	c.AWSUsePrimaryAddress = vp.GetBool(AWSUsePrimaryAddress)
	c.EC2APIEndpoint = vp.GetString(EC2APIEndpoint)
	c.ExcessIPReleaseDelay = vp.GetInt(ExcessIPReleaseDelay)
	c.ENIGarbageCollectionInterval = vp.GetDuration(ENIGarbageCollectionInterval)
	c.AWSPaginationEnabled = vp.GetBool(AWSPaginationEnabled)

	// Azure options
	c.AzureSubscriptionID = vp.GetString(AzureSubscriptionID)
	c.AzureResourceGroup = vp.GetString(AzureResourceGroup)
	c.AzureUsePrimaryAddress = vp.GetBool(AzureUsePrimaryAddress)
	c.AzureUserAssignedIdentityID = vp.GetString(AzureUserAssignedIdentityID)

	// AlibabaCloud options
	c.AlibabaCloudVPCID = vp.GetString(AlibabaCloudVPCID)
	c.AlibabaCloudReleaseExcessIPs = vp.GetBool(AlibabaCloudReleaseExcessIPs)

	// Option maps and slices. The subnet ID list keeps its (non-nil)
	// default when the option is unset.
	if ids := vp.GetStringSlice(IPAMSubnetsIDs); len(ids) > 0 {
		c.IPAMSubnetsIDs = ids
	}
	c.IPAMSubnetsTags = parseStringMap(IPAMSubnetsTags)
	c.IPAMInstanceTags = parseStringMap(IPAMInstanceTags)
	c.ENITags = parseStringMap(ENITags)
	c.ENIGarbageCollectionTags = parseStringMap(ENIGarbageCollectionTags)
	c.IPAMAutoCreateCiliumPodIPPools = parseStringMap(IPAMAutoCreateCiliumPodIPPools)
}
// Config represents the operator configuration.
var Config = &OperatorConfig{
	// Collection-typed options start out as empty (non-nil) values so
	// consumers can read them before Populate has run.
	IPAMSubnetsIDs:                 []string{},
	IPAMSubnetsTags:                map[string]string{},
	IPAMInstanceTags:               map[string]string{},
	IPAMAutoCreateCiliumPodIPPools: map[string]string{},
	ENITags:                        map[string]string{},
	ENIGarbageCollectionTags:       map[string]string{},
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"github.com/cilium/cilium/pkg/ipam/types"
)
// Spec is the ENI specification of a node. This specification is considered
// by the cilium-operator to act as an IPAM operator and makes ENI IPs available
// via the IPAMSpec section.
//
// The ENI specification can either be provided explicitly by the user or the
// cilium-agent running on the node can be instructed to create the CiliumNode
// custom resource along with an ENI specification when the node registers
// itself to the Kubernetes cluster.
type Spec struct {
	// InstanceType is the ECS instance type, e.g. "ecs.g6.2xlarge"
	//
	// +kubebuilder:validation:Optional
	InstanceType string `json:"instance-type,omitempty"`

	// AvailabilityZone is the availability zone to use when allocating
	// ENIs.
	//
	// +kubebuilder:validation:Optional
	AvailabilityZone string `json:"availability-zone,omitempty"`

	// VPCID is the VPC ID to use when allocating ENIs.
	//
	// +kubebuilder:validation:Optional
	VPCID string `json:"vpc-id,omitempty"`

	// CIDRBlock is the VPC IPv4 CIDR
	//
	// +kubebuilder:validation:Optional
	CIDRBlock string `json:"cidr-block,omitempty"`

	// VSwitches is the list of vSwitch IDs available for ENI allocation
	//
	// +kubebuilder:validation:Optional
	VSwitches []string `json:"vswitches,omitempty"`

	// VSwitchTags is the list of tags to use when evaluating which
	// vSwitch to use for the ENI.
	//
	// +kubebuilder:validation:Optional
	VSwitchTags map[string]string `json:"vswitch-tags,omitempty"`

	// SecurityGroups is the list of security groups to attach to any ENI
	// that is created and attached to the instance.
	//
	// +kubebuilder:validation:Optional
	SecurityGroups []string `json:"security-groups,omitempty"`

	// SecurityGroupTags is the list of tags to use when evaluating which
	// security groups to use for the ENI.
	//
	// +kubebuilder:validation:Optional
	SecurityGroupTags map[string]string `json:"security-group-tags,omitempty"`
}
const (
	// ENITypePrimary is the Type value of a primary ENI
	ENITypePrimary string = "Primary"
	// ENITypeSecondary is the Type value of a secondary ENI
	ENITypeSecondary string = "Secondary"
)
// ENI represents an AlibabaCloud Elastic Network Interface
type ENI struct {
	// NetworkInterfaceID is the ENI ID
	//
	// +optional
	NetworkInterfaceID string `json:"network-interface-id,omitempty"`

	// MACAddress is the MAC address of the ENI
	//
	// +optional
	MACAddress string `json:"mac-address,omitempty"`

	// Type is the ENI type Primary or Secondary
	//
	// +optional
	Type string `json:"type,omitempty"`

	// InstanceID is the InstanceID using this ENI
	//
	// +optional
	InstanceID string `json:"instance-id,omitempty"`

	// SecurityGroupIDs is the security group ids used by this ENI
	//
	// NOTE(review): the tag is "security-groupids" (no hyphen before
	// "ids"), unlike the other tags; kept as-is for wire compatibility.
	//
	// +optional
	SecurityGroupIDs []string `json:"security-groupids,omitempty"`

	// VPC is the vpc to which the ENI belongs
	//
	// +optional
	VPC VPC `json:"vpc,omitempty"`

	// ZoneID is the zone to which the ENI belongs
	//
	// +optional
	ZoneID string `json:"zone-id,omitempty"`

	// VSwitch is the vSwitch the ENI is using
	//
	// +optional
	VSwitch VSwitch `json:"vswitch,omitempty"`

	// PrimaryIPAddress is the primary IP on ENI
	//
	// +optional
	PrimaryIPAddress string `json:"primary-ip-address,omitempty"`

	// PrivateIPSets is the list of all IPs on the ENI, including PrimaryIPAddress
	//
	// +optional
	PrivateIPSets []PrivateIPSet `json:"private-ipsets,omitempty"`

	// Tags is the tags on this ENI
	//
	// +optional
	Tags map[string]string `json:"tags,omitempty"`
}
// DeepCopyInterface returns a deep copy of the ENI as the generic
// types.Interface, letting *ENI satisfy that interface.
func (e *ENI) DeepCopyInterface() types.Interface {
	return e.DeepCopy()
}
// InterfaceID returns the identifier of the interface, which for an
// AlibabaCloud ENI is its NetworkInterfaceID.
func (e *ENI) InterfaceID() string {
	return e.NetworkInterfaceID
}
// ForeachAddress iterates over all addresses and calls fn. Entries flagged
// as Primary are skipped. Iteration stops at the first error returned by fn,
// which is propagated to the caller.
func (e *ENI) ForeachAddress(id string, fn types.AddressIterator) error {
	for i := range e.PrivateIPSets {
		ipSet := e.PrivateIPSets[i]
		if ipSet.Primary {
			continue
		}
		if err := fn(id, e.NetworkInterfaceID, ipSet.PrivateIpAddress, "", ipSet); err != nil {
			return err
		}
	}
	return nil
}
// ENIStatus is the status of ENI addressing of the node
type ENIStatus struct {
	// ENIs is the map of ENIs on the node
	//
	// +optional
	ENIs map[string]ENI `json:"enis,omitempty"`
}
// PrivateIPSet is a nested struct in ecs response
type PrivateIPSet struct {
PrivateIpAddress string `json:"private-ip-address,omitempty"`
Primary bool `json:"primary,omitempty" `
}
// VPC holds the VPC information an ENI is associated with.
type VPC struct {
	// VPCID is the vpc to which the ENI belongs
	//
	// +optional
	VPCID string `json:"vpc-id,omitempty"`

	// CIDRBlock is the VPC IPv4 CIDR
	//
	// +optional
	CIDRBlock string `json:"cidr,omitempty"`

	// IPv6CIDRBlock is the VPC IPv6 CIDR
	//
	// +optional
	IPv6CIDRBlock string `json:"ipv6-cidr,omitempty"`

	// SecondaryCIDRs is the list of Secondary CIDRs associated with the VPC
	//
	// +optional
	SecondaryCIDRs []string `json:"secondary-cidrs,omitempty"`
}
// VSwitch holds the vSwitch information an ENI is attached to.
type VSwitch struct {
	// VSwitchID is the vSwitch to which the ENI belongs
	//
	// +optional
	VSwitchID string `json:"vswitch-id,omitempty"`

	// CIDRBlock is the vSwitch IPv4 CIDR
	//
	// +optional
	CIDRBlock string `json:"cidr,omitempty"`

	// IPv6CIDRBlock is the vSwitch IPv6 CIDR
	//
	// +optional
	IPv6CIDRBlock string `json:"ipv6-cidr,omitempty"`
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package types
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ENI) DeepCopyInto(out *ENI) {
	// Shallow-copy all value fields first; reference-typed fields are
	// re-cloned below so in and out share no backing storage.
	*out = *in
	if in.SecurityGroupIDs != nil {
		in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.VPC.DeepCopyInto(&out.VPC)
	// VSwitch contains only string fields, so plain assignment is already a
	// deep copy.
	out.VSwitch = in.VSwitch
	if in.PrivateIPSets != nil {
		in, out := &in.PrivateIPSets, &out.PrivateIPSets
		*out = make([]PrivateIPSet, len(*in))
		copy(*out, *in)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENI.
// A nil receiver yields a nil result.
func (in *ENI) DeepCopy() *ENI {
	if in == nil {
		return nil
	}
	out := new(ENI)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ENIStatus) DeepCopyInto(out *ENIStatus) {
	*out = *in
	if in.ENIs != nil {
		in, out := &in.ENIs, &out.ENIs
		*out = make(map[string]ENI, len(*in))
		// Each map value is deep-copied so no ENI internals are shared.
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENIStatus.
// A nil receiver yields a nil result.
func (in *ENIStatus) DeepCopy() *ENIStatus {
	if in == nil {
		return nil
	}
	out := new(ENIStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrivateIPSet) DeepCopyInto(out *PrivateIPSet) {
	// PrivateIPSet has only value-typed fields, so assignment suffices.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateIPSet.
// A nil receiver yields a nil result.
func (in *PrivateIPSet) DeepCopy() *PrivateIPSet {
	if in == nil {
		return nil
	}
	out := new(PrivateIPSet)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
	// Shallow-copy value fields, then clone each slice/map so in and out
	// share no backing storage.
	*out = *in
	if in.VSwitches != nil {
		in, out := &in.VSwitches, &out.VSwitches
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.VSwitchTags != nil {
		in, out := &in.VSwitchTags, &out.VSwitchTags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.SecurityGroups != nil {
		in, out := &in.SecurityGroups, &out.SecurityGroups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SecurityGroupTags != nil {
		in, out := &in.SecurityGroupTags, &out.SecurityGroupTags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
// A nil receiver yields a nil result.
func (in *Spec) DeepCopy() *Spec {
	if in == nil {
		return nil
	}
	out := new(Spec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VPC) DeepCopyInto(out *VPC) {
	*out = *in
	// SecondaryCIDRs is the only reference-typed field and must be cloned.
	if in.SecondaryCIDRs != nil {
		in, out := &in.SecondaryCIDRs, &out.SecondaryCIDRs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPC.
// A nil receiver yields a nil result.
func (in *VPC) DeepCopy() *VPC {
	if in == nil {
		return nil
	}
	out := new(VPC)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VSwitch) DeepCopyInto(out *VSwitch) {
	// VSwitch has only string fields, so assignment suffices.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSwitch.
// A nil receiver yields a nil result.
func (in *VSwitch) DeepCopy() *VSwitch {
	if in == nil {
		return nil
	}
	out := new(VSwitch)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package types
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ENI) DeepEqual(other *ENI) bool {
	if other == nil {
		return false
	}

	if in.NetworkInterfaceID != other.NetworkInterfaceID {
		return false
	}
	if in.MACAddress != other.MACAddress {
		return false
	}
	if in.Type != other.Type {
		return false
	}
	if in.InstanceID != other.InstanceID {
		return false
	}
	// Slice/map fields: enter the body when both sides are non-nil (compare
	// contents) or exactly one is nil (mismatch, caught by the inner checks).
	if ((in.SecurityGroupIDs != nil) && (other.SecurityGroupIDs != nil)) || ((in.SecurityGroupIDs == nil) != (other.SecurityGroupIDs == nil)) {
		in, other := &in.SecurityGroupIDs, &other.SecurityGroupIDs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if !in.VPC.DeepEqual(&other.VPC) {
		return false
	}
	if in.ZoneID != other.ZoneID {
		return false
	}
	// VSwitch has only comparable fields, so == is a deep comparison.
	if in.VSwitch != other.VSwitch {
		return false
	}
	if in.PrimaryIPAddress != other.PrimaryIPAddress {
		return false
	}
	if ((in.PrivateIPSets != nil) && (other.PrivateIPSets != nil)) || ((in.PrivateIPSets == nil) != (other.PrivateIPSets == nil)) {
		in, other := &in.PrivateIPSets, &other.PrivateIPSets
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.Tags != nil) && (other.Tags != nil)) || ((in.Tags == nil) != (other.Tags == nil)) {
		in, other := &in.Tags, &other.Tags
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ENIStatus) DeepEqual(other *ENIStatus) bool {
	if other == nil {
		return false
	}

	// Enter the body when both maps are non-nil (compare contents) or
	// exactly one is nil (mismatch).
	if ((in.ENIs != nil) && (other.ENIs != nil)) || ((in.ENIs == nil) != (other.ENIs == nil)) {
		in, other := &in.ENIs, &other.ENIs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if !inValue.DeepEqual(&otherValue) {
						return false
					}
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PrivateIPSet) DeepEqual(other *PrivateIPSet) bool {
	if other == nil {
		return false
	}

	if in.PrivateIpAddress != other.PrivateIpAddress {
		return false
	}
	if in.Primary != other.Primary {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Spec) DeepEqual(other *Spec) bool {
	if other == nil {
		return false
	}

	if in.InstanceType != other.InstanceType {
		return false
	}
	if in.AvailabilityZone != other.AvailabilityZone {
		return false
	}
	if in.VPCID != other.VPCID {
		return false
	}
	if in.CIDRBlock != other.CIDRBlock {
		return false
	}
	// Slice/map fields: enter the body when both sides are non-nil (compare
	// contents) or exactly one is nil (mismatch, caught by the inner checks).
	if ((in.VSwitches != nil) && (other.VSwitches != nil)) || ((in.VSwitches == nil) != (other.VSwitches == nil)) {
		in, other := &in.VSwitches, &other.VSwitches
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.VSwitchTags != nil) && (other.VSwitchTags != nil)) || ((in.VSwitchTags == nil) != (other.VSwitchTags == nil)) {
		in, other := &in.VSwitchTags, &other.VSwitchTags
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if ((in.SecurityGroups != nil) && (other.SecurityGroups != nil)) || ((in.SecurityGroups == nil) != (other.SecurityGroups == nil)) {
		in, other := &in.SecurityGroups, &other.SecurityGroups
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.SecurityGroupTags != nil) && (other.SecurityGroupTags != nil)) || ((in.SecurityGroupTags == nil) != (other.SecurityGroupTags == nil)) {
		in, other := &in.SecurityGroupTags, &other.SecurityGroupTags
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *VPC) DeepEqual(other *VPC) bool {
	if other == nil {
		return false
	}

	if in.VPCID != other.VPCID {
		return false
	}
	if in.CIDRBlock != other.CIDRBlock {
		return false
	}
	if in.IPv6CIDRBlock != other.IPv6CIDRBlock {
		return false
	}
	// Enter the body when both slices are non-nil (compare contents) or
	// exactly one is nil (mismatch).
	if ((in.SecondaryCIDRs != nil) && (other.SecondaryCIDRs != nil)) || ((in.SecondaryCIDRs == nil) != (other.SecondaryCIDRs == nil)) {
		in, other := &in.SecondaryCIDRs, &other.SecondaryCIDRs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *VSwitch) DeepEqual(other *VSwitch) bool {
	if other == nil {
		return false
	}

	if in.VSwitchID != other.VSwitchID {
		return false
	}
	if in.CIDRBlock != other.CIDRBlock {
		return false
	}
	if in.IPv6CIDRBlock != other.IPv6CIDRBlock {
		return false
	}

	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package allocator
import (
"context"
"errors"
"fmt"
"log/slog"
"github.com/cilium/cilium/pkg/backoff"
"github.com/cilium/cilium/pkg/idpool"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/rate"
"github.com/cilium/cilium/pkg/time"
)
var (
	// subsysLogAttr is attached to loggers created by this package to tag
	// log output with the "allocator" subsystem.
	subsysLogAttr = []any{logfields.LogSubsys, "allocator"}
)
const (
	// DefaultSyncInterval is the default value for the periodic synchronization
	// of the allocated identities.
	DefaultSyncInterval = 5 * time.Minute

	// defaultMaxAllocAttempts is the default number of attempted allocation
	// requests performed before failing (see Allocator.maxAllocAttempts).
	defaultMaxAllocAttempts = 16
)
// Allocator is a distributed ID allocator backed by a KVstore. It maps
// arbitrary keys to identifiers. Multiple users on different cluster nodes can
// in parallel request the ID for keys and are guaranteed to retrieve the same
// ID for an identical key.
//
// While the details of how keys are stored is delegated to Backend
// implementations, some expectations exist. See pkg/kvstore/allocator for
// details about the kvstore implementation.
//
// A node takes a reference to an identity when it is in-use on that node, and
// the identity remains in-use if there is any node reference to it. When an
// identity no longer has any node references, it may be garbage collected. No
// guarantees are made at that point and the numeric identity may be reused.
// Note that the numeric IDs are selected locally and verified with the Backend.
//
// Lookup ID by key:
//  1. Return ID from local cache updated by watcher (no Backend interactions)
//  2. Do ListPrefix() on slave key, return the first result that matches the exact
//     prefix.
//
// Lookup key by ID:
//  1. Return key from local cache updated by watcher (no Backend interactions)
//  2. Do Get() on master key, return result
//
// Allocate:
//  1. Check local key cache, increment, and return if key is already in use
//     locally (no Backend interactions)
//  2. Check local cache updated by watcher, if...
//
// ... match found:
//
//	2.1 Create a new slave key. This operation is potentially racy as the master
//	    key can be removed in the meantime.
//	  - etcd: Create is made conditional on existence of master key
//
// ... match not found:
//
//	2.1 Select new unused id from local cache
//	2.2 Create a new master key with the condition that it may not exist
//	2.3 Create a new slave key
//
// 1.1. If found, increment and return (no Backend interactions)
// 2. Lookup ID by key in local cache or via first slave key found in Backend
//
// Release:
//  1. Reduce local reference count until last use (no Backend interactions)
//  2. Delete slave key (basePath/value/key1/node1)
//     This automatically guarantees that when the last node has released the
//     key, the key is no longer found by Get()
//  3. If the node goes down, all slave keys of that node are removed after
//     the TTL expires (auto release).
type Allocator struct {
	logger *slog.Logger
	// events is a channel which will receive AllocatorEvent as IDs are
	// added, modified or removed from the allocator
	events AllocatorEventSendChan

	// keyType is an instance of the type to be used as allocator key.
	keyType AllocatorKey

	// min is the lower limit when allocating IDs. The allocator will never
	// allocate an ID lesser than this value.
	min idpool.ID

	// max is the upper limit when allocating IDs. The allocator will never
	// allocate an ID greater than this value.
	max idpool.ID

	// prefixMask if set, will be ORed to all selected IDs prior to
	// allocation
	prefixMask idpool.ID

	// localKeys contains all keys including their reference count for keys
	// which have been allocated and are in local use
	localKeys *localKeys

	// backoffTemplate is the backoff configuration while allocating
	backoffTemplate backoff.Exponential

	// slaveKeysMutex protects the concurrent access of the slave key by this
	// agent.
	slaveKeysMutex lock.Mutex

	// mainCache is the main cache, representing the allocator contents of
	// the primary kvstore connection
	mainCache cache

	// remoteCachesMutex protects access to remoteCaches
	remoteCachesMutex lock.RWMutex

	// remoteCaches is the list of additional remote caches being watched
	// in addition to the main cache
	remoteCaches map[string]*remoteCache

	// stopGC is the channel used to stop the garbage collector
	stopGC chan struct{}

	// initialListDone is a channel that is closed when the initial
	// synchronization has completed
	initialListDone waitChan

	// idPool maintains a pool of available ids for allocation.
	idPool *idpool.IDPool

	// enableMasterKeyProtection if true, causes master keys that are still in
	// local use to be automatically re-created
	enableMasterKeyProtection bool

	// disableGC disables the garbage collector
	disableGC bool

	// disableAutostart prevents starting the allocator when it is initialized
	disableAutostart bool

	// operatorIDManagement indicates if cilium-operator is managing Cilium Identities.
	operatorIDManagement bool

	// maxAllocAttempts is the number of attempted allocation requests
	// performed before failing.
	maxAllocAttempts int

	// syncInterval is the interval for local keys refresh.
	syncInterval time.Duration

	// cacheValidators implement extra validations of retrieved identities, e.g.,
	// to ensure that they belong to the expected range.
	cacheValidators []CacheValidator

	// backend is the upstream, shared, backend to which we synchronize local
	// information
	backend Backend
}
// AllocatorOption is the base type for allocator options; each option is a
// function that mutates the Allocator during construction.
type AllocatorOption func(*Allocator)
// CacheValidator is the type of the validation functions triggered to filter out
// invalid notification events. A non-nil error rejects the event.
type CacheValidator func(kind AllocatorChangeKind, id idpool.ID, key AllocatorKey) error
// NewAllocatorForGC returns an allocator that can be used to run RunGC()
//
// The allocator can be configured by passing in additional options:
//   - WithMin(id) - minimum ID to allocate (default: 1)
//   - WithMax(id) - maximum ID to allocate (default max(uint64))
func NewAllocatorForGC(rootLogger *slog.Logger, backend Backend, opts ...AllocatorOption) *Allocator {
	alloc := &Allocator{
		logger:  rootLogger.With(subsysLogAttr...),
		backend: backend,
		min:     idpool.ID(1),
		max:     idpool.ID(^uint64(0)),
	}
	// Apply caller-supplied options in order.
	for _, opt := range opts {
		opt(alloc)
	}
	return alloc
}
// GCStats summarizes the outcome of one garbage-collection pass.
type GCStats struct {
	// Alive is the number of identities alive
	Alive int

	// Deleted is the number of identities deleted
	Deleted int
}
// Backend represents clients to remote ID allocation systems, such as KV
// Stores. These are used to coordinate key->ID allocation between cilium
// nodes.
type Backend interface {
	// DeleteAllKeys will delete all keys. It is used in tests.
	DeleteAllKeys(ctx context.Context)

	// DeleteID deletes the identity with the given ID
	DeleteID(ctx context.Context, id idpool.ID) error

	// AllocateID creates a new key->ID association. This is expected to be a
	// create-only operation, and the ID may be allocated by another node. An
	// error in that case is not expected to be fatal. The actual ID is obtained
	// by Allocator from the local idPool, which is updated with used-IDs as the
	// Backend makes calls to the handler in ListAndWatch.
	// The implementation of the backend might return an AllocatorKey that is
	// a copy of 'key' with an internal reference of the backend key or, if it
	// doesn't use the internal reference of the backend key it simply returns
	// 'key'. In case of an error the returned 'AllocatorKey' should be nil.
	AllocateID(ctx context.Context, id idpool.ID, key AllocatorKey) (AllocatorKey, error)

	// AllocateIDIfLocked behaves like AllocateID but when lock is non-nil the
	// operation proceeds only if it is still valid.
	// The implementation of the backend might return an AllocatorKey that is
	// a copy of 'key' with an internal reference of the backend key or, if it
	// doesn't use the internal reference of the backend key it simply returns
	// 'key'. In case of an error the returned 'AllocatorKey' should be nil.
	AllocateIDIfLocked(ctx context.Context, id idpool.ID, key AllocatorKey, lock kvstore.KVLocker) (AllocatorKey, error)

	// AcquireReference records that this node is using this key->ID mapping.
	// This is distinct from any reference counting within this agent; only one
	// reference exists for this node for any number of managed endpoints using
	// it.
	// The semantics of cleaning up stale references is delegated to the Backend
	// implementation. RunGC may need to be invoked.
	// This can race, and so lock can be provided (via a Lock call, below).
	AcquireReference(ctx context.Context, id idpool.ID, key AllocatorKey, lock kvstore.KVLocker) error

	// Release releases the use of an ID associated with the provided key. It
	// does not guard against concurrent calls to Release; callers that need
	// mutual exclusion must provide it themselves.
	Release(ctx context.Context, id idpool.ID, key AllocatorKey) (err error)

	// UpdateKey refreshes the record that this node is using this key -> id
	// mapping. When reliablyMissing is set it will also recreate missing master or
	// slave keys.
	UpdateKey(ctx context.Context, id idpool.ID, key AllocatorKey, reliablyMissing bool) error

	// UpdateKeyIfLocked behaves like UpdateKey but when lock is non-nil the operation proceeds only if it is still valid.
	UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error

	// Get returns the allocated ID for this key as seen by the Backend. This may
	// have been created by other agents.
	Get(ctx context.Context, key AllocatorKey) (idpool.ID, error)

	// GetIfLocked behaves like Get, but when lock is non-nil the
	// operation proceeds only if it is still valid.
	GetIfLocked(ctx context.Context, key AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error)

	// GetByID returns the key associated with this ID, as seen by the Backend.
	// This may have been created by other agents.
	GetByID(ctx context.Context, id idpool.ID) (AllocatorKey, error)

	// Lock provides an opaque lock object that can be used, later, to ensure
	// that the key has not changed since the lock was created. This can be done
	// with GetIfLocked.
	Lock(ctx context.Context, key AllocatorKey) (kvstore.KVLocker, error)

	// ListIDs returns the IDs of all identities currently stored in the backend
	ListIDs(ctx context.Context) (identityIDs []idpool.ID, err error)

	// ListAndWatch begins synchronizing the local Backend instance with its
	// remote.
	ListAndWatch(ctx context.Context, handler CacheMutations)

	// RunGC reaps stale or unused identities within the Backend and makes them
	// available for reuse. It is used by the cilium-operator and is not invoked
	// by cilium-agent.
	// Note: not all Backend implementations rely on this, such as the kvstore
	// backends, and may use leases to expire keys.
	RunGC(ctx context.Context, rateLimit *rate.Limiter, staleKeysPrevRound map[string]uint64, minID idpool.ID, maxID idpool.ID) (map[string]uint64, *GCStats, error)

	// RunLocksGC reaps stale or unused locks within the Backend. It is used by
	// the cilium-operator and is not invoked by cilium-agent. Returns
	// a map of locks currently being held in the KVStore including the ones
	// that failed to be GCed.
	// Note: not all Backend implementations rely on this, such as the kvstore
	// backends, and may use leases to expire keys.
	RunLocksGC(ctx context.Context, staleKeysPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error)
}
// NewAllocator creates a new Allocator. Any type can be used as key as long as
// the type implements the AllocatorKey interface. A variable of the type has
// to be passed into NewAllocator() to make the type known. The specified base
// path is used to prefix all keys in the kvstore. The provided path must be
// unique.
//
// The allocator can be configured by passing in additional options:
//   - WithEvents() - enable Events channel
//   - WithMin(id) - minimum ID to allocate (default: 1)
//   - WithMax(id) - maximum ID to allocate (default max(uint64))
//
// After creation, IDs can be allocated with Allocate() and released with
// Release()
func NewAllocator(rootLogger *slog.Logger, typ AllocatorKey, backend Backend, opts ...AllocatorOption) (*Allocator, error) {
	alloc := &Allocator{
		logger:       rootLogger.With(subsysLogAttr...),
		keyType:      typ,
		backend:      backend,
		min:          idpool.ID(1),
		max:          idpool.ID(^uint64(0)),
		localKeys:    newLocalKeys(rootLogger),
		stopGC:       make(chan struct{}),
		remoteCaches: map[string]*remoteCache{},
		backoffTemplate: backoff.Exponential{
			Logger: rootLogger.With(subsysLogAttr...),
			Min:    time.Duration(20) * time.Millisecond,
			Factor: 2.0,
		},
		maxAllocAttempts: defaultMaxAllocAttempts,
		syncInterval:     DefaultSyncInterval,
	}

	// Options may override any of the defaults above, including min/max,
	// so the range is validated only after they have been applied.
	for _, opt := range opts {
		opt(alloc)
	}

	alloc.mainCache = newCache(alloc)

	if alloc.min < 1 {
		return nil, errors.New("minimum ID must be >= 1")
	}
	if alloc.max <= alloc.min {
		return nil, fmt.Errorf("maximum ID must be greater than minimum ID: configured max %v, min %v", alloc.max, alloc.min)
	}

	alloc.idPool = idpool.NewIDPool(alloc.min, alloc.max)

	if !alloc.disableAutostart {
		alloc.start()
	}

	return alloc, nil
}
// start kicks off the main cache watcher and, unless garbage collection is
// disabled, a goroutine that waits for the initial listing to complete before
// starting the periodic local key synchronization.
func (a *Allocator) start() {
	a.initialListDone = a.mainCache.start()

	if a.disableGC {
		return
	}

	go func() {
		select {
		case <-a.initialListDone:
		case <-time.After(option.Config.AllocatorListTimeout):
			logging.Fatal(a.logger, "Timeout while waiting for initial allocator state")
		}
		a.startLocalKeySync()
	}()
}
// WithBackend sets this allocator to use backend. It is expected to be used at
// initialization.
func WithBackend(backend Backend) AllocatorOption {
	return func(alloc *Allocator) {
		alloc.backend = backend
	}
}
// WithEvents enables receiving of events.
//
// CAUTION: When using this function. The provided channel must be continuously
// read while NewAllocator() is being called to ensure that the channel does
// not block indefinitely while NewAllocator() emits events on it while
// populating the initial cache.
func WithEvents(events AllocatorEventSendChan) AllocatorOption {
	return func(alloc *Allocator) {
		alloc.events = events
	}
}
// WithMin sets the minimum identifier to be allocated
func WithMin(id idpool.ID) AllocatorOption {
	return func(alloc *Allocator) {
		alloc.min = id
	}
}
// WithMax sets the maximum identifier to be allocated
func WithMax(id idpool.ID) AllocatorOption {
	return func(alloc *Allocator) {
		alloc.max = id
	}
}
// WithPrefixMask sets the prefix used for all ID allocations. If set, the mask
// will be ORed to all selected IDs prior to allocation. It is the
// responsibility of the caller to ensure that the mask is not conflicting with
// min..max.
func WithPrefixMask(mask idpool.ID) AllocatorOption {
	return func(alloc *Allocator) {
		alloc.prefixMask = mask
	}
}
// WithMasterKeyProtection will watch for delete events on master keys and
// re-created them if local usage suggests that the key is still in use
func WithMasterKeyProtection() AllocatorOption {
	return func(alloc *Allocator) {
		alloc.enableMasterKeyProtection = true
	}
}
// WithOperatorIDManagement enables the mode with cilium-operator managing
// Cilium Identities.
func WithOperatorIDManagement() AllocatorOption {
	return func(alloc *Allocator) {
		alloc.operatorIDManagement = true
	}
}
// WithMaxAllocAttempts sets the maxAllocAttempts. If not set, new Allocator
// will use defaultMaxAllocAttempts.
func WithMaxAllocAttempts(maxAttempts int) AllocatorOption {
	return func(alloc *Allocator) {
		alloc.maxAllocAttempts = maxAttempts
	}
}
// WithoutGC disables the use of the garbage collector
func WithoutGC() AllocatorOption {
	return func(alloc *Allocator) {
		alloc.disableGC = true
	}
}
// WithoutAutostart prevents starting the allocator when it is initialized
func WithoutAutostart() AllocatorOption {
	return func(alloc *Allocator) {
		alloc.disableAutostart = true
	}
}
// WithSyncInterval configures the interval for local keys refresh.
func WithSyncInterval(interval time.Duration) AllocatorOption {
	return func(alloc *Allocator) {
		alloc.syncInterval = interval
	}
}
// WithCacheValidator registers a validator triggered for each identity
// notification event to filter out invalid IDs and keys.
func WithCacheValidator(validator CacheValidator) AllocatorOption {
	return func(alloc *Allocator) {
		alloc.cacheValidators = append(alloc.cacheValidators, validator)
	}
}
// GetEvents returns the events channel given to the allocator when
// constructed.
// Note: This channel is not owned by the allocator! It is nil unless
// WithEvents() was passed at construction time.
func (a *Allocator) GetEvents() AllocatorEventSendChan {
	return a.events
}
// Delete deletes an allocator and stops the garbage collector.
//
// Closing stopGC terminates the local key sync goroutine started by
// startLocalKeySync. Note that a second call would panic on the double
// close, so Delete must be invoked at most once per allocator.
func (a *Allocator) Delete() {
	close(a.stopGC)
	a.mainCache.stop()
}
// WaitForInitialSync waits until the initial sync is complete
func (a *Allocator) WaitForInitialSync(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return fmt.Errorf("identity sync was cancelled: %w", ctx.Err())
	case <-a.initialListDone:
		return nil
	}
}
// RangeFunc is the function called by RangeCache / ForeachCache for each
// cached (ID, key) pair.
type RangeFunc func(idpool.ID, AllocatorKey)
// ForeachCache iterates over the allocator cache and calls RangeFunc on each
// cached entry
func (a *Allocator) ForeachCache(cb RangeFunc) {
	a.mainCache.foreach(cb)

	// Remote caches are visited under the read lock; the callback runs
	// while the lock is held, exactly as before.
	a.remoteCachesMutex.RLock()
	defer a.remoteCachesMutex.RUnlock()
	for _, remote := range a.remoteCaches {
		remote.cache.foreach(cb)
	}
}
// selectAvailableID selects an available ID.
// Returns a triple of the selected ID ORed with prefixMask, the ID string and
// the originally selected ID.
func (a *Allocator) selectAvailableID() (idpool.ID, string, idpool.ID) {
	leased := a.idPool.LeaseAvailableID()
	if leased == idpool.NoID {
		return 0, "", 0
	}

	masked := leased | a.prefixMask
	return masked, masked.String(), leased
}
// AllocatorKey is the interface to implement in order for a type to be used as
// key for the allocator. The key's data is assumed to be a collection of
// pkg/label.Label, and the functions reflect this somewhat.
type AllocatorKey interface {
	fmt.Stringer

	// GetKey returns the canonical string representation of the key
	GetKey() string

	// PutKey stores the information in v into the key. This is the inverse
	// operation to GetKey
	PutKey(v string) AllocatorKey

	// GetAsMap returns the key as a collection of "labels" with a key and value.
	// This is the inverse operation to PutKeyFromMap.
	GetAsMap() map[string]string

	// PutKeyFromMap stores the labels in v into the key to be used later. This
	// is the inverse operation to GetAsMap.
	PutKeyFromMap(v map[string]string) AllocatorKey

	// PutValue puts metadata inside the global identity for the given 'key' with
	// the given 'value'.
	PutValue(key any, value any) AllocatorKey

	// Value returns the value stored in the metadata map.
	Value(key any) any
}
// lockedAllocate is the slow path of Allocate(): it performs the actual
// kvstore interaction for a key while holding the backend lock for that key.
//
// Return values:
//  1. allocated ID
//  2. whether the ID is newly allocated from kvstore
//  3. whether this is the first owner that holds a reference to the key in
//     localkeys store
//  4. error in case of failure
func (a *Allocator) lockedAllocate(ctx context.Context, key AllocatorKey) (idpool.ID, bool, bool, error) {
	var firstUse bool

	kvstore.Trace(a.logger, "Allocating key in kvstore", fieldKey, key)

	k := key.GetKey()
	lock, err := a.backend.Lock(ctx, key)
	if err != nil {
		return 0, false, false, err
	}

	// Unlock with a fresh context so the lock is released even when ctx has
	// already been cancelled.
	defer lock.Unlock(context.Background())

	// fetch first key that matches /value/<key>
	value, err := a.GetIfLocked(ctx, key, lock)
	if err != nil {
		return 0, false, false, err
	}

	kvstore.Trace(a.logger, "kvstore state is: ", fieldID, value)

	a.slaveKeysMutex.Lock()
	defer a.slaveKeysMutex.Unlock()

	// We shouldn't assume the fact the master key does not exist in the kvstore
	// that localKeys does not have it. The KVStore might have lost all of its
	// data but the local agent still holds a reference for the given master key.
	if value == 0 {
		value = a.localKeys.lookupKey(k)
		if value != 0 {
			// re-create master key
			if err := a.backend.UpdateKeyIfLocked(ctx, value, key, true, lock); err != nil {
				return 0, false, false, fmt.Errorf("unable to re-create missing master key '%s': %s while allocating ID: %w", key, value, err)
			}
		}
	} else {
		_, firstUse, err = a.localKeys.allocate(k, key, value)
		if err != nil {
			return 0, false, false, fmt.Errorf("unable to reserve local key '%s': %w", k, err)
		}

		if firstUse {
			a.logger.Debug("Reserved new local key", logfields.Key, k)
		} else {
			a.logger.Debug("Reusing existing local key", logfields.Key, k)
		}
	}

	if value != 0 {
		a.logger.Debug("Reusing existing global key", logfields.Key, k)

		if err = a.backend.AcquireReference(ctx, value, key, lock); err != nil {
			a.localKeys.release(k)
			return 0, false, false, fmt.Errorf("unable to create secondary key '%s': %w", k, err)
		}

		// mark the key as verified in the local cache
		if err := a.localKeys.verify(k); err != nil {
			a.logger.Error("BUG: Unable to verify local key", logfields.Error, err)
		}

		return value, false, firstUse, nil
	}

	// No existing mapping anywhere: allocate a fresh ID from the local pool.
	a.logger.Debug("Allocating new master ID", logfields.Key, k)
	id, strID, unmaskedID := a.selectAvailableID()
	if id == 0 {
		return 0, false, false, fmt.Errorf("no more available IDs in configured space")
	}

	kvstore.Trace(a.logger, "Selected available key ID", fieldID, id)

	releaseKeyAndID := func() {
		a.localKeys.release(k)
		a.idPool.Release(unmaskedID) // This returns this ID to be re-used for other keys
	}

	oldID, firstUse, err := a.localKeys.allocate(k, key, id)
	if err != nil {
		a.idPool.Release(unmaskedID)
		return 0, false, false, fmt.Errorf("unable to reserve local key '%s': %w", k, err)
	}

	// Another local writer beat us to allocating an ID for the same key,
	// start over
	if id != oldID {
		releaseKeyAndID()
		return 0, false, false, fmt.Errorf("another writer has allocated key %s", k)
	}

	// Check that this key has not been allocated in the cluster during our
	// operation here
	value, err = a.GetNoCache(ctx, key)
	if err != nil {
		releaseKeyAndID()
		return 0, false, false, err
	}
	if value != 0 {
		releaseKeyAndID()
		return 0, false, false, fmt.Errorf("Found master key after proceeding with new allocation for %s", k)
	}

	// Assigned to 'key' from 'key2' since in case of an error, we don't replace
	// the original 'key' variable with 'nil'.
	key2 := key
	key, err = a.backend.AllocateIDIfLocked(ctx, id, key2, lock)
	if err != nil {
		// Creation failed. Another agent most likely beat us to allocating this
		// ID, retry.
		releaseKeyAndID()
		return 0, false, false, fmt.Errorf("unable to allocate ID %s for key %s: %w", strID, key2, err)
	}

	// Notify pool that leased ID is now in-use.
	a.idPool.Use(unmaskedID)

	if err = a.backend.AcquireReference(ctx, id, key, lock); err != nil {
		// We will leak the master key here as the key has already been
		// exposed and may be in use by other nodes. The garbage
		// collector will release it again.
		releaseKeyAndID()
		return 0, false, false, fmt.Errorf("secondary key creation failed '%s': %w", k, err)
	}

	// mark the key as verified in the local cache
	if err := a.localKeys.verify(k); err != nil {
		a.logger.Error("BUG: Unable to verify local key", logfields.Error, err)
	}

	a.logger.Debug("Allocated new global key", logfields.Key, k)

	return id, true, firstUse, nil
}
// Allocate will retrieve the ID for the provided key. If no ID has been
// allocated for this key yet, a key will be allocated. If allocation fails,
// most likely due to a parallel allocation of the same ID by another user,
// allocation is re-attempted for maxAllocAttempts times.
//
// Return values:
//  1. allocated ID
//  2. whether the ID is newly allocated from kvstore
//  3. whether this is the first owner that holds a reference to the key in
//     localkeys store
//  4. error in case of failure
func (a *Allocator) Allocate(ctx context.Context, key AllocatorKey) (idpool.ID, bool, bool, error) {
	var (
		err      error
		value    idpool.ID
		isNew    bool
		firstUse bool
	)

	a.logger.Debug("Allocating key", logfields.Key, key)

	// Block until the initial identity listing has completed so the local
	// caches consulted below are usable.
	select {
	case <-a.initialListDone:
	case <-ctx.Done():
		return 0, false, false, fmt.Errorf("allocation was cancelled while waiting for initial key list to be received: %w", ctx.Err())
	}

	if a.operatorIDManagement {
		id, err := a.GetWithRetry(ctx, key)
		// The second and third return values are always false when
		// operatorIDManagement is enabled because cilium-operator manages security
		// IDs, and they are never newly allocated or require holding a reference to
		// a key.
		return id, false, false, err
	}

	kvstore.Trace(a.logger, "Allocating from kvstore", fieldKey, key)

	// make a copy of the template and customize it
	boff := a.backoffTemplate
	boff.Name = key.String()

	for attempt := range a.maxAllocAttempts {
		// Check our list of local keys already in use and increment the
		// refcnt. The returned key must be released afterwards. No kvstore
		// operation was performed for this allocation.
		// We also do this on every loop as a different Allocate call might have
		// allocated the key while we are attempting to allocate in this
		// execution thread. It does not hurt to check if localKeys contains a
		// reference for the key that we are attempting to allocate.
		if val := a.localKeys.use(key.GetKey()); val != idpool.NoID {
			kvstore.Trace(a.logger, "Reusing local id",
				fieldID, val,
				fieldKey, key,
			)
			a.mainCache.insert(key, val)
			return val, false, false, nil
		}

		// FIXME: Add non-locking variant
		value, isNew, firstUse, err = a.lockedAllocate(ctx, key)
		if err == nil {
			a.mainCache.insert(key, value)
			a.logger.Debug("Allocated key",
				logfields.Key, key,
				logfields.ID, value,
			)
			return value, isNew, firstUse, nil
		}

		// Distinguish cancellation from an ordinary failed attempt.
		select {
		case <-ctx.Done():
			a.logger.Warn("Ongoing key allocation has been cancelled",
				logfields.Error, ctx.Err(),
				logfields.Key, key,
				logfields.Attempt, attempt,
			)
			return 0, false, false, fmt.Errorf("key allocation cancelled: %w", ctx.Err())
		default:
			a.logger.Warn("Key allocation attempt failed",
				logfields.Error, err,
				logfields.Key, key,
				logfields.Attempt, attempt,
			)
		}

		kvstore.Trace(a.logger, "Allocation attempt failed",
			fieldKey, key,
			logfields.Attempt, attempt,
		)

		// Exponential backoff between attempts; aborts early on cancellation.
		if waitErr := boff.Wait(ctx); waitErr != nil {
			return 0, false, false, waitErr
		}
	}

	// All attempts exhausted; return the error from the last attempt.
	return 0, false, false, err
}
// GetWithRetry returns the ID allocated to the given key, retrying with the
// allocator's exponential backoff for up to maxAllocAttempts attempts. It is
// used when cilium-operator manages identity allocation and this agent only
// waits for the identity to appear.
func (a *Allocator) GetWithRetry(ctx context.Context, key AllocatorKey) (idpool.ID, error) {
	getID := func() (idpool.ID, error) {
		id, err := a.Get(ctx, key)
		if err != nil {
			return idpool.NoID, err
		}

		if id == idpool.NoID {
			return idpool.NoID, fmt.Errorf("security identity not found for key %s", key.String())
		}

		return id, nil
	}

	// Make a copy of the template and customize it.
	boff := a.backoffTemplate
	boff.Name = key.String()

	var id idpool.ID
	var err error

	for attempt := range a.maxAllocAttempts {
		id, err = getID()
		if err == nil {
			return id, nil
		}

		// Distinguish cancellation from "identity not created yet".
		select {
		case <-ctx.Done():
			a.logger.Warn("Ongoing key allocation has been cancelled",
				logfields.Error, ctx.Err(),
				logfields.Key, key,
				logfields.Attempt, attempt,
			)
			return idpool.NoID, fmt.Errorf("key allocation cancelled: %w", ctx.Err())
		default:
			a.logger.Debug("CiliumIdentity not yet created by cilium-operator, retrying...",
				logfields.Error, err,
				logfields.Key, key,
				logfields.Attempt, attempt,
			)
		}

		if waitErr := boff.Wait(ctx); waitErr != nil {
			a.logger.Warn("timed out waiting for cilium-operator to allocate CiliumIdentity",
				logfields.Key, key,
				logfields.Attempt, attempt,
			)
			return idpool.NoID, fmt.Errorf("timed out waiting for cilium-operator to allocate CiliumIdentity for key %v, error: %w", key.GetKey(), waitErr)
		}
	}

	// All attempts exhausted; return the error from the last attempt.
	return idpool.NoID, err
}
// GetIfLocked returns the ID which is allocated to a key. Returns an ID of NoID if no ID
// has been allocated to this key yet if the client is still holding the given
// lock.
func (a *Allocator) GetIfLocked(ctx context.Context, key AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
	cached := a.mainCache.get(key.GetKey())
	if cached != idpool.NoID {
		return cached, nil
	}

	return a.backend.GetIfLocked(ctx, key, lock)
}
// Get returns the ID which is allocated to a key. Returns an ID of NoID if no ID
// has been allocated to this key yet.
func (a *Allocator) Get(ctx context.Context, key AllocatorKey) (idpool.ID, error) {
	cached := a.mainCache.get(key.GetKey())
	if cached != idpool.NoID {
		return cached, nil
	}

	return a.GetNoCache(ctx, key)
}
// GetNoCache returns the ID which is allocated to a key in the kvstore,
// bypassing the local copy of allocated keys.
func (a *Allocator) GetNoCache(ctx context.Context, key AllocatorKey) (idpool.ID, error) {
	// Direct pass-through to the backend; no cache consultation or update.
	return a.backend.Get(ctx, key)
}
// GetByID returns the key associated with an ID. Returns nil if no key is
// associated with the ID.
func (a *Allocator) GetByID(ctx context.Context, id idpool.ID) (AllocatorKey, error) {
	cached := a.mainCache.getByID(id)
	if cached != nil {
		return cached, nil
	}

	return a.backend.GetByID(ctx, id)
}
// GetIncludeRemoteCaches returns the ID which is allocated to a key. Includes the
// caches of watched remote kvstores in the query. Returns an ID of NoID if no
// ID has been allocated in any remote kvstore to this key yet.
func (a *Allocator) GetIncludeRemoteCaches(ctx context.Context, key AllocatorKey) (idpool.ID, error) {
	encoded := key.GetKey()

	// The main cache is consulted first.
	if id := a.mainCache.get(encoded); id != idpool.NoID {
		return id, nil
	}

	// Then every watched remote cache, with the read lock held only for the
	// duration of the in-memory lookups.
	lookupRemotes := func() idpool.ID {
		a.remoteCachesMutex.RLock()
		defer a.remoteCachesMutex.RUnlock()
		for _, remote := range a.remoteCaches {
			if id := remote.cache.get(encoded); id != idpool.NoID {
				return id
			}
		}
		return idpool.NoID
	}
	if id := lookupRemotes(); id != idpool.NoID {
		return id, nil
	}

	// Finally, fall back to the main backend.
	id, err := a.backend.Get(ctx, key)
	if id != idpool.NoID || err != nil {
		return id, err
	}

	// we skip checking remote backends explicitly here, to avoid
	// accidentally overloading them in case of lookups for invalid identities
	return idpool.NoID, nil
}
// GetByIDIncludeRemoteCaches returns the key associated with an ID. Includes
// the caches of watched remote kvstores in the query.
// Returns nil if no key is associated with the ID.
func (a *Allocator) GetByIDIncludeRemoteCaches(ctx context.Context, id idpool.ID) (AllocatorKey, error) {
	// The main cache is consulted first.
	if key := a.mainCache.getByID(id); key != nil {
		return key, nil
	}

	// Then every watched remote cache, with the read lock held only for the
	// duration of the in-memory lookups.
	lookupRemotes := func() AllocatorKey {
		a.remoteCachesMutex.RLock()
		defer a.remoteCachesMutex.RUnlock()
		for _, remote := range a.remoteCaches {
			if key := remote.cache.getByID(id); key != nil {
				return key
			}
		}
		return nil
	}
	if key := lookupRemotes(); key != nil {
		return key, nil
	}

	// Finally, fall back to the main backend.
	key, err := a.backend.GetByID(ctx, id)
	if key != nil || err != nil {
		return key, err
	}

	// we skip checking remote backends explicitly here, to avoid
	// accidentally overloading them in case of lookups for invalid identities
	return nil, nil
}
// Release releases the use of an ID associated with the provided key. After
// the last user has released the ID, the key is removed in the KVstore and
// the returned lastUse value is true.
func (a *Allocator) Release(ctx context.Context, key AllocatorKey) (lastUse bool, err error) {
	if a.operatorIDManagement {
		a.logger.Debug("Skipping key release when cilium-operator ID management is enabled", logfields.Key, key)
		return false, nil
	}

	a.logger.Debug("Releasing key", logfields.Key, key)

	// Do not release before the initial identity listing has completed.
	select {
	case <-a.initialListDone:
	case <-ctx.Done():
		return false, fmt.Errorf("release was cancelled while waiting for initial key list to be received: %w", ctx.Err())
	}

	k := key.GetKey()

	a.slaveKeysMutex.Lock()
	defer a.slaveKeysMutex.Unlock()

	// release the key locally, if it was the last use, remove the node
	// specific value key to remove the global reference mark
	var id idpool.ID
	lastUse, id, err = a.localKeys.release(k)
	if err != nil {
		return lastUse, err
	}
	if lastUse {
		// Since in CRD mode we don't have a way to map which identity is being
		// used by a node, we need to also pass the ID to the release function.
		// This allows the CRD store to find the right identity by its ID and
		// remove the node reference on that identity.
		// NOTE(review): the error returned by backend.Release is dropped here,
		// making the kvstore-side release best-effort — confirm intent before
		// changing.
		a.backend.Release(ctx, id, key)
	}

	return lastUse, err
}
// RunGC scans the kvstore for unused master keys and removes them.
// It delegates to the backend, passing this allocator's configured ID range.
func (a *Allocator) RunGC(ctx context.Context, rateLimit *rate.Limiter, staleKeysPrevRound map[string]uint64) (map[string]uint64, *GCStats, error) {
	return a.backend.RunGC(ctx, rateLimit, staleKeysPrevRound, a.min, a.max)
}
// RunLocksGC scans the kvstore for stale locks and removes them.
// It is a direct pass-through to the backend implementation.
func (a *Allocator) RunLocksGC(ctx context.Context, staleLocksPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error) {
	return a.backend.RunLocksGC(ctx, staleLocksPrevRound)
}
// DeleteAllKeys will delete all keys. It is expected to be used in tests.
func (a *Allocator) DeleteAllKeys() {
	// context.TODO(): no cancellation is plumbed through for this test-only
	// helper.
	a.backend.DeleteAllKeys(context.TODO())
}
// syncLocalKeys checks the kvstore and verifies that a master key exists for
// all locally used allocations. This will restore master keys if deleted for
// some reason.
func (a *Allocator) syncLocalKeys() {
	// Work on a snapshot of the verified local allocations so that no locks
	// are held across the kvstore round-trips. Local usage may disappear while
	// the sync runs: a stale master key is later reaped by the garbage
	// collector, while a slave key re-created by the refresh would outlive the
	// agent's kvstore lease — syncLocalKey therefore re-checks usage after the
	// upsert and, under the slave key mutex, releases keys that are no longer
	// in use.
	ctx := context.TODO()
	for id, key := range a.localKeys.getVerifiedIDs() {
		a.syncLocalKey(ctx, id, key)
	}
}
// syncLocalKey refreshes the kvstore entry for a single locally used
// key -> id mapping and cleans up a slave key that may have been re-created
// by the refresh after the local usage already went away.
func (a *Allocator) syncLocalKey(ctx context.Context, id idpool.ID, key AllocatorKey) {
	encodedKey := key.GetKey()
	if newId := a.localKeys.lookupKey(encodedKey); newId != id {
		// The key is no longer mapped to this ID locally; nothing to refresh.
		return
	}

	err := a.backend.UpdateKey(ctx, id, key, false)
	if err != nil {
		// Fix: include the error itself in the log entry, consistent with the
		// other warnings emitted below.
		a.logger.Warn(
			"Error updating key",
			logfields.Error, err,
			logfields.Key, key,
			logfields.ID, id,
		)
	}

	// Check if the key is still in use locally. Given its expected it's still
	// in use in most cases, we avoid grabbing the slaveKeysMutex here to reduce lock contention.
	// If it is in use here, we know the slave key is not leaked, and we don't need to do any cleanup.
	if newId := a.localKeys.lookupKey(encodedKey); newId != idpool.NoID {
		return
	}

	a.slaveKeysMutex.Lock()
	defer a.slaveKeysMutex.Unlock()

	// Check once again that the slave key is unused locally before releasing it,
	// all while holding the slaveKeysMutex to ensure there are no concurrent allocations or releases.
	// If the key is still unused, it could mean that the slave key was upserted into the kvstore during "UpdateKey"
	// after it was previously released. If that is the case, we release it while holding the slaveKeysMutex.
	if newId := a.localKeys.lookupKey(encodedKey); newId == idpool.NoID {
		ctx, cancel := context.WithTimeout(ctx, backendOpTimeout)
		defer cancel()
		a.logger.Warn(
			"Releasing now unused key that was re-recreated",
			logfields.Key, key,
			logfields.ID, id,
		)
		err = a.backend.Release(ctx, id, key)
		if err != nil {
			a.logger.Warn(
				"Error releasing unused key",
				logfields.Error, err,
				logfields.Key, key,
				logfields.ID, id,
			)
		}
	}
}
// startLocalKeySync launches the background goroutine that periodically
// refreshes all locally used keys in the kvstore until stopGC is closed.
func (a *Allocator) startLocalKeySync() {
	go func() {
		for {
			a.syncLocalKeys()

			select {
			case <-a.stopGC:
				a.logger.Debug("Stopped master key sync routine")
				return
			case <-time.After(a.syncInterval):
			}
		}
	}()
}
// AllocatorEventChan is a channel to receive allocator events on
type AllocatorEventChan chan AllocatorEvent

// Send- and receive-only versions of the above.
type AllocatorEventRecvChan = <-chan AllocatorEvent
type AllocatorEventSendChan = chan<- AllocatorEvent
// AllocatorEvent is an event sent over AllocatorEventChan
type AllocatorEvent struct {
	// Typ is the type of event (upsert / delete)
	Typ AllocatorChangeKind

	// ID is the allocated ID
	ID idpool.ID

	// Key is the key associated with the ID
	Key AllocatorKey
}
// remoteCache represents the cache content of an additional kvstore managing
// identities. The contents are not directly accessible but will be merged into
// the ForeachCache() function.
type remoteCache struct {
	// name uniquely identifies the logical remote (e.g. a cluster name).
	name string

	// allocator is the allocator watching the remote kvstore.
	allocator *Allocator

	// cache points at the remote allocator's main cache.
	cache *cache

	// watchFunc performs the actual watch; injected to allow substitution
	// in tests.
	watchFunc func(ctx context.Context, remote *remoteCache, onSync func(context.Context))
}
// RemoteIDCache is the interface exposed to consumers of a cache of
// identities mirrored from a remote kvstore.
type RemoteIDCache interface {
	// NumEntries returns the number of entries currently in the cache.
	NumEntries() int
	// Synced returns whether the initial listing has completed and events
	// are being watched.
	Synced() bool
	// Watch starts watching the remote and blocks until ctx is cancelled.
	Watch(ctx context.Context, onSync func(context.Context))
}
// NewRemoteCache returns a RemoteIDCache mirroring the main cache of the
// given remote allocator, watched through this allocator's
// watchRemoteKVStore logic.
func (a *Allocator) NewRemoteCache(remoteName string, remoteAlloc *Allocator) RemoteIDCache {
	return &remoteCache{
		name:      remoteName,
		allocator: remoteAlloc,
		cache:     &remoteAlloc.mainCache,
		watchFunc: a.watchRemoteKVStore,
	}
}
// watchRemoteKVStore starts watching an allocator base prefix the kvstore
// represents by the provided backend. A local cache of all identities of that
// kvstore will be maintained in the RemoteCache structure returned and will
// start being reported in the identities returned by the ForeachCache()
// function. RemoteName should be unique per logical "remote".
func (a *Allocator) watchRemoteKVStore(ctx context.Context, rc *remoteCache, onSync func(context.Context)) {
	scopedLog := a.logger.With(logfields.ClusterName, rc.name)
	scopedLog.Info("Starting remote kvstore watcher")

	rc.allocator.start()

	select {
	case <-ctx.Done():
		scopedLog.Debug("Context canceled before remote kvstore watcher synchronization completed: stale identities will now be drained")
		rc.close()

		// Look up a previously registered cache for the same remote (if any)
		// so we can avoid draining identities it still knows about.
		a.remoteCachesMutex.RLock()
		old := a.remoteCaches[rc.name]
		a.remoteCachesMutex.RUnlock()

		if old != nil {
			old.cache.mutex.RLock()
			defer old.cache.mutex.RUnlock()
		}

		// Drain all entries that might have been received until now, and that
		// are not present in the current cache (if any). This ensures we do not
		// leak any stale identity, and at the same time we do not invalidate the
		// current state.
		rc.cache.drainIf(func(id idpool.ID) bool {
			if old == nil {
				return true
			}

			_, ok := old.cache.nextCache[id]
			return !ok
		})
		return

	case <-rc.cache.listDone:
		scopedLog.Info("Remote kvstore watcher successfully synchronized and registered")
	}

	// Register this cache, replacing any previous one for the same remote.
	a.remoteCachesMutex.Lock()
	old := a.remoteCaches[rc.name]
	a.remoteCaches[rc.name] = rc
	a.remoteCachesMutex.Unlock()

	if old != nil {
		// In case of reconnection, let's emit a deletion event for all stale identities
		// that are no longer present in the kvstore. We take the lock of the new cache
		// to ensure that we observe a stable state during this process (i.e., no keys
		// are added/removed in the meanwhile).
		scopedLog.Debug("Another kvstore watcher was already registered: deleting stale identities")
		rc.cache.mutex.RLock()
		old.cache.drainIf(func(id idpool.ID) bool {
			_, ok := rc.cache.nextCache[id]
			return !ok
		})
		rc.cache.mutex.RUnlock()
	}

	// Execute the on-sync callback handler.
	onSync(ctx)

	// Block until cancellation, then tear down the remote watcher.
	<-ctx.Done()
	rc.close()
	scopedLog.Info("Stopped remote kvstore watcher")
}
// RemoveRemoteKVStore removes any reference to a remote allocator / kvstore, emitting
// a deletion event for all previously known identities.
func (a *Allocator) RemoveRemoteKVStore(remoteName string) {
	// Detach the cache under the write lock, then drain it outside the lock.
	a.remoteCachesMutex.Lock()
	rc := a.remoteCaches[remoteName]
	delete(a.remoteCaches, remoteName)
	a.remoteCachesMutex.Unlock()

	if rc == nil {
		return
	}

	rc.cache.drain()
	a.logger.Info("Remote kvstore watcher unregistered", logfields.ClusterName, remoteName)
}
// Watch starts watching the remote kvstore and synchronize the identities in
// the local cache. It blocks until the context is closed.
func (rc *remoteCache) Watch(ctx context.Context, onSync func(context.Context)) {
	// Delegates to the injected watch function (normally
	// Allocator.watchRemoteKVStore).
	rc.watchFunc(ctx, rc, onSync)
}
// NumEntries returns the number of entries in the remote cache
func (rc *remoteCache) NumEntries() int {
	// A nil receiver represents an absent cache and reports zero entries.
	if rc != nil {
		return rc.cache.numEntries()
	}
	return 0
}
// Synced returns whether the initial list of entries has been retrieved from
// the kvstore, and new events are currently being watched.
func (rc *remoteCache) Synced() bool {
	if rc == nil {
		return false
	}

	// A stopped cache is never considered synchronized.
	select {
	case <-rc.cache.ctx.Done():
		return false
	default:
	}

	// Otherwise, synchronized once the initial listing has completed.
	select {
	case <-rc.cache.listDone:
		return true
	default:
		return false
	}
}
// close stops watching for identities in the kvstore associated with the
// remote cache.
func (rc *remoteCache) close() {
	// Delete() closes the remote allocator's stopGC channel and stops its
	// main cache.
	rc.cache.allocator.Delete()
}
// Observe the identity changes. Conforms to stream.Observable.
// Replays the current state of the cache when subscribing.
func (a *Allocator) Observe(ctx context.Context, next func(AllocatorChange), complete func(error)) {
	a.mainCache.Observe(ctx, next, complete)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package allocator
import (
"context"
"log/slog"
"sync"
"github.com/cilium/stream"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/idpool"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// backendOpTimeout is the time allowed for operations sent to backends in
// response to events such as create/modify/delete.
const backendOpTimeout = 10 * time.Second

// idMap provides mapping from ID to an AllocatorKey.
type idMap map[idpool.ID]AllocatorKey

// keyMap provides mapping from AllocatorKey (in its string form) to ID.
type keyMap map[string]idpool.ID
// cache is the local mirror of the identities stored in the kvstore,
// maintained by watching for backend events.
type cache struct {
	logger      *slog.Logger
	controllers *controller.Manager

	// allocator is the owning allocator; used for pools, events and validators.
	allocator *Allocator

	// ctx/cancel bound the lifetime of the watch goroutine started by start().
	ctx    context.Context
	cancel context.CancelFunc

	// mutex protects all cache data structures
	mutex lock.RWMutex

	// cache is a local cache of all IDs allocated in the kvstore. It is
	// being maintained by watching for kvstore events and can thus lag
	// behind.
	cache idMap

	// keyCache shadows cache and allows access by key
	keyCache keyMap

	// nextCache is the cache that is constantly being filled by startWatch();
	// when startWatch has successfully performed the initial fill using
	// ListPrefix, the cache above will be pointed to nextCache. If the
	// startWatch() fails to perform the initial list, then the cache is
	// never pointed to nextCache. This guarantees that a valid cache is
	// kept at all times.
	nextCache idMap

	// nextKeyCache follows the same logic as nextCache but for keyCache
	nextKeyCache keyMap

	// listDone is closed once the initial listing has completed (see OnListDone).
	listDone waitChan

	// stopWatchWg is a wait group that gets conditions added when a
	// watcher is started with the conditions marked as done when the
	// watcher has exited
	stopWatchWg sync.WaitGroup

	// changeSrc multicasts AllocatorChange events to subscribers; emitChange
	// publishes into it and completeChangeSrc terminates the stream.
	changeSrc         stream.Observable[AllocatorChange]
	emitChange        func(AllocatorChange)
	completeChangeSrc func(error)
}
// newCache constructs a cache bound to the given allocator, with empty live
// maps, a cancelable watch context and a multicast change stream wired up.
func newCache(a *Allocator) cache {
	ctx, cancel := context.WithCancel(context.Background())
	c := cache{
		logger:      a.logger,
		allocator:   a,
		cache:       idMap{},
		keyCache:    keyMap{},
		ctx:         ctx,
		cancel:      cancel,
		controllers: controller.NewManager(),
	}
	c.changeSrc, c.emitChange, c.completeChangeSrc = stream.Multicast[AllocatorChange]()
	return c
}
// waitChan is closed to signal completion of an asynchronous operation.
type waitChan chan struct{}

// CacheMutations are the operations given to a Backend's ListAndWatch command.
// They are called on changes to identities.
type CacheMutations interface {
	// OnListDone is called when the initial full-sync is complete.
	OnListDone()

	// OnUpsert is called when either a new key->ID mapping appears or an existing
	// one is modified. The latter case may occur e.g., when leases are updated,
	// and does not mean that the actual mapping had changed.
	OnUpsert(id idpool.ID, key AllocatorKey)

	// OnDelete is called when a key->ID mapping is removed. This may trigger
	// master-key protection, if enabled, where the local allocator will
	// recreate the key->ID association because the local node is still using
	// it.
	OnDelete(id idpool.ID, key AllocatorKey)
}
// sendEvent forwards an allocator event onto the owning allocator's events
// channel, if one is configured.
func (c *cache) sendEvent(typ AllocatorChangeKind, id idpool.ID, key AllocatorKey) {
	events := c.allocator.events
	if events == nil {
		return
	}
	events <- AllocatorEvent{Typ: typ, ID: id, Key: key}
}
// OnListDone is invoked by the backend once the initial listing of identities
// has completed. It promotes the staging maps to the live ones and signals
// readiness by closing listDone.
func (c *cache) OnListDone() {
	c.mutex.Lock()
	// nextCache is valid, point the live cache to it
	c.cache = c.nextCache
	c.keyCache = c.nextKeyCache
	c.mutex.Unlock()

	c.logger.Debug("Initial list of identities received")

	// report that the list operation has
	// been completed and the allocator is
	// ready to use
	close(c.listDone)
}
// OnUpsert is invoked by the backend when a key->ID mapping appears or is
// refreshed in the kvstore. It updates the staging maps, withdraws the ID
// from the local pool and notifies stream subscribers and event listeners.
func (c *cache) OnUpsert(id idpool.ID, key AllocatorKey) {
	// Give registered validators a chance to reject the event.
	for _, validator := range c.allocator.cacheValidators {
		if err := validator(AllocatorChangeUpsert, id, key); err != nil {
			c.logger.Warn(
				"Skipping event for invalid identity",
				logfields.Error, err,
				logfields.Identity, id,
				logfields.Event, AllocatorChangeUpsert,
			)
			return
		}
	}

	c.mutex.Lock()
	defer c.mutex.Unlock()

	// If the ID was previously bound to a different key, drop the stale
	// reverse mapping first.
	if k, ok := c.nextCache[id]; ok {
		delete(c.nextKeyCache, k.GetKey())
	}

	c.nextCache[id] = key
	if key != nil {
		c.nextKeyCache[key.GetKey()] = id
	}

	// The ID is in use remotely and must no longer be handed out locally.
	c.allocator.idPool.Remove(id)

	c.emitChange(AllocatorChange{Kind: AllocatorChangeUpsert, ID: id, Key: key})

	c.sendEvent(AllocatorChangeUpsert, id, key)
}
// OnDelete is invoked by the backend when a key->ID mapping disappears from
// the kvstore. Master-key protection (see onDeleteLocked) may recreate the
// mapping if the local node still uses the key.
func (c *cache) OnDelete(id idpool.ID, key AllocatorKey) {
	// Give registered validators a chance to reject the event.
	for _, validator := range c.allocator.cacheValidators {
		if err := validator(AllocatorChangeDelete, id, key); err != nil {
			c.logger.Warn(
				"Skipping event for invalid identity",
				logfields.Error, err,
				logfields.Identity, id,
				logfields.Event, AllocatorChangeDelete,
			)
			return
		}
	}

	c.mutex.Lock()
	defer c.mutex.Unlock()

	c.onDeleteLocked(id, key, true)
}
// syncIdentityControllerGroup names the controller group used to re-create
// master keys that are still in use locally (see onDeleteLocked).
const syncIdentityControllerGroup = "sync-identity"

// syncControllerName derives a per-identity controller name.
func syncControllerName(id idpool.ID) string {
	return syncIdentityControllerGroup + "-" + id.String()
}

// no max interval by default, exposed as a variable for testing.
var masterKeyRecreateMaxInterval = time.Duration(0)

// syncIdentityGroup is the shared controller group for all sync-identity controllers.
var syncIdentityGroup = controller.NewGroup(syncIdentityControllerGroup)
// onDeleteLocked must be called while holding c.Mutex for writing.
//
// If master-key protection is enabled, recreateMissingLocalKeys is true and
// the local node still holds a reference to the deleted ID, a controller is
// scheduled to re-create the key in the kvstore instead of removing it from
// the caches. Otherwise the mapping is dropped and the ID returned to the pool.
func (c *cache) onDeleteLocked(id idpool.ID, key AllocatorKey, recreateMissingLocalKeys bool) {
	a := c.allocator
	if a.enableMasterKeyProtection && recreateMissingLocalKeys {
		if value := a.localKeys.lookupID(id); value != nil {
			c.controllers.UpdateController(syncControllerName(id), controller.ControllerParams{
				Context:          context.Background(),
				MaxRetryInterval: masterKeyRecreateMaxInterval,
				Group:            syncIdentityGroup,
				DoFunc: func(ctx context.Context) error {
					c.mutex.Lock()
					defer c.mutex.Unlock()
					// For each attempt, check if this ciliumidentity is still a candidate for recreation.
					// It's possible that since the last iteration that this agent has legitimately deleted
					// the key, in which case we can stop trying to recreate it.
					if value := c.allocator.localKeys.lookupID(id); value == nil {
						return nil
					}

					ctx, cancel := context.WithTimeout(ctx, backendOpTimeout)
					defer cancel()

					// Each iteration will attempt to grab the key reference, if that succeeds
					// then this completes (i.e. the key exists).
					// Otherwise we will attempt to create the key, this process repeats until
					// the key is created.
					// NOTE(review): `value` here is the one captured when the
					// controller was created, not the re-looked-up one above —
					// confirm this is intended (keys for an ID are expected to
					// be stable).
					if err := a.backend.UpdateKey(ctx, id, value, true); err != nil {
						c.logger.Error(
							"OnDelete MasterKeyProtection update for key",
							logfields.Error, err,
							logfields.ID, id,
						)
						return err
					}
					c.logger.Info(
						"OnDelete MasterKeyProtection update succeeded",
						logfields.ID, id,
					)
					return nil
				},
			})

			return
		}
	}

	// Drop both the forward and reverse mapping from the staging caches and
	// return the ID to the local pool.
	if k, ok := c.nextCache[id]; ok && k != nil {
		delete(c.nextKeyCache, k.GetKey())
	}
	delete(c.nextCache, id)
	a.idPool.Insert(id)

	c.emitChange(AllocatorChange{Kind: AllocatorChangeDelete, ID: id, Key: key})

	c.sendEvent(AllocatorChangeDelete, id, key)
}
// start requests a LIST operation from the kvstore and starts watching the
// prefix in a go subroutine. The returned channel is closed once the initial
// listing has completed (see OnListDone).
func (c *cache) start() waitChan {
	c.listDone = make(waitChan)

	c.mutex.Lock()

	// start with a fresh nextCache
	c.nextCache = idMap{}
	c.nextKeyCache = keyMap{}
	c.mutex.Unlock()

	// The wait-group entry is released when ListAndWatch returns, allowing
	// stop()/drain() to wait for the watcher goroutine.
	c.stopWatchWg.Add(1)

	go func() {
		c.allocator.backend.ListAndWatch(c.ctx, c)
		c.stopWatchWg.Done()
	}()

	return c.listDone
}
// stop cancels the watch context, waits for the watcher goroutine to exit,
// then tears down the remaining controllers and completes the change stream.
func (c *cache) stop() {
	c.cancel()
	c.stopWatchWg.Wait()
	// Drain/stop any remaining sync identity controllers.
	// Backend watch is now stopped, any running controllers attempting to
	// sync identities will complete and stop (possibly in an unresolved state).
	c.controllers.RemoveAllAndWait()
	c.completeChangeSrc(nil)
}
// drain emits a deletion event for all known IDs. It must be called after the
// cache has been stopped, to ensure that no new events can be received afterwards.
func (c *cache) drain() {
	// Make sure we wait until the watch loop has been properly stopped.
	c.stopWatchWg.Wait()

	c.mutex.Lock()
	defer c.mutex.Unlock()
	for id, key := range c.nextCache {
		c.onDeleteLocked(id, key, false)
	}
}
// drainIf emits a deletion event for every known ID considered stale by the
// provided isStale function. It must be called after the cache has been
// stopped, to ensure that no new events can be received afterwards.
func (c *cache) drainIf(isStale func(id idpool.ID) bool) {
	// Wait until the watch loop has properly stopped first, otherwise new
	// IDs might be added after we completed the draining process.
	c.stopWatchWg.Wait()

	c.mutex.Lock()
	defer c.mutex.Unlock()
	for id, key := range c.nextCache {
		if !isStale(id) {
			continue
		}
		c.onDeleteLocked(id, key, false)
		c.logger.Debug(
			"Stale identity deleted",
			logfields.ID, id,
			logfields.Key, key,
		)
	}
}
// get returns the ID bound to the given key in the live cache, or
// idpool.NoID when the key is unknown.
func (c *cache) get(key string) idpool.ID {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	if id, ok := c.keyCache[key]; ok {
		return id
	}
	return idpool.NoID
}
// getByID returns the key bound to the given ID in the live cache, or nil
// when the ID is unknown.
func (c *cache) getByID(id idpool.ID) AllocatorKey {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	if v, ok := c.cache[id]; ok {
		return v
	}
	return nil
}
// foreach invokes cb for every ID/key pair in the live cache while holding
// the read lock.
func (c *cache) foreach(cb RangeFunc) {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	for id, key := range c.cache {
		cb(id, key)
	}
}
// insert records a key/ID pair in both staging maps.
func (c *cache) insert(key AllocatorKey, val idpool.ID) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.nextCache[val] = key
	c.nextKeyCache[key.GetKey()] = val
}
// numEntries returns the number of entries in the staging cache.
// NOTE(review): this counts nextCache, not the live cache — confirm intended.
func (c *cache) numEntries() int {
	c.mutex.RLock()
	n := len(c.nextCache)
	c.mutex.RUnlock()
	return n
}
// AllocatorChangeKind classifies an AllocatorChange event.
type AllocatorChangeKind string

const (
	// AllocatorChangeSync signals that a consistent initial state has been replayed.
	AllocatorChangeSync AllocatorChangeKind = "sync"
	// AllocatorChangeUpsert signals the addition or update of a key<->ID mapping.
	AllocatorChangeUpsert AllocatorChangeKind = "upsert"
	// AllocatorChangeDelete signals the removal of a key<->ID mapping.
	AllocatorChangeDelete AllocatorChangeKind = "delete"
)

// AllocatorChange describes a single change to the identity cache.
type AllocatorChange struct {
	Kind AllocatorChangeKind
	ID   idpool.ID
	Key  AllocatorKey
}
// Observe the allocator changes. Conforms to stream.Observable.
// Replays the current state of the cache when subscribing.
func (c *cache) Observe(ctx context.Context, next func(AllocatorChange), complete func(error)) {
	// This short-lived go routine serves the purpose of replaying the current state of the cache before starting
	// to observe the actual source changeSrc. ChangeSrc is backed by a stream.FuncObservable, that will start its own
	// go routine. Therefore, the current go routine will stop and free the lock on the mutex after the registration.
	go func() {
		// Wait until initial listing has completed before
		// replaying the state.
		select {
		case <-c.listDone:
		case <-ctx.Done():
			complete(ctx.Err())
			return
		}

		c.mutex.RLock()
		defer c.mutex.RUnlock()

		// Replay the current live cache as a series of upsert events.
		for id, key := range c.cache {
			next(AllocatorChange{Kind: AllocatorChangeUpsert, ID: id, Key: key})
		}

		// Emit a sync event to inform the subscriber that it has received a consistent
		// initial state.
		next(AllocatorChange{Kind: AllocatorChangeSync})

		// And subscribe to new events. Since we held the read-lock there won't be any
		// missed or duplicate events.
		c.changeSrc.Observe(ctx, next, complete)
	}()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package allocator
import (
"fmt"
"log/slog"
"github.com/cilium/cilium/pkg/idpool"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/lock"
)
// localKey is the book-keeping entry for a key in use by the local node.
type localKey struct {
	val    idpool.ID
	key    AllocatorKey
	refcnt uint64

	// verified is true when the key has been synced with the kvstore
	verified bool
}
// localKeys is a map of keys in use locally. Keys can be used multiple times.
// A refcnt is managed to know when a key is no longer in use.
type localKeys struct {
	logger *slog.Logger
	// RWMutex protects both maps below.
	lock.RWMutex
	// keys indexes entries by the key's string form.
	keys map[string]*localKey
	// ids indexes the same entries by their allocated ID.
	ids map[idpool.ID]*localKey
}
// newLocalKeys returns an empty localKeys store using the provided logger.
func newLocalKeys(logger *slog.Logger) *localKeys {
	lk := &localKeys{
		logger: logger,
		keys:   make(map[string]*localKey),
		ids:    make(map[idpool.ID]*localKey),
	}
	return lk
}
// allocate creates an entry for key in localKeys if needed and increments the
// refcnt. The value associated with the key must match the local cache or an
// error is returned.
//
// The returned boolean is true when this call created the entry (first use).
func (lk *localKeys) allocate(keyString string, key AllocatorKey, val idpool.ID) (idpool.ID, bool, error) {
	lk.Lock()
	defer lk.Unlock()

	var firstUse bool

	if k, ok := lk.keys[keyString]; ok {
		// An existing key must always be re-allocated with the same ID.
		if val != k.val {
			return idpool.NoID, firstUse, fmt.Errorf("local key already allocated with different value (%s != %s)", val, k.val)
		}

		k.refcnt++
		kvstore.Trace(lk.logger, "Incremented local key refcnt",
			fieldKey, keyString,
			fieldID, val,
			fieldRefCnt, k.refcnt,
		)
		return k.val, firstUse, nil
	}

	firstUse = true
	k := &localKey{key: key, val: val, refcnt: 1}
	// Register the entry in both the forward (key) and reverse (ID) indexes.
	lk.keys[keyString] = k
	lk.ids[val] = k
	kvstore.Trace(lk.logger, "New local key",
		fieldKey, keyString,
		fieldID, val,
		fieldRefCnt, 1,
	)
	return val, firstUse, nil
}
// verify marks the given key as having been synced with the kvstore. An error
// is returned if the key is not tracked locally.
func (lk *localKeys) verify(key string) error {
	lk.Lock()
	defer lk.Unlock()

	k, ok := lk.keys[key]
	if !ok {
		return fmt.Errorf("key %s not found", key)
	}

	k.verified = true
	kvstore.Trace(lk.logger, "Local key verified",
		fieldKey, key,
	)
	return nil
}
// lookupKey returns the ID bound to key, or idpool.NoID when the key is not
// tracked locally.
func (lk *localKeys) lookupKey(key string) idpool.ID {
	lk.RLock()
	defer lk.RUnlock()

	k, ok := lk.keys[key]
	if !ok {
		return idpool.NoID
	}
	return k.val
}
// lookupID returns the key bound to the given ID, or nil when the ID is not
// tracked locally.
func (lk *localKeys) lookupID(id idpool.ID) AllocatorKey {
	lk.RLock()
	defer lk.RUnlock()

	k, ok := lk.ids[id]
	if !ok {
		return nil
	}
	return k.key
}
// use increments the refcnt of the key and returns its value. Keys that are
// absent, or not yet verified against the kvstore, yield idpool.NoID.
func (lk *localKeys) use(key string) idpool.ID {
	lk.Lock()
	defer lk.Unlock()

	k, ok := lk.keys[key]
	// unverified keys behave as if they do not exist
	if !ok || !k.verified {
		return idpool.NoID
	}

	k.refcnt++
	kvstore.Trace(lk.logger, "Incremented local key refcnt",
		fieldKey, key,
		fieldID, k.val,
		fieldRefCnt, k.refcnt,
	)
	return k.val
}
// release releases the refcnt of a key. It returns the ID associated with the
// given key. When the last reference was released, the key is deleted and the
// returned lastUse value is true.
func (lk *localKeys) release(key string) (lastUse bool, id idpool.ID, err error) {
	lk.Lock()
	defer lk.Unlock()

	k, ok := lk.keys[key]
	if !ok {
		return false, idpool.NoID, fmt.Errorf("unable to find key in local cache")
	}

	k.refcnt--
	kvstore.Trace(lk.logger, "Decremented local key refcnt",
		fieldKey, key,
		fieldID, k.val,
		fieldRefCnt, k.refcnt,
	)
	lastUse = k.refcnt == 0
	if lastUse {
		// Last reference gone: drop the entry from both indexes.
		delete(lk.keys, key)
		delete(lk.ids, k.val)
	}
	return lastUse, k.val, nil
}
// getVerifiedIDs returns a snapshot of all IDs whose keys have been verified
// against the kvstore.
func (lk *localKeys) getVerifiedIDs() map[idpool.ID]AllocatorKey {
	out := map[idpool.ID]AllocatorKey{}
	lk.RLock()
	defer lk.RUnlock()
	for id, entry := range lk.ids {
		if !entry.verified {
			continue
		}
		out[id] = entry.key
	}
	return out
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package annotation
import "strings"
const (
	// ServiceAffinityNone is the default affinity: no preference.
	ServiceAffinityNone = ""
	// ServiceAffinityLocal prefers endpoints from the local cluster.
	ServiceAffinityLocal = "local"
	// ServiceAffinityRemote prefers endpoints from remote clusters.
	ServiceAffinityRemote = "remote"
)
// GetAnnotationIncludeExternal reports whether the object is annotated as a
// global service, i.e. its GlobalService (or alias) annotation equals "true"
// case-insensitively.
func GetAnnotationIncludeExternal(obj annotatedObject) bool {
	value, ok := Get(obj, GlobalService, GlobalServiceAlias)
	return ok && strings.ToLower(value) == "true"
}
// GetAnnotationShared reports whether the service should be shared across
// clusters. Non-global services are never shared; global services are shared
// by default unless the SharedService annotation says otherwise.
func GetAnnotationShared(obj annotatedObject) bool {
	// The SharedService annotation is ignored if the service is not declared as global.
	if !GetAnnotationIncludeExternal(obj) {
		return false
	}

	value, ok := Get(obj, SharedService, SharedServiceAlias)
	if !ok {
		// A global service is marked as shared by default.
		return true
	}
	return strings.ToLower(value) == "true"
}
// GetAnnotationServiceAffinity returns the lower-cased service affinity
// ("local", "remote" or ""). Affinity only applies to global services.
func GetAnnotationServiceAffinity(obj annotatedObject) string {
	// The ServiceAffinity annotation is ignored if the service is not declared as global.
	if !GetAnnotationIncludeExternal(obj) {
		return ServiceAffinityNone
	}

	value, ok := Get(obj, ServiceAffinity, ServiceAffinityAlias)
	if !ok {
		return ServiceAffinityNone
	}
	return strings.ToLower(value)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package annotation
import (
"regexp"
)
const (
// Prefix is the common prefix for all annotations
Prefix = "io.cilium"
// ConfigPrefix is the common prefix for configuration related annotations.
ConfigPrefix = "config.cilium.io"
// ClusterMeshPrefix is the common prefix for ClusterMesh related annotations.
ClusterMeshPrefix = "clustermesh.cilium.io"
// IngressPrefix is the common prefix for ingress related annotations.
IngressPrefix = "ingress.cilium.io"
// NetworkPrefix is the common prefix for network related annotations.
NetworkPrefix = "network.cilium.io"
// PolicyPrefix is the common prefix for policy related annotations.
PolicyPrefix = "policy.cilium.io"
// ServicePrefix is the common prefix for service related annotations.
ServicePrefix = "service.cilium.io"
// IPAMPrefix is the common prefix for IPAM related annotations.
IPAMPrefix = "ipam.cilium.io"
// LBIPAMPrefix is the common prefix for LB IPAM related annotations.
LBIPAMPrefix = "lbipam.cilium.io"
// CNIPrefix is the common prefix for CNI related annotations.
CNIPrefix = "cni.cilium.io"
// CECPrefix is the common prefix for CEC related annotations.
CECPrefix = "cec.cilium.io"
// PodAnnotationMAC is used to store the MAC address of the Pod.
PodAnnotationMAC = CNIPrefix + "/mac-address"
// PolicyName / PolicyNameAlias is an optional annotation to the NetworkPolicy
// resource which specifies the name of the policy node to which all
// rules should be applied to.
PolicyName = PolicyPrefix + "/name"
PolicyNameAlias = Prefix + ".name"
// V4CIDRName / V4CIDRNameAlias is the annotation name used to store the IPv4
// pod CIDR in the node's annotations.
V4CIDRName = NetworkPrefix + "/ipv4-pod-cidr"
V4CIDRNameAlias = Prefix + ".network.ipv4-pod-cidr"
// V6CIDRName / V6CIDRNameAlias is the annotation name used to store the IPv6
// pod CIDR in the node's annotations.
V6CIDRName = NetworkPrefix + "/ipv6-pod-cidr"
V6CIDRNameAlias = Prefix + ".network.ipv6-pod-cidr"
// V4HealthName / V4HealthNameAlias is the annotation name used to store the
// IPv4 address of the cilium-health endpoint in the node's annotations.
V4HealthName = NetworkPrefix + "/ipv4-health-ip"
V4HealthNameAlias = Prefix + ".network.ipv4-health-ip"
// V6HealthName / V6HealthNameAlias is the annotation name used to store the
// IPv6 address of the cilium-health endpoint in the node's annotations.
V6HealthName = NetworkPrefix + "/ipv6-health-ip"
V6HealthNameAlias = Prefix + ".network.ipv6-health-ip"
// V4IngressName / V4IngressNameAlias is the annotation name used to store
// the IPv4 address of the Ingress listener in the node's annotations.
V4IngressName = NetworkPrefix + "/ipv4-Ingress-ip"
V4IngressNameAlias = Prefix + ".network.ipv4-Ingress-ip"
// V6IngressName / V6IngressNameAlias is the annotation name used to store
// the IPv6 address of the Ingress listener in the node's annotations.
V6IngressName = NetworkPrefix + "/ipv6-Ingress-ip"
V6IngressNameAlias = Prefix + ".network.ipv6-Ingress-ip"
// CiliumHostIP / CiliumHostIPAlias is the annotation name used to store the
// IPv4 address of the cilium host interface in the node's annotations.
CiliumHostIP = NetworkPrefix + "/ipv4-cilium-host"
CiliumHostIPAlias = Prefix + ".network.ipv4-cilium-host"
// CiliumHostIPv6 / CiliumHostIPv6Alias is the annotation name used to store
// the IPv6 address of the cilium host interface in the node's annotation.
CiliumHostIPv6 = NetworkPrefix + "/ipv6-cilium-host"
CiliumHostIPv6Alias = Prefix + ".network.ipv6-cilium-host"
// CiliumEncryptionKey / CiliumEncryptionKeyAlias is the annotation name used to
// store the encryption key of the cilium host interface in the node's annotation.
CiliumEncryptionKey = NetworkPrefix + "/encryption-key"
CiliumEncryptionKeyAlias = Prefix + ".network.encryption-key"
// GlobalService / GlobalServiceAlias if set to true, marks a service to
// become a global service.
GlobalService = ServicePrefix + "/global"
GlobalServiceAlias = Prefix + "/global-service"
// GlobalServiceSyncEndpointSlice if set to true, marks a service to
// synchronize remote clusters endpoint slices to the local Kubernetes API
GlobalServiceSyncEndpointSlices = ServicePrefix + "/global-sync-endpoint-slices"
// SharedService / SharedServiceAlias if set to false, prevents a service
// from being shared, the default is true if GlobalService is set, otherwise
// false. Setting the annotation SharedService to false while setting
// GlobalService to true allows to expose remote endpoints without
// sharing local endpoints.
SharedService = ServicePrefix + "/shared"
SharedServiceAlias = Prefix + "/shared-service"
// ServiceAffinity / ServiceAffinityAlias annotations determines the
// preferred endpoint destination.
// Allowed values:
// - local
// preferred endpoints from local cluster if available
// - remote
// preferred endpoints from remote cluster if available
// - none (default)
// no preference. Default behavior if this annotation does not exist
ServiceAffinity = ServicePrefix + "/affinity"
ServiceAffinityAlias = Prefix + "/service-affinity"
// CoreDNSAutoPatched is the annotation used to roll out CoreDNS once we
// have patched its configuration to enable MCS-API support.
CoreDNSAutoPatched = ClusterMeshPrefix + "/autoPatchedAt"
// ServiceLoadBalancingAlgorithm indicates which backend selection algorithm
// for a given Service to use. This annotation will override the default
// value set in bpf-lb-algorithm.
// Allowed values:
// - random
// - maglev
ServiceLoadBalancingAlgorithm = ServicePrefix + "/lb-algorithm"
// ServiceNodeExposure is the label name used to mark a service to only a
// subset of the nodes which match the same value. For all other nodes, this
// service is ignored and not installed into their datapath.
ServiceNodeExposure = ServicePrefix + "/node"
// ServiceNodeSelectorExposure is the label name used to mark a service to only a
// subset of the nodes which match the label selector. For all other nodes, this
// service is ignored and not installed into their datapath.
ServiceNodeSelectorExposure = ServicePrefix + "/node-selector"
// ServiceTypeExposure is the annotation name used to mark what service type
// to provision (only single type is allowed; allowed types: "ClusterIP",
// "NodePort" and "LoadBalancer").
//
// For example, a LoadBalancer service includes ClusterIP and NodePort (unless
// allocateLoadBalancerNodePorts is set to false). To avoid provisioning
// the latter two, one can set the annotation with the value "LoadBalancer".
ServiceTypeExposure = ServicePrefix + "/type"
// ServiceSourceRangesPolicy is the annotation name used to specify the policy
// of the user-provided loadBalancerSourceRanges, meaning whether this CIDR
// list should act as an allow- or deny-list. Both "allow" or "deny" are
// possible values for this annotation.
ServiceSourceRangesPolicy = ServicePrefix + "/src-ranges-policy"
// ServiceProxyDelegation is the annotation name used to specify whether there
// should be delegation to a 3rd party proxy. Allowed values are "none" (default)
// and "delegate-if-local". The latter pushes all service packets to a user
// space proxy if the selected backend IP is the IP of the local node. If the
// selected backend IP is non-local then the BPF datapath forwards the packet
// back out again with the configured BPF load-balancing mechanism.
ServiceProxyDelegation = ServicePrefix + "/proxy-delegation"
// ServiceForwardingMode annotation determines the way packets are pushed to the
// remote backends.
// Allowed values are of type loadbalancer.SVCForwardingMode:
// - dsr
// use the configured DSR method
// - snat
// use SNAT so that reply traffic comes back
ServiceForwardingMode = ServicePrefix + "/forwarding-mode"
// NoTrack / NoTrackAlias is the annotation name used to store the port and
// protocol that we should bypass kernel conntrack for a given pod. This
// applies for both TCP and UDP connection. Current use case is NodeLocalDNS.
NoTrack = PolicyPrefix + "/no-track-port"
NoTrackAlias = Prefix + ".no-track-port"
// WireguardPubKey / WireguardPubKeyAlias is the annotation name used to store
// the WireGuard public key in the CiliumNode CRD that we need to use to encrypt
// traffic to that node.
WireguardPubKey = NetworkPrefix + "/wg-pub-key"
WireguardPubKeyAlias = Prefix + ".network.wg-pub-key"
// BGPVRouterAnnoPrefix is the prefix used for all Virtual Router annotations.
// It's just a prefix, because the ASN of the router is part of the annotation itself.
BGPVRouterAnnoPrefix = "cilium.io/bgp-virtual-router."
// IPAMPoolKey is the annotation name used to store the IPAM pool name from
// which workloads should allocate their IP from
IPAMPoolKey = IPAMPrefix + "/ip-pool"
// IPAMIPv4PoolKey is the annotation name used to store the IPAM IPv4 pool name from
// which workloads should allocate their IP from
IPAMIPv4PoolKey = IPAMPrefix + "/ipv4-pool"
// IPAMIPv6PoolKey is the annotation name used to store the IPAM IPv6 pool name from
// which workloads should allocate their IP from
IPAMIPv6PoolKey = IPAMPrefix + "/ipv6-pool"
// IPAMIgnore is the annotation used to make the Cilium operator IPAM logic
// ignore the given CiliumNode object
IPAMIgnore = IPAMPrefix + "/ignore"
LBIPAMIPsKey = LBIPAMPrefix + "/ips"
LBIPAMIPKeyAlias = Prefix + "/lb-ipam-ips"
LBIPAMSharingKey = LBIPAMPrefix + "/sharing-key"
LBIPAMSharingKeyAlias = Prefix + "/lb-ipam-sharing-key"
LBIPAMSharingAcrossNamespace = LBIPAMPrefix + "/sharing-cross-namespace"
LBIPAMSharingAcrossNamespaceAlias = Prefix + "/lb-ipam-sharing-cross-namespace"
CECInjectCiliumFilters = CECPrefix + "/inject-cilium-filters"
CECIsL7LB = CECPrefix + "/is-l7lb"
CECUseOriginalSourceAddress = CECPrefix + "/use-original-source-address"
)
// CiliumPrefixRegex is a regex matching Cilium specific annotations: any
// annotation key under "cilium.io/" or a subdomain of it.
// The dot in "cilium.io" is escaped so it matches only a literal '.'; the
// previous unescaped pattern also matched e.g. "ciliumXio/".
var CiliumPrefixRegex = regexp.MustCompile(`^([A-Za-z0-9]+\.)*cilium\.io/`)
// annotatedObject is any object exposing Kubernetes-style annotations.
type annotatedObject interface {
	GetAnnotations() map[string]string
}

// Get returns the annotation value associated with the given key, or any of
// the additional aliases if not found.
func Get(obj annotatedObject, key string, aliases ...string) (value string, ok bool) {
	annotations := obj.GetAnnotations()
	for _, candidate := range append([]string{key}, aliases...) {
		if value, ok = annotations[candidate]; ok {
			return value, ok
		}
	}
	return "", false
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"log/slog"
"net/http"
"github.com/go-openapi/runtime/middleware"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// AdminDisableHandler is an http.Handler that rejects every request to an
// administratively disabled API endpoint.
type AdminDisableHandler struct {
	logger *slog.Logger
	// name is the canonical flag name of the disabled endpoint, used in logs.
	name string
}

// NewAdminDisableHandler returns a handler for the named API endpoint.
func NewAdminDisableHandler(logger *slog.Logger, name string) *AdminDisableHandler {
	return &AdminDisableHandler{
		logger: logger.With(subsysLogAttr...),
		name:   name,
	}
}
// ServeHTTP implements http.Handler: it denies the request with
// 403 Forbidden and logs the attempt.
func (a *AdminDisableHandler) ServeHTTP(wr http.ResponseWriter, req *http.Request) {
	wr.WriteHeader(http.StatusForbidden)
	a.logger.Info(
		"Denied API request on administratively disabled API endpoint",
		logfields.Endpoint, a.name,
	)
	// The write error is deliberately discarded; the 403 status has already
	// been committed and there is nothing useful to do on failure.
	_, _ = wr.Write([]byte("This API is administratively disabled. Contact your administrator for more details."))
}
// DisableAPIs configures the API middleware for all of the paths in the
// provided PathSet such that those APIs will be administratively disabled at
// runtime.
func DisableAPIs(logger *slog.Logger, paths PathSet, addMiddleware func(method string, path string, builder middleware.Builder)) {
	for name, ep := range paths {
		addMiddleware(ep.Method, ep.Path, func(_ http.Handler) http.Handler {
			return NewAdminDisableHandler(logger, name)
		})
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"errors"
"fmt"
"net/http"
"github.com/go-openapi/runtime"
"github.com/cilium/cilium/api/v1/models"
)
// APIError is the error representation for the API.
type APIError struct {
	// code is the HTTP status code reported to the client.
	code int
	// msg is the human-readable error message.
	msg string
}
// New creates an API error from the code, msg and extra arguments. Extra
// arguments are applied to msg via fmt.Sprintf. Non-positive codes are
// normalized to 500.
func New(code int, msg string, args ...any) *APIError {
	if code <= 0 {
		code = http.StatusInternalServerError
	}
	m := msg
	if len(args) > 0 {
		m = fmt.Sprintf(msg, args...)
	}
	return &APIError{code: code, msg: m}
}
// GetCode returns the HTTP status code for the API Error.
func (a *APIError) GetCode() int {
	return a.code
}
// Error creates a new API error from the code and error. A nil error is
// substituted with a placeholder so the resulting message is never empty.
func Error(code int, err error) *APIError {
	if err == nil {
		err = errors.New("Error pointer was nil")
	}
	return New(code, "%v", err)
}
// Error returns the API error message, implementing the error interface.
func (a *APIError) Error() string {
	return a.msg
}
// GetModel returns the error as the generated models.Error payload type.
func (a *APIError) GetModel() *models.Error {
	m := models.Error(a.msg)
	return &m
}
// WriteResponse to the client.
func (a *APIError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.WriteHeader(a.code)
	m := a.GetModel()
	// A producer failure is unrecoverable here (the status line has already
	// been written), hence the panic; presumably it is recovered by
	// APIPanicHandler further up the chain — confirm the wiring.
	if err := producer.Produce(rw, m); err != nil {
		panic(err)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"context"
"errors"
"log/slog"
"net/http"
"os"
"runtime/debug"
"syscall"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// APIPanicHandler recovers from API panics and logs encountered panics.
type APIPanicHandler struct {
	Logger *slog.Logger
	// Next is the wrapped handler whose panics are recovered.
	Next http.Handler
}
// ServeHTTP implements the http.Handler interface.
// It recovers from panics of all next handlers and logs them.
func (h *APIPanicHandler) ServeHTTP(wr http.ResponseWriter, req *http.Request) {
	// recover() only works directly inside a deferred function, so the whole
	// recovery logic lives in this defer.
	defer func() {
		if r := recover(); r != nil {
			scopedLog := h.Logger.With(
				logfields.URL, req.URL,
				logfields.Method, req.Method,
				logfields.Client, req.RemoteAddr,
			)

			// A broken pipe means the client went away mid-response; log at
			// debug level and do not attempt to write a reply.
			if err, ok := r.(error); ok && errors.Is(err, syscall.EPIPE) {
				scopedLog.Debug("Failed to write API response: client connection closed",
					logfields.Error, err,
				)
				return
			}
			scopedLog.Warn("Cilium API handler panicked",
				logfields.PanicMessage, r,
			)
			// Dump the stack trace only when debug logging is enabled.
			if scopedLog.Enabled(context.Background(), slog.LevelDebug) {
				os.Stdout.Write(debug.Stack())
			}
			wr.WriteHeader(http.StatusInternalServerError)
			if _, err := wr.Write([]byte("Internal error occurred, check Cilium logs for details.")); err != nil {
				scopedLog.Debug("Failed to write API response",
					logfields.Error, err,
				)
			}
		}
	}()
	h.Next.ServeHTTP(wr, req)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"fmt"
"strings"
"github.com/go-openapi/loads"
"github.com/go-openapi/spec"
)
var (
	// ErrUnknownWildcard is returned when a flag contains a wildcard in an
	// unsupported position (only a trailing "*" is allowed).
	ErrUnknownWildcard = fmt.Errorf("Unsupported API wildcard")
	// ErrUnknownFlag is returned when a flag does not match any known API.
	ErrUnknownFlag = fmt.Errorf("Unknown API flag")
)
// pascalize converts a single word to PascalCase, keeping a small set of
// well-known initialisms fully capitalized. Inputs shorter than two runes
// are upper-cased wholesale.
func pascalize(in string) string {
	if len(in) < 2 {
		return strings.ToUpper(in)
	}
	switch in {
	case "bgp":
		return "BGP"
	case "id":
		return "ID"
	case "ip":
		return "IP"
	case "ipam":
		return "IPAM"
	case "lrp":
		return "LRP"
	default:
		return strings.ToUpper(in[:1]) + strings.ToLower(in[1:])
	}
}
func pathToFlagSuffix(path string) string {
result := ""
path = strings.TrimPrefix(path, "/")
for hunk := range strings.SplitSeq(path, "/") {
// TODO: Maybe we can just rename the /cgroup-dump-metadata API to /cgroups to avoid this loop?
for word := range strings.SplitSeq(hunk, "-") {
trimmed := strings.Trim(word, "{}")
result = result + pascalize(trimmed)
}
}
return result
}
// parseSpecPaths converts the swagger path specification into a PathSet keyed
// by the pascalized "<Method><PathSuffix>" flag name (e.g. "GetEndpointID").
func parseSpecPaths(paths *spec.Paths) PathSet {
	// Pre-size for the common case of one operation per path.
	results := make(PathSet, len(paths.Paths))

	for path, item := range paths.Paths {
		suffix := pathToFlagSuffix(path)
		// Every HTTP operation defined on the path becomes its own flag.
		ops := map[string]*spec.Operation{
			"Delete": item.Delete,
			"Get":    item.Get,
			"Patch":  item.Patch,
			"Post":   item.Post,
			"Put":    item.Put,
		}
		for prefix, op := range ops {
			if op == nil {
				continue
			}
			results[prefix+suffix] = Endpoint{
				Method:      strings.ToUpper(prefix),
				Path:        path,
				Description: op.Description,
			}
		}
	}
	// results is already a PathSet; the previous redundant conversion
	// PathSet(results) has been dropped.
	return results
}
// generateDeniedAPIEndpoints computes the set of API endpoints that must be
// administratively disabled, given the full path set and the list of allowed
// flag names (which may carry a single trailing-"*" wildcard).
//
// NOTE(review): `denied := allPaths` aliases the caller's map, so entries are
// deleted from the caller's PathSet as a side effect — confirm callers do not
// reuse the input.
func generateDeniedAPIEndpoints(allPaths PathSet, allowed []string) (PathSet, error) {
	// default to "deny all", then allow specified APIs by flag
	denied := allPaths

	var wildcardPrefixes []string
	for _, opt := range allowed {
		switch strings.Index(opt, "*") {
		case -1: // No wildcard
			// This break only exits the switch; exact-match handling
			// continues below.
			break
		case len(opt) - 1: // suffix
			prefix := strings.TrimSuffix(opt, "*")
			if len(prefix) == 0 { // Full opt "*", ie allow all
				return PathSet{}, nil
			}
			wildcardPrefixes = append(wildcardPrefixes, prefix)
			continue
		default:
			// A "*" anywhere other than the end is unsupported.
			return nil, fmt.Errorf("%w: %q", ErrUnknownWildcard, opt)
		}

		// Exact flag name: remove it from the denied set, or fail if unknown.
		if _, ok := denied[opt]; ok {
			delete(denied, opt)
		} else {
			return nil, fmt.Errorf("%w: %q", ErrUnknownFlag, opt)
		}
	}

	// Allow every flag matching one of the recorded wildcard prefixes.
	for _, prefix := range wildcardPrefixes {
		for f := range denied {
			if strings.HasPrefix(f, prefix) {
				delete(denied, f)
			}
		}
	}

	return denied, nil
}
// Endpoint is an API Endpoint for a parsed API specification.
type Endpoint struct {
	// Method is the uppercase HTTP method (e.g. "GET").
	Method string
	// Path is the swagger path template (e.g. "/endpoint/{id}").
	Path string
	// Description is the operation description taken from the spec.
	Description string
}

// PathSet is a set of APIs in the form of a map of canonical pascalized flag
// name to MethodPath, for example:
// "GetEndpointID": {"GET", "/endpoint/{id}"}
type PathSet map[string]Endpoint
// NewPathSet returns the PathSet of all API endpoints defined in the given
// OpenAPI specification document.
func NewPathSet(spec *loads.Document) PathSet {
	return parseSpecPaths(spec.Spec().Paths)
}
// AllowedFlagsToDeniedPaths parses the input API specification and the provided
// commandline flags, and returns the PathSet that should be administratively
// disabled using a subsequent call to DisableAPIs().
func AllowedFlagsToDeniedPaths(spec *loads.Document, allowed []string) (PathSet, error) {
	// Delegate path extraction to NewPathSet to keep a single code path
	// for parsing the specification.
	return generateDeniedAPIEndpoints(NewPathSet(spec), allowed)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"fmt"
"os"
"os/user"
"strconv"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// subsysLogAttr is the default set of structured-log attributes that tags
// messages from this package with the "api" subsystem.
var subsysLogAttr = []any{logfields.LogSubsys, "api"}
// getGroupIDByName returns the numeric group ID for the given grpName.
// On any failure (unknown group, or a GID that cannot be parsed as an
// integer) it returns -1 together with a non-nil error.
func getGroupIDByName(grpName string) (int, error) {
	group, err := user.LookupGroup(grpName)
	if err != nil {
		return -1, err
	}
	gid, err := strconv.Atoi(group.Gid)
	if err != nil {
		// Previously a parse failure leaked Atoi's zero value; keep the
		// error-path return value consistent (-1) and wrap the cause.
		return -1, fmt.Errorf("parsing GID %q of group %q: %w", group.Gid, grpName, err)
	}
	return gid, nil
}
// SetDefaultPermissions sets the given socket's group to `CiliumGroupName` and
// mode to `SocketFileMode`.
//
// If the Cilium group cannot be resolved, the chown step is skipped (a debug
// message is emitted via debugLogger) and only the file mode is enforced.
func SetDefaultPermissions(debugLogger func(msg string, args ...any), socketPath string) error {
	gid, err := getGroupIDByName(CiliumGroupName)
	if err != nil {
		// Not fatal: the group is optional; continue with the mode change.
		debugLogger("Group not found",
			logfields.Error, err,
			logfields.Path, socketPath,
			logfields.Group, CiliumGroupName,
		)
	} else if err := os.Chown(socketPath, 0, gid); err != nil {
		// Wrap with %w (previously %s) so callers can use errors.Is/As.
		return fmt.Errorf("failed while setting up %s's group ID in %q: %w",
			CiliumGroupName, socketPath, err)
	}
	if err := os.Chmod(socketPath, SocketFileMode); err != nil {
		return fmt.Errorf("failed while setting up file permissions in %q: %w",
			socketPath, err)
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"github.com/cilium/cilium/pkg/ipam/types"
)
// ENISpec is the ENI specification of a node. This specification is considered
// by the cilium-operator to act as an IPAM operator and makes ENI IPs available
// via the IPAMSpec section.
//
// The ENI specification can either be provided explicitly by the user or the
// cilium agent running on the node can be instructed to create the CiliumNode
// custom resource along with an ENI specification when the node registers
// itself to the Kubernetes cluster.
type ENISpec struct {
	// InstanceID is the AWS InstanceId of the node. The InstanceID is used
	// to retrieve AWS metadata for the node.
	//
	// OBSOLETE: This field is obsolete, please use Spec.InstanceID
	//
	// +kubebuilder:validation:Optional
	InstanceID string `json:"instance-id,omitempty"`

	// InstanceType is the AWS EC2 instance type, e.g. "m5.large"
	//
	// +kubebuilder:validation:Optional
	InstanceType string `json:"instance-type,omitempty"`

	// MinAllocate is the minimum number of IPs that must be allocated when
	// the node is first bootstrapped. It defines the minimum base socket
	// of addresses that must be available. After reaching this watermark,
	// the PreAllocate and MaxAboveWatermark logic takes over to continue
	// allocating IPs.
	//
	// OBSOLETE: This field is obsolete, please use Spec.IPAM.MinAllocate
	//
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Optional
	MinAllocate int `json:"min-allocate,omitempty"`

	// PreAllocate defines the number of IP addresses that must be
	// available for allocation in the IPAMspec. It defines the buffer of
	// addresses available immediately without requiring cilium-operator to
	// get involved.
	//
	// OBSOLETE: This field is obsolete, please use Spec.IPAM.PreAllocate
	//
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Optional
	PreAllocate int `json:"pre-allocate,omitempty"`

	// MaxAboveWatermark is the maximum number of addresses to allocate
	// beyond the addresses needed to reach the PreAllocate watermark.
	// Going above the watermark can help reduce the number of API calls to
	// allocate IPs, e.g. when a new ENI is allocated, as many secondary
	// IPs as possible are allocated. Limiting the amount can help reduce
	// waste of IPs.
	//
	// OBSOLETE: This field is obsolete, please use Spec.IPAM.MaxAboveWatermark
	//
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Optional
	MaxAboveWatermark int `json:"max-above-watermark,omitempty"`

	// FirstInterfaceIndex is the index of the first ENI to use for IP
	// allocation, e.g. if the node has eth0, eth1, eth2 and
	// FirstInterfaceIndex is set to 1, then only eth1 and eth2 will be
	// used for IP allocation, eth0 will be ignored for PodIP allocation.
	//
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Optional
	FirstInterfaceIndex *int `json:"first-interface-index,omitempty"`

	// SecurityGroups is the list of security groups to attach to any ENI
	// that is created and attached to the instance.
	//
	// +kubebuilder:validation:Optional
	SecurityGroups []string `json:"security-groups,omitempty"`

	// SecurityGroupTags is the list of tags to use when evaluating what
	// AWS security groups to use for the ENI.
	//
	// +kubebuilder:validation:Optional
	SecurityGroupTags map[string]string `json:"security-group-tags,omitempty"`

	// SubnetIDs is the list of subnet ids to use when evaluating what AWS
	// subnets to use for ENI and IP allocation.
	//
	// +kubebuilder:validation:Optional
	SubnetIDs []string `json:"subnet-ids,omitempty"`

	// SubnetTags is the list of tags to use when evaluating what AWS
	// subnets to use for ENI and IP allocation.
	//
	// +kubebuilder:validation:Optional
	SubnetTags map[string]string `json:"subnet-tags,omitempty"`

	// NodeSubnetID is the subnet of the primary ENI the instance was brought up
	// with. It is used as a sensible default subnet to create ENIs in.
	//
	// +kubebuilder:validation:Optional
	NodeSubnetID string `json:"node-subnet-id,omitempty"`

	// VpcID is the VPC ID to use when allocating ENIs.
	//
	// +kubebuilder:validation:Optional
	VpcID string `json:"vpc-id,omitempty"`

	// AvailabilityZone is the availability zone to use when allocating
	// ENIs.
	//
	// +kubebuilder:validation:Optional
	AvailabilityZone string `json:"availability-zone,omitempty"`

	// ExcludeInterfaceTags is the list of tags to use when excluding ENIs for
	// Cilium IP allocation. Any interface matching this set of tags will not
	// be managed by Cilium.
	//
	// +kubebuilder:validation:Optional
	ExcludeInterfaceTags map[string]string `json:"exclude-interface-tags,omitempty"`

	// DeleteOnTermination defines that the ENI should be deleted when the
	// associated instance is terminated. If the parameter is not set the
	// default behavior is to delete the ENI on instance termination.
	//
	// +kubebuilder:validation:Optional
	DeleteOnTermination *bool `json:"delete-on-termination,omitempty"`

	// UsePrimaryAddress determines whether an ENI's primary address
	// should be available for allocations on the node
	//
	// +kubebuilder:validation:Optional
	UsePrimaryAddress *bool `json:"use-primary-address,omitempty"`

	// DisablePrefixDelegation determines whether ENI prefix delegation should be
	// disabled on this node.
	//
	// +kubebuilder:validation:Optional
	DisablePrefixDelegation *bool `json:"disable-prefix-delegation,omitempty"`
}
// ENI represents an AWS Elastic Network Interface
//
// More details:
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html
type ENI struct {
	// ID is the ENI ID
	//
	// +optional
	ID string `json:"id,omitempty"`

	// IP is the primary IP of the ENI
	//
	// +optional
	IP string `json:"ip,omitempty"`

	// MAC is the mac address of the ENI
	//
	// +optional
	MAC string `json:"mac,omitempty"`

	// AvailabilityZone is the availability zone of the ENI
	//
	// +optional
	AvailabilityZone string `json:"availability-zone,omitempty"`

	// Description is the description field of the ENI
	//
	// +optional
	Description string `json:"description,omitempty"`

	// Number is the interface index, it is used in combination with
	// FirstInterfaceIndex
	//
	// +optional
	Number int `json:"number,omitempty"`

	// Subnet is the subnet the ENI is associated with
	//
	// +optional
	Subnet AwsSubnet `json:"subnet,omitempty"`

	// VPC is the VPC information to which the ENI is attached to
	//
	// +optional
	VPC AwsVPC `json:"vpc,omitempty"`

	// Addresses is the list of all secondary IPs associated with the ENI
	//
	// +optional
	Addresses []string `json:"addresses,omitempty"`

	// Prefixes is the list of all /28 prefixes associated with the ENI
	//
	// +optional
	Prefixes []string `json:"prefixes,omitempty"`

	// SecurityGroups are the security groups associated with the ENI
	SecurityGroups []string `json:"security-groups,omitempty"`

	// Tags is the set of tags of the ENI. Used to detect ENIs which should
	// not be managed by Cilium
	//
	// +optional
	Tags map[string]string `json:"tags,omitempty"`

	// PublicIP is the public IP associated with the ENI
	//
	// +optional
	PublicIP string `json:"public-ip,omitempty"`
}
// DeepCopyInterface returns a deep copy of the ENI as the generic
// types.Interface type.
func (e *ENI) DeepCopyInterface() types.Interface {
	return e.DeepCopy()
}
// InterfaceID returns the identifier of the interface, i.e. the ENI ID.
func (e *ENI) InterfaceID() string {
	return e.ID
}
// ForeachAddress iterates over all secondary addresses of the ENI and calls
// fn once per address. Iteration stops at the first error returned by fn,
// which is propagated to the caller.
func (e *ENI) ForeachAddress(id string, fn types.AddressIterator) error {
	for i := range e.Addresses {
		addr := e.Addresses[i]
		// The subnet argument is left empty; the address string is passed
		// both as the IP and as the opaque address value.
		if err := fn(id, e.ID, addr, "", addr); err != nil {
			return err
		}
	}
	return nil
}
// IsExcludedBySpec returns true if the ENI is excluded by the provided spec and
// therefore should not be managed by Cilium.
func (e *ENI) IsExcludedBySpec(spec ENISpec) bool {
	// Interfaces below the configured first interface index are never managed.
	if idx := spec.FirstInterfaceIndex; idx != nil && e.Number < *idx {
		return true
	}
	// Otherwise the ENI is excluded only when exclusion tags are configured
	// and the ENI's tags match them.
	return len(spec.ExcludeInterfaceTags) > 0 && types.Tags(e.Tags).Match(spec.ExcludeInterfaceTags)
}
// ENIStatus is the status of ENI addressing of the node
type ENIStatus struct {
	// ENIs is the list of ENIs on the node, keyed by ENI ID
	//
	// +optional
	ENIs map[string]ENI `json:"enis,omitempty"`
}
// AwsSubnet stores information regarding an AWS subnet
type AwsSubnet struct {
	// ID is the ID of the subnet
	ID string `json:"id,omitempty"`

	// CIDR is the CIDR range associated with the subnet
	CIDR string `json:"cidr,omitempty"`
}
// AwsVPC stores information regarding an AWS VPC
type AwsVPC struct {
	// ID is the ID of the VPC
	ID string `json:"id,omitempty"`

	// PrimaryCIDR is the primary CIDR of the VPC
	PrimaryCIDR string `json:"primary-cidr,omitempty"`

	// CIDRs is the list of CIDR ranges associated with the VPC
	CIDRs []string `json:"cidrs,omitempty"`
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package types
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AwsSubnet) DeepCopyInto(out *AwsSubnet) {
	// AwsSubnet contains only value (string) fields, so a plain assignment
	// is already a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsSubnet.
// A nil receiver yields a nil copy.
func (in *AwsSubnet) DeepCopy() *AwsSubnet {
	if in == nil {
		return nil
	}
	out := new(AwsSubnet)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// The CIDRs slice is freshly allocated so the copy shares no memory with the receiver.
func (in *AwsVPC) DeepCopyInto(out *AwsVPC) {
	*out = *in
	if in.CIDRs != nil {
		in, out := &in.CIDRs, &out.CIDRs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsVPC.
// A nil receiver yields a nil copy.
func (in *AwsVPC) DeepCopy() *AwsVPC {
	if in == nil {
		return nil
	}
	out := new(AwsVPC)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// All slices and maps are freshly allocated so the copy shares no memory with the receiver.
func (in *ENI) DeepCopyInto(out *ENI) {
	*out = *in
	// Subnet contains only value fields; the assignment suffices.
	out.Subnet = in.Subnet
	in.VPC.DeepCopyInto(&out.VPC)
	if in.Addresses != nil {
		in, out := &in.Addresses, &out.Addresses
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Prefixes != nil {
		in, out := &in.Prefixes, &out.Prefixes
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SecurityGroups != nil {
		in, out := &in.SecurityGroups, &out.SecurityGroups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENI.
// A nil receiver yields a nil copy.
func (in *ENI) DeepCopy() *ENI {
	if in == nil {
		return nil
	}
	out := new(ENI)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Pointer, slice and map fields are freshly allocated so the copy shares no memory with the receiver.
func (in *ENISpec) DeepCopyInto(out *ENISpec) {
	*out = *in
	if in.FirstInterfaceIndex != nil {
		in, out := &in.FirstInterfaceIndex, &out.FirstInterfaceIndex
		*out = new(int)
		**out = **in
	}
	if in.SecurityGroups != nil {
		in, out := &in.SecurityGroups, &out.SecurityGroups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SecurityGroupTags != nil {
		in, out := &in.SecurityGroupTags, &out.SecurityGroupTags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.SubnetIDs != nil {
		in, out := &in.SubnetIDs, &out.SubnetIDs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SubnetTags != nil {
		in, out := &in.SubnetTags, &out.SubnetTags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.ExcludeInterfaceTags != nil {
		in, out := &in.ExcludeInterfaceTags, &out.ExcludeInterfaceTags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.DeleteOnTermination != nil {
		in, out := &in.DeleteOnTermination, &out.DeleteOnTermination
		*out = new(bool)
		**out = **in
	}
	if in.UsePrimaryAddress != nil {
		in, out := &in.UsePrimaryAddress, &out.UsePrimaryAddress
		*out = new(bool)
		**out = **in
	}
	if in.DisablePrefixDelegation != nil {
		in, out := &in.DisablePrefixDelegation, &out.DisablePrefixDelegation
		*out = new(bool)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENISpec.
// A nil receiver yields a nil copy.
func (in *ENISpec) DeepCopy() *ENISpec {
	if in == nil {
		return nil
	}
	out := new(ENISpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Each map value is deep-copied so the copy shares no memory with the receiver.
func (in *ENIStatus) DeepCopyInto(out *ENIStatus) {
	*out = *in
	if in.ENIs != nil {
		in, out := &in.ENIs, &out.ENIs
		*out = make(map[string]ENI, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENIStatus.
// A nil receiver yields a nil copy.
func (in *ENIStatus) DeepCopy() *ENIStatus {
	if in == nil {
		return nil
	}
	out := new(ENIStatus)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package types
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. A nil other never compares equal.
func (in *AwsSubnet) DeepEqual(other *AwsSubnet) bool {
	if other == nil {
		return false
	}

	if in.ID != other.ID {
		return false
	}
	if in.CIDR != other.CIDR {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Slices are compared by length and
// element-wise content, so a nil and an empty slice compare equal.
func (in *AwsVPC) DeepEqual(other *AwsVPC) bool {
	if other == nil {
		return false
	}

	if in.ID != other.ID {
		return false
	}
	if in.PrimaryCIDR != other.PrimaryCIDR {
		return false
	}
	if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
		in, other := &in.CIDRs, &other.CIDRs
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Slices and maps are compared by
// length and element-wise content, so a nil and an empty collection compare
// equal.
func (in *ENI) DeepEqual(other *ENI) bool {
	if other == nil {
		return false
	}

	if in.ID != other.ID {
		return false
	}
	if in.IP != other.IP {
		return false
	}
	if in.MAC != other.MAC {
		return false
	}
	if in.AvailabilityZone != other.AvailabilityZone {
		return false
	}
	if in.Description != other.Description {
		return false
	}
	if in.Number != other.Number {
		return false
	}
	if in.Subnet != other.Subnet {
		return false
	}
	if !in.VPC.DeepEqual(&other.VPC) {
		return false
	}
	if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
		in, other := &in.Addresses, &other.Addresses
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if ((in.Prefixes != nil) && (other.Prefixes != nil)) || ((in.Prefixes == nil) != (other.Prefixes == nil)) {
		in, other := &in.Prefixes, &other.Prefixes
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if ((in.SecurityGroups != nil) && (other.SecurityGroups != nil)) || ((in.SecurityGroups == nil) != (other.SecurityGroups == nil)) {
		in, other := &in.SecurityGroups, &other.SecurityGroups
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if ((in.Tags != nil) && (other.Tags != nil)) || ((in.Tags == nil) != (other.Tags == nil)) {
		in, other := &in.Tags, &other.Tags
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}

	if in.PublicIP != other.PublicIP {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Pointer fields must agree on
// nil-ness and pointee value; slices and maps are compared by length and
// element-wise content.
func (in *ENISpec) DeepEqual(other *ENISpec) bool {
	if other == nil {
		return false
	}

	if in.InstanceID != other.InstanceID {
		return false
	}
	if in.InstanceType != other.InstanceType {
		return false
	}
	if in.MinAllocate != other.MinAllocate {
		return false
	}
	if in.PreAllocate != other.PreAllocate {
		return false
	}
	if in.MaxAboveWatermark != other.MaxAboveWatermark {
		return false
	}
	if (in.FirstInterfaceIndex == nil) != (other.FirstInterfaceIndex == nil) {
		return false
	} else if in.FirstInterfaceIndex != nil {
		if *in.FirstInterfaceIndex != *other.FirstInterfaceIndex {
			return false
		}
	}

	if ((in.SecurityGroups != nil) && (other.SecurityGroups != nil)) || ((in.SecurityGroups == nil) != (other.SecurityGroups == nil)) {
		in, other := &in.SecurityGroups, &other.SecurityGroups
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if ((in.SecurityGroupTags != nil) && (other.SecurityGroupTags != nil)) || ((in.SecurityGroupTags == nil) != (other.SecurityGroupTags == nil)) {
		in, other := &in.SecurityGroupTags, &other.SecurityGroupTags
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}

	if ((in.SubnetIDs != nil) && (other.SubnetIDs != nil)) || ((in.SubnetIDs == nil) != (other.SubnetIDs == nil)) {
		in, other := &in.SubnetIDs, &other.SubnetIDs
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if ((in.SubnetTags != nil) && (other.SubnetTags != nil)) || ((in.SubnetTags == nil) != (other.SubnetTags == nil)) {
		in, other := &in.SubnetTags, &other.SubnetTags
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}

	if in.NodeSubnetID != other.NodeSubnetID {
		return false
	}
	if in.VpcID != other.VpcID {
		return false
	}
	if in.AvailabilityZone != other.AvailabilityZone {
		return false
	}
	if ((in.ExcludeInterfaceTags != nil) && (other.ExcludeInterfaceTags != nil)) || ((in.ExcludeInterfaceTags == nil) != (other.ExcludeInterfaceTags == nil)) {
		in, other := &in.ExcludeInterfaceTags, &other.ExcludeInterfaceTags
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}

	if (in.DeleteOnTermination == nil) != (other.DeleteOnTermination == nil) {
		return false
	} else if in.DeleteOnTermination != nil {
		if *in.DeleteOnTermination != *other.DeleteOnTermination {
			return false
		}
	}

	if (in.UsePrimaryAddress == nil) != (other.UsePrimaryAddress == nil) {
		return false
	} else if in.UsePrimaryAddress != nil {
		if *in.UsePrimaryAddress != *other.UsePrimaryAddress {
			return false
		}
	}

	if (in.DisablePrefixDelegation == nil) != (other.DisablePrefixDelegation == nil) {
		return false
	} else if in.DisablePrefixDelegation != nil {
		if *in.DisablePrefixDelegation != *other.DisablePrefixDelegation {
			return false
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Map values are compared with the
// element type's DeepEqual.
func (in *ENIStatus) DeepEqual(other *ENIStatus) bool {
	if other == nil {
		return false
	}

	if ((in.ENIs != nil) && (other.ENIs != nil)) || ((in.ENIs == nil) != (other.ENIs == nil)) {
		in, other := &in.ENIs, &other.ENIs
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if !inValue.DeepEqual(&otherValue) {
						return false
					}
				}
			}
		}
	}

	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"strings"
"github.com/cilium/cilium/pkg/ipam/types"
)
const (
	// ProviderPrefix is the prefix used to indicate that a k8s ProviderID
	// represents an Azure resource
	ProviderPrefix = "azure://"

	// InterfaceAddressLimit is the maximum number of addresses on an interface
	//
	// For more information:
	// https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/azure-subscription-service-limits?toc=%2fazure%2fvirtual-network%2ftoc.json#networking-limits
	InterfaceAddressLimit = 256

	// StateSucceeded is the address state for a successfully provisioned address
	StateSucceeded = "succeeded"
)
// AzureSpec is the Azure specification of a node running via the Azure IPAM
//
// The Azure specification can either be provided explicitly by the user or the
// cilium agent running on the node can be instructed to create the CiliumNode
// custom resource along with an Azure specification when the node registers
// itself to the Kubernetes cluster.
// This struct is embedded into v2.CiliumNode
//
// +k8s:deepcopy-gen=true
type AzureSpec struct {
	// InterfaceName is the name of the interface the cilium-operator
	// will use to allocate all the IPs on
	//
	// +kubebuilder:validation:Optional
	InterfaceName string `json:"interface-name,omitempty"`
}
// AzureStatus is the status of Azure addressing of the node.
// This struct is embedded into v2.CiliumNode
//
// +k8s:deepcopy-gen=true
type AzureStatus struct {
	// Interfaces is the list of interfaces on the node
	//
	// +optional
	Interfaces []AzureInterface `json:"interfaces,omitempty"`
}
// AzureAddress is an IP address assigned to an AzureInterface
type AzureAddress struct {
	// IP is the ip address of the address
	IP string `json:"ip,omitempty"`

	// Subnet is the subnet the address belongs to
	Subnet string `json:"subnet,omitempty"`

	// State is the provisioning state of the address (e.g. StateSucceeded)
	State string `json:"state,omitempty"`
}
// AzureInterface represents an Azure Interface
//
// +k8s:deepcopy-gen=true
type AzureInterface struct {
	// ID is the identifier
	//
	// +optional
	ID string `json:"id,omitempty"`

	// Name is the name of the interface
	//
	// +optional
	Name string `json:"name,omitempty"`

	// MAC is the mac address
	//
	// +optional
	MAC string `json:"mac,omitempty"`

	// State is the provisioning state
	//
	// +optional
	State string `json:"state,omitempty"`

	// Addresses is the list of all IPs associated with the interface,
	// including all secondary addresses
	//
	// +optional
	Addresses []AzureAddress `json:"addresses,omitempty"`

	// SecurityGroup is the security group associated with the interface
	SecurityGroup string `json:"security-group,omitempty"`

	// GatewayIP is the interface's subnet's default route
	//
	// OBSOLETE: This field is obsolete, please use Gateway field instead.
	//
	// NOTE(review): the json key is capitalized and not omitempty, unlike
	// the other fields — presumably kept for wire compatibility; verify
	// before changing.
	//
	// +optional
	GatewayIP string `json:"GatewayIP"`

	// Gateway is the interface's subnet's default route
	//
	// +optional
	Gateway string `json:"gateway"`

	// CIDR is the range that the interface belongs to.
	//
	// +optional
	CIDR string `json:"cidr,omitempty"`

	// vmssName is the name of the virtual machine scale set. This field is
	// set by extractIDs()
	vmssName string `json:"-"`

	// vmID is the ID of the virtual machine, set by extractIDs()
	vmID string `json:"-"`

	// resourceGroup is the resource group the interface belongs to,
	// set by extractIDs()
	resourceGroup string `json:"-"`
}
// DeepCopyInterface returns a deep copy of the AzureInterface as the generic
// types.Interface type.
func (a *AzureInterface) DeepCopyInterface() types.Interface {
	return a.DeepCopy()
}
// SetID sets the Azure interface ID, as well as extracting other fields from
// the ID itself (resource group, VMSS name and VM ID; see extractIDs).
func (a *AzureInterface) SetID(id string) {
	a.ID = id
	a.extractIDs()
}
// InterfaceID returns the identifier of the interface, i.e. the Azure
// resource ID.
func (a *AzureInterface) InterfaceID() string {
	return a.ID
}
// extractIDs parses the Azure resource ID stored in a.ID and populates the
// derived resourceGroup, vmssName and vmID fields. IDs matching neither
// known layout leave the fields untouched.
//
// NOTE(review): the segment indices below (4, 8, 10) assume IDs of the form
// /subscriptions/<sub>/resourceGroups/<rg>/providers/... — the doubled
// leading slash in the examples appears to be a comment artifact; verify
// against real IDs before changing.
func (a *AzureInterface) extractIDs() {
	switch {
	// Interface from a VMSS instance:
	// //subscriptions/xxx/resourceGroups/yyy/providers/Microsoft.Compute/virtualMachineScaleSets/ssss/virtualMachines/vvv/networkInterfaces/iii
	case strings.Contains(a.ID, "virtualMachineScaleSets"):
		segs := strings.Split(a.ID, "/")
		if len(segs) >= 5 {
			a.resourceGroup = segs[4]
		}
		if len(segs) >= 9 {
			a.vmssName = segs[8]
		}
		if len(segs) >= 11 {
			a.vmID = segs[10]
		}
	// Interface from a standalone instance:
	// //subscriptions/xxx/resourceGroups/yyy/providers/Microsoft.Network/networkInterfaces/iii
	case strings.Contains(a.ID, "/Microsoft.Network/"):
		segs := strings.Split(a.ID, "/")
		if len(segs) >= 5 {
			a.resourceGroup = segs[4]
		}
	}
}
// GetResourceGroup returns the resource group the interface belongs to,
// as derived from the interface ID by extractIDs.
func (a *AzureInterface) GetResourceGroup() string {
	return a.resourceGroup
}
// GetVMScaleSetName returns the VM scale set name the interface belongs to,
// as derived from the interface ID by extractIDs (empty for standalone VMs).
func (a *AzureInterface) GetVMScaleSetName() string {
	return a.vmssName
}
// GetVMID returns the VM ID the interface belongs to, as derived from the
// interface ID by extractIDs (empty for standalone VMs).
func (a *AzureInterface) GetVMID() string {
	return a.vmID
}
// ForeachAddress iterates over all addresses of the interface and calls fn
// once per address. Iteration stops at the first error returned by fn, which
// is propagated to the caller.
func (a *AzureInterface) ForeachAddress(id string, fn types.AddressIterator) error {
	for i := range a.Addresses {
		addr := &a.Addresses[i]
		if err := fn(id, a.ID, addr.IP, addr.Subnet, *addr); err != nil {
			return err
		}
	}
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package types
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// The Addresses slice is freshly allocated; its elements contain only value fields, so copy() suffices.
func (in *AzureInterface) DeepCopyInto(out *AzureInterface) {
	*out = *in
	if in.Addresses != nil {
		in, out := &in.Addresses, &out.Addresses
		*out = make([]AzureAddress, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureInterface.
// A nil receiver yields a nil copy.
func (in *AzureInterface) DeepCopy() *AzureInterface {
	if in == nil {
		return nil
	}
	out := new(AzureInterface)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureSpec) DeepCopyInto(out *AzureSpec) {
	// AzureSpec contains only value (string) fields, so a plain assignment
	// is already a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSpec.
// A nil receiver yields a nil copy.
func (in *AzureSpec) DeepCopy() *AzureSpec {
	if in == nil {
		return nil
	}
	out := new(AzureSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Each interface is deep-copied so the copy shares no memory with the receiver.
func (in *AzureStatus) DeepCopyInto(out *AzureStatus) {
	*out = *in
	if in.Interfaces != nil {
		in, out := &in.Interfaces, &out.Interfaces
		*out = make([]AzureInterface, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStatus.
// A nil receiver yields a nil copy.
func (in *AzureStatus) DeepCopy() *AzureStatus {
	if in == nil {
		return nil
	}
	out := new(AzureStatus)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package types
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. A nil other never compares equal.
func (in *AzureAddress) DeepEqual(other *AzureAddress) bool {
	if other == nil {
		return false
	}

	if in.IP != other.IP {
		return false
	}
	if in.Subnet != other.Subnet {
		return false
	}
	if in.State != other.State {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. The Addresses slice is compared
// by length and element-wise DeepEqual; unexported derived fields
// (vmssName, vmID, resourceGroup) participate in the comparison.
func (in *AzureInterface) DeepEqual(other *AzureInterface) bool {
	if other == nil {
		return false
	}

	if in.ID != other.ID {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.MAC != other.MAC {
		return false
	}
	if in.State != other.State {
		return false
	}
	if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
		in, other := &in.Addresses, &other.Addresses
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	if in.SecurityGroup != other.SecurityGroup {
		return false
	}
	if in.GatewayIP != other.GatewayIP {
		return false
	}
	if in.Gateway != other.Gateway {
		return false
	}
	if in.CIDR != other.CIDR {
		return false
	}
	if in.vmssName != other.vmssName {
		return false
	}
	if in.vmID != other.vmID {
		return false
	}
	if in.resourceGroup != other.resourceGroup {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. A nil other never compares equal.
func (in *AzureSpec) DeepEqual(other *AzureSpec) bool {
	if other == nil {
		return false
	}

	if in.InterfaceName != other.InterfaceName {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Interfaces are compared by length
// and element-wise DeepEqual.
func (in *AzureStatus) DeepEqual(other *AzureStatus) bool {
	if other == nil {
		return false
	}

	if ((in.Interfaces != nil) && (other.Interfaces != nil)) || ((in.Interfaces == nil) != (other.Interfaces == nil)) {
		in, other := &in.Interfaces, &other.Interfaces
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package backoff
import (
"context"
"fmt"
"log/slog"
"math"
"math/rand/v2"
"github.com/google/uuid"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// NodeManager is the interface required to implement cluster size dependent
// intervals
type NodeManager interface {
	// ClusterSizeDependantInterval returns the given base interval adjusted
	// for the current cluster size.
	ClusterSizeDependantInterval(baseInterval time.Duration) time.Duration
}
// nodeManager is a wrapper to enable using a plain function as NodeManager to implement
// cluster size dependent intervals
type nodeManager struct {
	// clusterSizeDependantInterval may be nil, in which case no tuning is
	// performed and the base interval is used as-is.
	clusterSizeDependantInterval func(baseInterval time.Duration) time.Duration
}
// NewNodeManager returns a new NodeManager implementing cluster size dependent intervals
// based on the given function. If the function is nil, then no tuning is performed.
func NewNodeManager(clusterSizeDependantInterval func(baseInterval time.Duration) time.Duration) NodeManager {
	return &nodeManager{clusterSizeDependantInterval: clusterSizeDependantInterval}
}
// ClusterSizeDependantInterval delegates to the wrapped function, or returns
// the base interval unchanged when no function was provided.
func (n *nodeManager) ClusterSizeDependantInterval(baseInterval time.Duration) time.Duration {
	if fn := n.clusterSizeDependantInterval; fn != nil {
		return fn(baseInterval)
	}
	return baseInterval
}
// Exponential implements an exponential backoff
type Exponential struct {
	// Logger emits a debug message on every Wait(). NOTE(review): Wait calls
	// Logger.Debug unconditionally, so Logger must be set before Wait is used.
	Logger *slog.Logger
	// Min is the minimal backoff time, if unspecified, 1 second will be
	// used
	Min time.Duration
	// Max is the maximum backoff time, if unspecified, no maximum time is
	// applied
	Max time.Duration
	// Factor is the factor the backoff time grows exponentially, if
	// unspecified, a factor of 2.0 will be used
	Factor float64
	// Jitter, when enabled, adds random jitter to the interval
	Jitter bool
	// NodeManager enables the use of cluster size dependent backoff
	// intervals, i.e. the larger the cluster, the longer the backoff
	// interval
	NodeManager NodeManager
	// Name is a free form string describing the operation subject to the
	// backoff, if unspecified, a UUID is generated. This string is used
	// for logging purposes.
	Name string
	// ResetAfter will reset the exponential back-off if no attempt is made for the amount of time specified here.
	// Needs to be larger than the Max duration, otherwise it will be ignored to avoid accidental resets.
	// If unspecified, no reset is performed.
	ResetAfter time.Duration

	// lastBackoffStart records when the most recent Wait() began; used to
	// implement ResetAfter.
	lastBackoffStart time.Time

	// attempt counts Wait() invocations since the last Reset().
	attempt int
}
// CalculateDuration calculates the backoff duration based on minimum base
// interval, exponential factor, jitter and number of failures.
func CalculateDuration(min, max time.Duration, factor float64, jitter bool, failures int) time.Duration {
minFloat := float64(min)
maxFloat := float64(max)
t := minFloat * math.Pow(factor, float64(failures))
if max != time.Duration(0) && t > maxFloat {
t = maxFloat
}
if jitter {
t = rand.Float64()*(t-minFloat) + minFloat
}
return time.Duration(t)
}
// ClusterSizeDependantInterval returns a time.Duration that is dependent on
// the cluster size, i.e. the number of nodes that have been discovered. This
// can be used to control sync intervals of shared or centralized resources to
// avoid overloading these resources as the cluster grows.
//
// The interval scales with log(1+numNodes). Example sync interval with
// baseInterval = 1 * time.Minute:
//
//	nodes | sync interval
//	------+-----------------
//	1     | 41.588830833s
//	2     | 1m05.916737320s
//	4     | 1m36.566274746s
//	16    | 2m49.992800643s
//	256   | 5m32.944565093s
//	16384 | 9m42.247293667s
func ClusterSizeDependantInterval(baseInterval time.Duration, numNodes int) time.Duration {
	// no nodes are being managed, no work will be performed, return
	// baseInterval to check again in a reasonable timeframe
	if numNodes == 0 {
		return baseInterval
	}
	scaled := math.Log1p(float64(numNodes)) * float64(baseInterval.Nanoseconds())
	return time.Duration(int64(scaled))
}
// Reset resets the backoff attempt counter to zero, so the next Wait starts
// again from the minimal interval.
func (b *Exponential) Reset() {
	b.attempt = 0
}

// Attempt returns the number of attempts since the last reset.
func (b *Exponential) Attempt() int {
	return b.attempt
}
// Wait waits for the required time using an exponential backoff. It returns
// an error if the context is cancelled while waiting.
func (b *Exponential) Wait(ctx context.Context) error {
	// Reset the attempt counter when the backoff has been idle longer than
	// ResetAfter. Only honored when ResetAfter exceeds Max, to avoid
	// accidental resets.
	if reset := b.ResetAfter; reset != time.Duration(0) && reset > b.Max &&
		!b.lastBackoffStart.IsZero() && time.Since(b.lastBackoffStart) > reset {
		b.Reset()
	}

	b.lastBackoffStart = time.Now()
	b.attempt++
	wait := b.Duration(b.attempt)

	b.Logger.Debug("Sleeping with exponential backoff",
		logfields.Duration, wait,
		logfields.Attempt, b.attempt,
		logfields.Name, b.Name,
	)

	select {
	case <-time.After(wait):
		return nil
	case <-ctx.Done():
		return fmt.Errorf("exponential backoff cancelled via context: %w", ctx.Err())
	}
}
// Duration returns the wait duration for the nth attempt, applying the
// defaults (Min 1s, Factor 2.0), optional cluster-size scaling and the Max
// cap. Also lazily assigns a UUID Name when none was set.
func (b *Exponential) Duration(attempt int) time.Duration {
	if b.Name == "" {
		b.Name = uuid.New().String()
	}

	base := time.Second
	if b.Min != time.Duration(0) {
		base = b.Min
	}

	growth := 2.0
	if b.Factor != float64(0) {
		growth = b.Factor
	}

	d := CalculateDuration(base, b.Max, growth, b.Jitter, attempt)

	if b.NodeManager != nil {
		d = b.NodeManager.ClusterSizeDependantInterval(d)
	}

	// Re-apply the cap: cluster-size scaling may have pushed d above Max.
	if b.Max != time.Duration(0) && d > b.Max {
		d = b.Max
	}
	return d
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package analyze
import (
"unsafe"
)
// bitmap is a bitmap used to track which blocks are reachable in the control
// flow graph.
type bitmap []uint64

// wordSize is the number of bits held by one bitmap word.
const wordSize = uint64(unsafe.Alignof(bitmap(nil)[0]) * 8)

// newBitmap returns a bitmap capable of tracking at least n items. All bits are
// false by default.
func newBitmap(n uint64) bitmap {
	words := (n + wordSize - 1) / wordSize
	return make(bitmap, words)
}

// set sets the bit at index i to the given value. Out-of-range indices are
// silently ignored.
func (b bitmap) set(i uint64, value bool) {
	word := i / wordSize
	if word >= uint64(len(b)) {
		return
	}
	mask := uint64(1) << (i % wordSize)
	if value {
		b[word] |= mask
	} else {
		b[word] &^= mask
	}
}

// get reports whether the bit at index i is set. Out-of-range indices report
// false.
func (b bitmap) get(i uint64) bool {
	word := i / wordSize
	if word >= uint64(len(b)) {
		return false
	}
	return b[word]&(1<<(i%wordSize)) != 0
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package analyze
import (
"errors"
"fmt"
"iter"
"slices"
"strings"
"github.com/cilium/ebpf/asm"
)
// leaderKey is used to store the leader metadata in an instruction's metadata.
type leaderKey struct{}

// A leader is an instruction at the beginning of a basic block.
type leader struct {
	// predecessors are instructions in other blocks that are always executed
	// before this instruction.
	predecessors []*asm.Instruction
	// block is the block that this instruction is the start of. Populated
	// later, during block allocation.
	block *Block
}

// setLeaderMeta sets the leader metadata for an instruction. This metadata
// is used to mark the start of a basic block and to store information about
// the block and its predecessors.
func setLeaderMeta(ins *asm.Instruction, meta *leader) {
	ins.Metadata.Set(leaderKey{}, meta)
}
// getLeaderMeta retrieves the leader metadata for an instruction, or nil if
// the instruction is not marked as a leader.
func getLeaderMeta(ins *asm.Instruction) *leader {
	if meta, ok := ins.Metadata.Get(leaderKey{}).(*leader); ok {
		return meta
	}
	return nil
}
// addPredecessors adds one or more predecessor instructions to the list of
// predecessors for the given instruction. nil predecessors and duplicates are
// skipped.
//
// This is used to track the control flow graph of the program, where each
// instruction can have multiple predecessors (i.e. it can be reached from
// multiple branches). Initializes the instruction's leader metadata if it does
// not exist yet.
func addPredecessors(ins *asm.Instruction, preds ...*asm.Instruction) {
	meta := getLeaderMeta(ins)
	if meta == nil {
		meta = &leader{}
		setLeaderMeta(ins, meta)
	}

	for _, p := range preds {
		if p == nil || slices.Contains(meta.predecessors, p) {
			continue
		}
		meta.predecessors = append(meta.predecessors, p)
	}
}
// edgeKey is used to store the edge metadata in an instruction's metadata.
type edgeKey struct{}

// edge is a metadata structure that is associated with an instruction marking
// the end of a basic block. It can have a branch target (the target of a jump
// instruction) and a fallthrough target (the next instruction in the
// instruction stream that is executed if the branch is not taken).
type edge struct {
	// branch is the jump target, nil if this instruction doesn't branch.
	branch *asm.Instruction
	// fthrough is the next instruction executed when the branch is not taken,
	// nil if execution never falls through.
	fthrough *asm.Instruction
	// block is the block this instruction terminates. Populated later, during
	// block allocation.
	block *Block
}

// setEdgeMeta sets the edge metadata for an instruction.
func setEdgeMeta(ins *asm.Instruction, meta *edge) {
	ins.Metadata.Set(edgeKey{}, meta)
}
// getEdgeMeta retrieves the edge metadata for an instruction, or nil if the
// instruction is not marked as an edge.
func getEdgeMeta(ins *asm.Instruction) *edge {
	if meta, ok := ins.Metadata.Get(edgeKey{}).(*edge); ok {
		return meta
	}
	return nil
}
// setEdgeBranchTarget sets the branch target for an edge instruction. This is
// used to mark the target of a jump instruction that branches to another basic
// block. Initializes the instruction's edge metadata if it does not exist yet.
//
// A nil ins is a no-op, mirroring setEdgeFallthrough.
func setEdgeBranchTarget(ins *asm.Instruction, target *asm.Instruction) {
	if ins == nil {
		// Consistency/robustness fix: setEdgeFallthrough guards against nil,
		// but this function didn't — getEdgeMeta on a nil instruction would
		// dereference a nil pointer.
		return
	}
	e := getEdgeMeta(ins)
	if e == nil {
		e = &edge{}
		setEdgeMeta(ins, e)
	}
	e.branch = target
}
// setEdgeFallthrough sets the fallthrough target for an edge instruction. This
// is used to mark the next instruction in the instruction stream that is
// executed if the branch is not taken, typically the instruction immediately
// following the branch instruction. A nil ins is a no-op.
func setEdgeFallthrough(ins *asm.Instruction, target *asm.Instruction) {
	if ins == nil {
		return
	}
	meta := getEdgeMeta(ins)
	if meta == nil {
		meta = &edge{}
		setEdgeMeta(ins, meta)
	}
	meta.fthrough = target
}
// setBranchTarget creates a two-way association between both the branch
// instruction and its target instruction, as well as the target instruction and
// its natural predecessor. prev may be nil if the branch target is the first
// instruction in the program.
//
// This process creates two edges and a leader, updating existing metadata if
// the instructions were already marked as leaders or edges.
func setBranchTarget(branch, target, prev *asm.Instruction) {
	// Associate the branch instruction with its target. The target becomes a
	// leader, the branch instruction becomes an edge.
	setEdgeBranchTarget(branch, target)

	// Create a reverse link from the branch target to both the branch (jump)
	// instructions and the target's predecessor.
	// canFallthrough is defined elsewhere in this package; per the comments
	// below it reports whether prev can continue into the next instruction
	// (i.e. is not a Ja/Exit).
	if canFallthrough(prev) {
		// Creating a leader implicitly means making the instruction before it an
		// edge with a fallthrough target. Associate the target with its predecessor.
		setEdgeFallthrough(prev, target)
		addPredecessors(target, branch, prev)
	} else {
		// If the instruction preceding the branch target cannot fall through (Ja,
		// Exit), don't register it as a predecessor.
		setEdgeFallthrough(prev, nil)
		addPredecessors(target, branch)
	}
}

// A Block is a contiguous sequence of instructions that are executed together.
// Boundaries are defined by branching instructions.
//
// Blocks are attached to instructions via metadata and should not be modified
// after being created.
//
// It should never contain direct references to the original asm.Instructions
// since copying the ProgramSpec won't update pointers to the new copied insns.
// This is a problem when modifying instructions through
// [Blocks.LiveInstructions] after reachability analysis, since it would modify
// the original ProgramSpec's instructions.
type Block struct {
	// id is the block's index in its Blocks list.
	id uint64
	// raw is the offset of the block's first instruction in raw instruction
	// units, which may differ from the logical index due to variable-size
	// instructions (e.g. dword loads).
	raw asm.RawInstructionOffset
	// start and end are the logical indices of the block's first and last
	// instructions (inclusive).
	start, end int
	predecessors []*Block
	branch *Block
	fthrough *Block
	// predict is the branch-prediction verdict: 0 = none, 1 = branch always
	// taken, 2 = fallthrough always taken (see Dump).
	predict uint8
}
// leader returns the leader metadata of the block's first instruction, or nil
// when insns is empty.
func (b *Block) leader(insns asm.Instructions) *leader {
	if len(insns) != 0 {
		return getLeaderMeta(&insns[b.start])
	}
	return nil
}
// edge returns the edge metadata of the block's last instruction, or nil when
// insns is empty or the block's end lies outside insns.
func (b *Block) edge(insns asm.Instructions) *edge {
	if len(insns) == 0 || b.end >= len(insns) {
		return nil
	}
	return getEdgeMeta(&insns[b.end])
}
// len returns the number of instructions in the block, never negative.
func (b *Block) len() int {
	if n := b.end - b.start + 1; n > 0 {
		return n
	}
	return 0
}
// iterate returns an iterator over the block's instructions, or nil when the
// block's bounds don't fit within insns.
func (b *Block) iterate(insns asm.Instructions) *asm.InstructionIterator {
	inBounds := b.start >= 0 && b.end >= 0 && b.start < len(insns) && b.end < len(insns)
	if !inBounds {
		return nil
	}

	it := insns[b.start : b.end+1].Iterate()
	// Setting these fields correctly allows the insn printer to show correct
	// raw offsets of instructions matching verifier output. Makes debugging
	// significantly easier.
	it.Index = b.start
	it.Offset = b.raw
	return it
}
// backward returns an iterator that traverses the instructions in the block
// in reverse order, starting from the last instruction and going to the first.
// When the top of the block is reached and the block has exactly one
// predecessor, traversal continues at the end of that predecessor, up to
// maxDepth blocks deep.
//
// Doesn't return an [iter.Seq2] because converting it to a pull-based iterator
// using [iter.Pull] is incredibly, prohibitively expensive. Maybe this improves
// in a future Go version.
func (b *Block) backward(insns asm.Instructions) func() (*asm.Instruction, bool) {
	// Track depth of block traversal to avoid infinite loops. Used in favor of a
	// visited set since it's much cheaper than frequent map lookups. Typical
	// depth is 1-3 with some double-digit outliers.
	const maxDepth uint8 = 128
	var depth uint8 = 0

	// i is the index of the next instruction to yield, relative to b.start.
	i := b.len() - 1
	return func() (*asm.Instruction, bool) {
		if i < 0 {
			// If we've reached the start of the block, roll over to its predecessor
			// if there is exactly one. Sometimes, map pointers are reused from a
			// previous block.
			//
			// This is not needed for blocks with multiple predecessors, since
			// execution could've originated from any of them, making the contents of
			// the pointer register undefined.
			if len(b.predecessors) == 1 {
				pred := b.predecessors[0]
				if pred == b {
					// Blocks representing loops can have themselves as a predecessor.
					// Don't roll over to itself for obvious reasons.
					return nil, false
				}
				if depth >= maxDepth {
					return nil, false
				}
				depth++

				b = pred
				i = b.len() - 1
			} else {
				// Zero or multiple predecessors means we can't roll over to a
				// predecessor, stop here.
				return nil, false
			}
		}

		out := &insns[b.start+i]
		i--

		return out, true
	}
}

// String implements fmt.Stringer, rendering the block without an instruction
// listing. Use Dump to include instructions.
func (b *Block) String() string {
	return b.Dump(nil)
}

// Dump renders a human-readable description of the block: its predecessors,
// start/end offsets, instruction listing (only when insns is provided),
// branch/fallthrough targets, and the branch-prediction verdict.
func (b *Block) Dump(insns asm.Instructions) string {
	var sb strings.Builder

	sb.WriteString("Predecessors: [")
	for i, from := range b.predecessors {
		if i > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString(fmt.Sprintf("%d", from.id))
	}
	sb.WriteString("]\n")

	sb.WriteString(fmt.Sprintf("Start: %d (raw %d), end: %d\n", b.start, b.raw, b.end))
	sb.WriteString("\n")

	if len(insns) != 0 {
		sb.WriteString("Instructions:\n")
		i := b.iterate(insns)
		for i.Next() {
			if i.Ins.Symbol() != "" {
				fmt.Fprintf(&sb, "\t%s:\n", i.Ins.Symbol())
			}
			// Print the originating source line above the insn when available.
			if src := i.Ins.Source(); src != nil {
				line := strings.TrimSpace(src.String())
				if line != "" {
					fmt.Fprintf(&sb, "\t%*s; %s\n", 4, " ", line)
				}
			}
			fmt.Fprintf(&sb, "\t%*d: %v\n", 4, i.Offset, i.Ins)
		}
		sb.WriteString("\n")
	} else {
		sb.WriteString("Instructions: not provided, call Dump() with insns\n")
	}

	if b.branch != nil {
		sb.WriteString("Branch: ")
		sb.WriteString(fmt.Sprintf("%d", b.branch.id))
		sb.WriteString("\n")
	}
	if b.fthrough != nil {
		sb.WriteString("Fallthrough: ")
		sb.WriteString(fmt.Sprintf("%d", b.fthrough.id))
		sb.WriteString("\n")
	}

	// predict encodes the verdict: 1 = branch always taken, 2 = fallthrough
	// always taken, anything else prints nothing after the header.
	if b.predict != 0 {
		sb.WriteString("Predict: ")
		switch b.predict {
		case 1:
			sb.WriteString("branch taken\n")
		case 2:
			sb.WriteString("fallthrough taken\n")
		default:
		}
	}

	return sb.String()
}
// getBlock retrieves the block associated with an instruction. It checks both
// the leader and edge metadata to find the block. If neither is found, it
// returns nil, indicating that the instruction forms neither the start nor end
// of a basic block.
func getBlock(ins *asm.Instruction) *Block {
	if l := getLeaderMeta(ins); l != nil {
		return l.block
	}
	if e := getEdgeMeta(ins); e != nil {
		return e.block
	}
	return nil
}
// Blocks is a list of basic blocks.
type Blocks struct {
	// b holds the blocks; a block's id is its index in b (see add).
	b []*Block
	// l is a bitmap tracking reachable blocks.
	l bitmap
}
// LiveInstructions returns a sequence of [asm.Instruction]s held by Blocks. The
// bool value indicates if the instruction is live (reachable), false if it's
// not.
//
// Returns nil if block reachability hasn't been computed yet.
func (bl *Blocks) LiveInstructions(insns asm.Instructions) iter.Seq2[*asm.Instruction, bool] {
	if len(bl.l) == 0 {
		return nil
	}
	return func(yield func(*asm.Instruction, bool) bool) {
		for _, b := range bl.b {
			// Liveness is a per-block property: look it up once per block
			// instead of once per instruction (the original rechecked the
			// bitmap for every instruction of the block).
			live := bl.l.get(b.id)
			for i := range insns[b.start : b.end+1] {
				if !yield(&insns[b.start+i], live) {
					return
				}
			}
		}
	}
}
// newBlocks returns an empty block list with capacity for at least one block.
func newBlocks(cap uint64) *Blocks {
	// Always provide capacity for at least one block.
	n := max(cap, 1)
	return &Blocks{b: make([]*Block, 0, n)}
}
// count returns the number of blocks in the list.
func (bl *Blocks) count() uint64 {
	return uint64(len(bl.b))
}
// add appends a block to the list, assigning it the next sequential id.
// nil blocks are ignored.
func (bl *Blocks) add(b *Block) {
	if b == nil {
		return
	}
	b.id = bl.count()
	bl.b = append(bl.b, b)
}
// first returns the first block in the list, or nil when empty.
func (bl *Blocks) first() *Block {
	if len(bl.b) != 0 {
		return bl.b[0]
	}
	return nil
}

// last returns the last block in the list, or nil when empty.
func (bl *Blocks) last() *Block {
	if n := len(bl.b); n != 0 {
		return bl.b[n-1]
	}
	return nil
}
// isLive reports whether the block with the given id is reachable.
// Out-of-range ids report false.
func (bl *Blocks) isLive(id uint64) bool {
	return id < bl.count() && bl.l.get(id)
}
// countLive returns the number of blocks marked reachable in the bitmap.
func (bl *Blocks) countLive() uint64 {
	var live uint64
	for id := range bl.count() {
		if bl.l.get(id) {
			live++
		}
	}
	return live
}
// String implements fmt.Stringer, rendering the block list without
// instruction listings. Use Dump to include instructions.
func (bl *Blocks) String() string {
	return bl.Dump(nil)
}
// Dump renders every block in the list, including each block's liveness bit,
// delegating per-block rendering to Block.Dump.
func (bl *Blocks) Dump(insns asm.Instructions) string {
	var out strings.Builder
	for _, blk := range bl.b {
		fmt.Fprintf(&out, "\n=== Block %d ===\n", blk.id)
		out.WriteString(blk.Dump(insns))
		fmt.Fprintf(&out, "Live: %t\n", bl.l.get(blk.id))
	}
	return out.String()
}
// Copy returns a shallow copy of the block list. Reachability information is
// not copied (the l bitmap is left zero, so reachability can be recomputed).
//
// Individual blocks are attached to leader and edge [asm.Instruction] metadata
// and should not be modified.
func (bl *Blocks) Copy() *Blocks {
	return &Blocks{
		b: slices.Clone(bl.b),
	}
}
// MakeBlocks returns a list of basic blocks of instructions that are always
// executed together. Multiple calls on the same insns will return the same
// Blocks object.
//
// Blocks are created by finding branches and jump targets in the given insns
// and cutting up the instruction stream accordingly.
func MakeBlocks(insns asm.Instructions) (*Blocks, error) {
	if len(insns) == 0 {
		return nil, errors.New("insns is empty, cannot compute blocks")
	}

	// Serve the result cached on the instruction stream when present.
	if cached := loadBlocks(insns); cached != nil {
		return cached, nil
	}

	computed, err := computeBlocks(insns)
	if err != nil {
		return nil, fmt.Errorf("computing blocks: %w", err)
	}

	if err := storeBlocks(insns, computed); err != nil {
		return nil, fmt.Errorf("storing blocks: %w", err)
	}
	return computed, nil
}
// computeBlocks computes the basic blocks from the given instruction stream.
//
// Pipeline: collect raw jump targets, tag leader/edge instructions, allocate
// blocks, then wire the blocks together.
func computeBlocks(insns asm.Instructions) (*Blocks, error) {
	targets, err := rawJumpTargets(insns)
	if err != nil {
		return nil, fmt.Errorf("collecting jump targets: %w", err)
	}

	if err := tagLeadersAndEdges(insns, targets); err != nil {
		return nil, fmt.Errorf("tagging instructions: %w", err)
	}

	blocks, err := allocateBlocks(insns)
	if err != nil {
		return nil, fmt.Errorf("allocating blocks: %w", err)
	}

	if err := connectBlocks(blocks, insns); err != nil {
		return nil, fmt.Errorf("connecting blocks: %w", err)
	}

	return blocks, nil
}

// blocksKey is used to store Blocks in an instruction's metadata.
type blocksKey struct{}

// storeBlocks associates the given Blocks with the first instruction in the
// given insns.
//
// Returns an error if insns is empty.
func storeBlocks(insns asm.Instructions, bl *Blocks) error {
	if len(insns) == 0 {
		return errors.New("insns is empty, cannot store Blocks")
	}
	insns[0].Metadata.Set(blocksKey{}, bl)
	return nil
}
// loadBlocks retrieves the Blocks associated with the first instruction in the
// given insns.
//
// If insns is empty or no Blocks is present, returns nil.
func loadBlocks(insns asm.Instructions) *Blocks {
	if len(insns) == 0 {
		return nil
	}
	if bl, ok := insns[0].Metadata.Get(blocksKey{}).(*Blocks); ok {
		return bl
	}
	return nil
}
// rawJumpTargets returns a map of raw instruction offsets to jump targets,
// where each target is a logical instruction in the instruction stream.
//
// The raw instruction offsets are the offsets of the instructions in the raw
// bytecode, which may not correspond to the logical instruction indices due to
// variable instruction sizes (e.g. dword loads).
func rawJumpTargets(insns asm.Instructions) (rawTargets, error) {
	// Jump offsets are in raw instructions of size [asm.InstructionSize], but
	// some instructions are 2x the size of a normal instruction (e.g. dword
	// loads). Find the raw offsets of all jump targets and mark them for
	// resolution.
	targets := make(rawTargets)
	i := insns.Iterate()
	for i.Next() {
		target, ok := jumpTarget(i.Offset, i.Ins)
		if !ok {
			continue
		}

		// Queue the target as a 'raw' leader to be resolved to a logical insn in
		// the next loop.
		targets.add(target)

		// Mark the instruction as an incomplete edge to avoid re-checking if each
		// insn is a jump in a subsequent step.
		setEdgeMeta(i.Ins, &edge{})
	}
	if len(targets) == 0 {
		// No jump targets to resolve.
		return nil, nil
	}

	// Second loop for finding the [asm.Instruction] for each raw offset
	// identified in the previous step.
	next, stop := iter.Pull(targets.keysSorted())
	defer stop()

	// Memoize the next leader so we don't need a map lookup for every insn.
	nextTarget, ok := next()
	if !ok {
		return nil, errors.New("no jump target to resolve, this is a bug")
	}

	i = insns.Iterate()
	for i.Next() {
		if i.Offset != nextTarget {
			continue
		}

		// Map the raw instruction offset to its logical instruction.
		targets.resolve(i.Offset, i.Index, i.Ins)

		// Pull the next target to resolve.
		nextTarget, ok = next()
		if !ok {
			// No more targets to resolve.
			break
		}
	}

	return targets, nil
}

// tagLeadersAndEdges tags the instructions in the given instruction stream
// as leaders and/or edges based on their control flow properties. It identifies
// the first instruction as a leader without predecessors, the last instruction
// as an edge without a branch or fallthrough, and processes jump instructions
// to create leaders for their targets and edges for their predecessors.
//
// Returns error if any edge instruction does not have a target instruction at
// the specified raw offset.
func tagLeadersAndEdges(insns asm.Instructions, targets rawTargets) error {
	// Mark first insn as leader without predecessors, last insn as an edge
	// without a branch or fallthrough.
	setLeaderMeta(&insns[0], &leader{})
	setEdgeMeta(&insns[len(insns)-1], &edge{})

	if len(targets) == 0 {
		// No jump targets to resolve.
		return nil
	}

	// Find all jump instructions, create leaders for their targets and edges for
	// their predecessors.
	i := insns.Iterate()
	for i.Next() {
		// If the insn was identified as an edge in a prior step, add it as a
		// predecessor to the next instruction and to the branch target.
		e := getEdgeMeta(i.Ins)
		if e == nil {
			continue
		}

		// If the instruction is a branch, we need to find the target instruction
		// and set it as the branch target.
		raw, ok := jumpTarget(i.Offset, i.Ins)
		if !ok {
			// Edge doesn't have a jump target. This could be an exit or call
			// instruction, in which case there's no jump target to resolve and no
			// leader to create.
			continue
		}

		tgt := targets.get(raw)
		if tgt == nil {
			return fmt.Errorf("edge %v has no target instruction at offset %d", i.Ins, raw)
		}

		// In case of a jump to the first instruction, the target has no
		// predecessor, so we need a bounds check.
		var prev *asm.Instruction
		if tgt.index-1 >= 0 {
			prev = &insns[tgt.index-1]
		}
		setBranchTarget(i.Ins, tgt.ins, prev)

		// No next instruction, don't set a fallthrough target.
		if i.Index == len(insns)-1 {
			continue
		}

		// If the instruction is an unconditional jump, don't consider the next
		// instruction a fallthrough target.
		if i.Ins.OpCode.JumpOp() == asm.Ja {
			continue
		}

		next := &insns[i.Index+1]
		addPredecessors(next, i.Ins)
		setEdgeFallthrough(i.Ins, next)
	}

	return nil
}
// allocateBlocks returns a list of blocks based on leaders and edges identified
// in prior stages. It creates a new block whenever it encounters a leader
// instruction and finalizes the current one when it reaches an edge
// instruction. No blocks are pointing to each other yet, this is done in a
// subsequent step.
func allocateBlocks(insns asm.Instructions) (*Blocks, error) {
	blocks := newBlocks(0)

	var current *Block
	it := insns.Iterate()
	for it.Next() {
		// Leaders open a fresh block.
		if blk := maybeAllocateBlock(it); blk != nil {
			current = blk
			blocks.add(current)
		}
		// Edges close the currently open block.
		maybeFinalizeBlock(current, it)
	}

	if blocks.count() == 0 {
		return nil, errors.New("no blocks created, this is a bug")
	}
	return blocks, nil
}
// maybeAllocateBlock allocates a new block for the instruction pointed to by
// the iterator if it is a leader instruction, recording the block in the
// leader's metadata. Returns nil for non-leader instructions.
func maybeAllocateBlock(i *asm.InstructionIterator) *Block {
	meta := getLeaderMeta(i.Ins)
	if meta == nil {
		return nil
	}

	blk := &Block{
		start: i.Index,
		raw:   i.Offset,
	}
	meta.block = blk
	return blk
}
// maybeFinalizeBlock finalizes the current block by recording its end index
// and associating it with the edge metadata if the instruction is an edge.
// If the instruction is not an edge or the given block is nil, it does nothing.
func maybeFinalizeBlock(blk *Block, i *asm.InstructionIterator) {
	e := getEdgeMeta(i.Ins)
	if e == nil || blk == nil {
		return
	}
	blk.end = i.Index
	e.block = blk
}
// connectBlocks connects the blocks in the given block list by setting their
// predecessors, branch and fallthrough targets based on the relationships
// between instructions identified in prior steps. Assumes that blocks have been
// allocated and that the leaders and edges have been tagged.
func connectBlocks(blocks *Blocks, insns asm.Instructions) error {
	if blocks.count() == 0 {
		return errors.New("no blocks to connect, this is a bug")
	}

	// Wire all blocks together by setting their predecessors, branch and
	// fallthrough targets.
	for _, blk := range blocks.b {
		// Predecessors of the first instruction are the block's predecessors.
		leader := blk.leader(insns)
		if leader == nil {
			return fmt.Errorf("block %d has no leader", blk.id)
		}
		// Translate instruction-level predecessors into block-level ones.
		blk.predecessors = make([]*Block, 0, len(leader.predecessors))
		for _, pi := range leader.predecessors {
			b := getBlock(pi)
			if b == nil {
				return fmt.Errorf("predecessor instruction %v has no block", pi)
			}
			blk.predecessors = append(blk.predecessors, b)
		}

		// Branch/fthrough targets of the last instruction are the block's branch
		// and fallthrough targets.
		edge := blk.edge(insns)
		if edge == nil {
			return fmt.Errorf("block %d has no edge", blk.id)
		}
		if edge.branch != nil {
			// If the edge has a branch target, set it as the block's branch target.
			b := getBlock(edge.branch)
			if b == nil {
				return fmt.Errorf("branch target %v has no block", edge.branch)
			}
			blk.branch = b
		}
		if edge.fthrough != nil {
			// If the edge has a fallthrough target, set it as the block's fallthrough
			// target.
			b := getBlock(edge.fthrough)
			if b == nil {
				return fmt.Errorf("fallthrough target %v has no block", edge.fthrough)
			}
			blk.fthrough = b
		}
	}

	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package analyze
import (
"fmt"
"reflect"
"github.com/cilium/cilium/pkg/container/set"
)
// This code is taken from ebpf-go while we figure out how to export it properly
// from the library.
// Fields extracts object names tagged 'ebpf' from a struct type. to must be a
// non-nil pointer to a struct.
func Fields(to any) (*set.Set[string], error) {
	v := reflect.ValueOf(to)
	if v.Kind() != reflect.Ptr {
		return nil, fmt.Errorf("%T is not a pointer to struct", to)
	}
	if v.IsNil() {
		return nil, fmt.Errorf("nil pointer to %T", to)
	}
	return ebpfFields(v.Elem(), nil)
}
// structField represents a struct field containing the ebpf struct tag.
type structField struct {
	reflect.StructField
	// value is the field's value within the inspected struct instance.
	value reflect.Value
}
// ebpfFields recursively collects the values of all `ebpf`-tagged fields of
// structVal into a set. Untagged fields that are structs (or non-nil pointers
// to structs) are descended into; other untagged fields are skipped.
//
// visited guards against infinite recursion through cyclic pointer-to-struct
// types; pass nil at the top level.
func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) (*set.Set[string], error) {
	if visited == nil {
		visited = make(map[reflect.Type]bool)
	}

	structType := structVal.Type()
	if structType.Kind() != reflect.Struct {
		return nil, fmt.Errorf("%s is not a struct", structType)
	}

	if visited[structType] {
		return nil, fmt.Errorf("recursion on type %s", structType)
	}
	// Bug fix: the visited map was checked above but never written to, so the
	// recursion guard could never trigger and a cyclic pointer-to-struct type
	// would recurse without bound. Mark the type while walking its fields and
	// unmark it on return so sibling fields of the same struct type remain
	// legal.
	visited[structType] = true
	defer delete(visited, structType)

	keep := set.NewSet[string]()
	for i := 0; i < structType.NumField(); i++ {
		field := structField{structType.Field(i), structVal.Field(i)}

		// If the field is tagged, gather it and move on.
		name := field.Tag.Get("ebpf")
		if name != "" {
			keep.Insert(name)
			continue
		}

		// If the field does not have an ebpf tag, but is a struct or a pointer
		// to a struct, attempt to gather its fields as well.
		var v reflect.Value
		switch field.Type.Kind() {
		case reflect.Ptr:
			if field.Type.Elem().Kind() != reflect.Struct {
				continue
			}

			if field.value.IsNil() {
				return nil, fmt.Errorf("nil pointer to %s", structType)
			}

			// Obtain the destination type of the pointer.
			v = field.value.Elem()

		case reflect.Struct:
			// Reference the value's type directly.
			v = field.value

		default:
			continue
		}

		inner, err := ebpfFields(v, visited)
		if err != nil {
			return nil, fmt.Errorf("field %s: %w", field.Name, err)
		}

		keep.Merge(*inner)
	}

	return &keep, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package analyze
import (
"errors"
"fmt"
"unique"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
)
// Reachability performs static analysis on BPF programs to determine which code
// paths are reachable based on runtime constants. It evaluates conditional
// branches that depend on constant configuration values (like those used in
// CONFIG() macros) to predict whether branches will be taken at runtime.
//
// The analysis works by identifying patterns where a value is loaded from a
// global data map, dereferenced, and then used in a conditional branch. When
// such a pattern is found with a constant value, the branch outcome can be
// predicted statically. This allows for dead code elimination to prune unused
// maps and tail calls.
//
// The algorithm works as follows:
//
// 1. Start from the first block of the BPF program. Check if the last
// instruction is a branch instruction that compares a register against an
// immediate (embedded in bytecode) value. For example:
//
// J{OP}Imm dst: Ry off:{relative jump offset} imm: {constant value}
//
// 2. If such an instruction is found, backtrack to find a pointer dereference
// targeting the Ry register used in the branch instruction. If the top of the
// current block is hit, roll over to the predecessor block if the block has a
// single predecessor. For example:
//
// LdXMem{B,H,W,DW} dst: Ry src: Rx off: 0
//
// 3. If the dereference is found, backtrack further to find a map load
// instruction that populates the Rx register. Similar to the previous case,
// backtracking continues in the predecessor if needed. For example:
//
// LoadMapValue dst: Rx, fd: 0 off: {offset of variable} <{name of global data map}>
//
// 4. If a map load instruction is found, look up which variable it refers to in
// CollectionSpec.Variables. Only variables declared `const` qualify for branch
// prediction, otherwise its value may change at runtime and the branch cannot
// be predicted.
//
// 5. If the variable is found and its value is constant, the branch instruction
// is interpreted and a verdict is made whether the branch is always taken
// or never taken.
//
// This process is repeated, recursively, exactly once for each block in the
// BPF program. The analysis is conservative, meaning that if any part of the
// pattern is not found, the branch is considered unpredictable and both the
// branch and fallthrough blocks are visited.
//
// If a block is visited, it is implicitly marked as live, since it means that
// at least one of its predecessors is live, making it reachable from the root
// (first block) of the BPF program.
//
// Once the reachability analysis is complete, the program's instructions can be
// iterated using a special iterator that provides a boolean with every
// instruction to indicate whether the instruction is reachable or not. This
// makes it straightforward to mark live and/or unreachable resources like maps
// and tail calls referenced by the instructions in a single pass.
// TODO(tb): This is kind of silly. Let's just put a NewVariableSpec in the lib
// to make this kind of testing possible. They are accessors anyway, though
// they're copied during CollectionSpec.Copy(), which will need some extra
// attention. Bounds checks also need to be performed in NewVariableSpec.
// Make a variable spec interface that is satisfied by the ebpf.VariableSpec
// This makes testing easier since we can create a mock variable spec.
// Compile-time assertion: the concrete *ebpf.VariableSpec must satisfy
// VariableSpec so it can be passed wherever the interface is expected.
var _ VariableSpec = (*ebpf.VariableSpec)(nil)

// VariableSpec is the subset of ebpf.VariableSpec used by the reachability
// analysis. It is an interface so tests can supply a mock implementation.
type VariableSpec interface {
	// MapName returns the name of the global data map backing the variable.
	MapName() string
	// Offset returns the variable's byte offset within the map value.
	Offset() uint64
	// Size returns the variable's size in bytes.
	Size() uint64
	// Get reads the variable's value into out.
	Get(out any) error
	// Constant reports whether the variable was declared `const`.
	Constant() bool
}
// VariableSpecs adapts a map of concrete *ebpf.VariableSpec values to a map
// of the VariableSpec interface, keyed by the same variable names.
func VariableSpecs(variables map[string]*ebpf.VariableSpec) map[string]VariableSpec {
	out := make(map[string]VariableSpec, len(variables))
	for name, spec := range variables {
		out[name] = spec
	}
	return out
}
// Reachability returns a copy of blocks annotated with liveness information.
//
// Branches comparing a BPF runtime constant against an immediate are
// predicted, and only the side that can actually execute is considered live.
// A subsequent call to [Blocks.LiveInstructions] will iterate over all
// instructions deemed reachable given the set of VariableSpecs.
//
// Given a piece of code like:
//
//	if (CONFIG(enable_feature_a)) {
//
// or
//
//	if (CONFIG(number_of_something) > 5) {
//
// It looks for the following bytecode:
//
//	LoadMapValue dst: Rx, fd: 0 off: {offset of variable} <{name of global data map}>
//	LdXMem{B,H,W,DW} dst: Ry src: Rx off: 0
//	J{OP}IMM dst: Ry off:{relative jump offset} imm: {constant value}
func Reachability(blocks *Blocks, insns asm.Instructions, variables map[string]VariableSpec) (*Blocks, error) {
	switch {
	case blocks == nil || blocks.count() == 0:
		return nil, errors.New("nil or empty blocks")
	case len(insns) == 0:
		return nil, errors.New("nil or empty instructions")
	case len(blocks.l) != 0:
		return nil, errors.New("reachability already computed")
	}

	// Work on a copy: Blocks are often stored in instruction metadata of a
	// cached CollectionSpec and must not be mutated in place.
	blocks = blocks.Copy()

	// The CollectionSpec identifies variables by name, while instructions
	// refer to them by (map name, offset), so build a reverse index.
	// Non-constant variables are included deliberately; branch evaluation
	// rejects them later, which keeps the analysis conclusive.
	byLocation := make(map[mapOffset]VariableSpec, len(variables))
	for _, spec := range variables {
		key := mapOffset{
			mapName: unique.Make(spec.MapName()),
			offset:  spec.Offset(),
		}
		byLocation[key] = spec
	}

	live := newBitmap(uint64(blocks.count()))

	// The first block is live by definition; recurse from there.
	if err := visitBlock(blocks.first(), insns, byLocation, live); err != nil {
		return nil, fmt.Errorf("predicting blocks: %w", err)
	}
	blocks.l = live

	return blocks, nil
}
// findBranch pulls exactly one instruction and reports whether it is a
// conditional jump comparing a register against an immediate value. Returns
// the instruction on a match, nil otherwise.
func findBranch(pull func() (*asm.Instruction, bool)) *asm.Instruction {
	// A branch can only appear as the last instruction of a block.
	ins, ok := pull()
	if !ok {
		return nil
	}

	// Rule out non-conditional terminators and non-jump opcodes.
	switch ins.OpCode.JumpOp() {
	case asm.Exit, asm.Call, asm.Ja, asm.InvalidJumpOp:
		return nil
	}

	// Register-vs-register comparisons cannot be predicted here; require
	// an immediate operand on the dst register.
	if ins.OpCode.Source() != asm.ImmSource {
		return nil
	}

	return ins
}
// findDereference scans backwards for the memory load (dereference) that
// populates the given dst register.
//
// Since all CONFIG() variables are `volatile`, the compiler should emit a
// fresh dereference before every branch on one. The load typically sits in
// the same basic block as the branch, possibly with a few unrelated
// instructions in between.
func findDereference(pull func() (*asm.Instruction, bool), dst asm.Register) *asm.Instruction {
	for {
		ins, ok := pull()
		if !ok {
			return nil
		}
		if ins.Dst != dst {
			continue
		}
		if op := ins.OpCode; op.Class().IsLoad() && op.Mode() == asm.MemMode {
			// Memory load into the register the branch compares.
			return ins
		}
		// The register was clobbered by something other than a memory load;
		// the pattern cannot match, so stop looking.
		return nil
	}
}
// findMapLoad scans backwards for the map load instruction that populates the
// given src register.
//
// Even though CONFIG() variables are declared volatile, the compiler may
// still reuse the register holding the map pointer for multiple
// dereferences. That load frequently lives in a predecessor block, so the
// pull function must support predecessor traversal.
//
// Note: the compiler should favor reconstructing the map pointer over
// spilling to the stack, so stack spills are not considered.
func findMapLoad(pull func() (*asm.Instruction, bool), src asm.Register) *asm.Instruction {
	for {
		ins, ok := pull()
		if !ok {
			return nil
		}
		if ins.Dst != src {
			continue
		}
		if !ins.IsLoadFromMap() {
			// src was clobbered by a non-map-load; stop looking.
			return nil
		}
		return ins
	}
}
// mapOffset identifies a variable by the name of the global data map backing
// it and the variable's byte offset within that map's value. It serves as
// the lookup key when matching map-load instructions to VariableSpecs.
type mapOffset struct {
	// mapName is the interned name of the backing map.
	mapName unique.Handle[string]
	// offset is the variable's byte offset within the map value.
	offset uint64
}
// unpredictableBlock is called when the branch cannot be predicted. It visits
// both the branch and fallthrough blocks, since either successor may execute
// at runtime.
func unpredictableBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpec, live bitmap) error {
	// Branch-taken successor.
	if err := visitBlock(b.branch, insns, vars, live); err != nil {
		return fmt.Errorf("visiting branch block %d: %w", b.branch.id, err)
	}
	// Fallthrough successor.
	if err := visitBlock(b.fthrough, insns, vars, live); err != nil {
		return fmt.Errorf("visiting fallthrough block %d: %w", b.fthrough.id, err)
	}
	return nil
}
// visitBlock recursively visits a block and its successors to determine
// reachability based on the branch instructions and the provided vars.
//
// It marks b live, then tries to prove b's terminating branch constant by
// matching the branch/dereference/map-load pattern against vars. On success
// only the predicted successor is visited; otherwise both successors are.
func visitBlock(b *Block, insns asm.Instructions, vars map[mapOffset]VariableSpec, live bitmap) error {
	if b == nil {
		return nil
	}

	// Don't evaluate the same block twice, this would lead to an infinite loop.
	// A live block implies a visited block.
	if live.get(b.id) {
		return nil
	}
	live.set(b.id, true)

	// Iterate the block's instructions back-to-front; per findMapLoad, the
	// puller can continue into predecessor blocks when needed.
	pull := b.backward(insns)

	// Step 1: the block must end in a predictable conditional branch.
	branch := findBranch(pull)
	if branch == nil {
		return unpredictableBlock(b, insns, vars, live)
	}

	// Step 2: find the dereference feeding the branch's dst register.
	deref := findDereference(pull, branch.Dst)
	if deref == nil {
		return unpredictableBlock(b, insns, vars, live)
	}

	// Step 3: find the map load feeding the dereference's src register.
	load := findMapLoad(pull, deref.Src)
	if load == nil {
		return unpredictableBlock(b, insns, vars, live)
	}

	// TODO(tb): evalBranch doesn't currently take the deref's offset field into
	// account so it can't deal with variables over 8 bytes in size. Improve it
	// to be more robust and remove this limitation.
	vs := lookupVariable(load, vars)
	if vs == nil || !vs.Constant() || vs.Size() > 8 {
		// Unknown or mutable variable: the branch cannot be predicted.
		return unpredictableBlock(b, insns, vars, live)
	}

	jump, err := evalBranch(branch, vs)
	if err != nil {
		return fmt.Errorf("evaluating branch of block %d: %w", b.id, err)
	}

	// If the branch is always taken, only visit the branch target.
	if jump {
		b.predict = 1
		return visitBlock(b.branch, insns, vars, live)
	}

	// Otherwise, only visit the fallthrough target.
	b.predict = 2
	return visitBlock(b.fthrough, insns, vars, live)
}
// lookupVariable resolves the VariableSpec referenced by the given map-load
// instruction, or nil if no spec is known for that (map, offset) pair.
//
// A lookup failure doesn't mean there's a bug in our code or in the BPF
// program. ebpf-go only emits VariableSpecs for symbols with global
// visibility, so function-scoped variables and many other symbols in .bss
// may not have an associated VariableSpec.
func lookupVariable(load *asm.Instruction, vars map[mapOffset]VariableSpec) VariableSpec {
	// The upper 32 bits of a map load's Constant carry the offset into the
	// map value.
	key := mapOffset{
		mapName: unique.Make(load.Reference()),
		offset:  uint64(load.Constant >> 32),
	}
	// A missing entry yields a nil interface value.
	return vars[key]
}
// evalBranch evaluates the branch instruction based on the value of the
// variable it refers to.
//
// Returns true if the branch is always taken, false if it is never taken.
func evalBranch(branch *asm.Instruction, vs VariableSpec) (bool, error) {
	// Extract the variable value. BPF LdXMem{B,H,W} zero-extends the loaded
	// value into the destination register, so model sub-8-byte variables as
	// unsigned and widen them without sign extension to mirror the runtime
	// register contents.
	var (
		value int64
		err   error
	)
	switch vs.Size() {
	case 1:
		var value8 uint8
		err = vs.Get(&value8)
		value = int64(value8)
	case 2:
		var value16 uint16
		err = vs.Get(&value16)
		value = int64(value16)
	case 4:
		var value32 uint32
		err = vs.Get(&value32)
		value = int64(value32)
	case 8:
		var value64 int64
		err = vs.Get(&value64)
		value = value64
	default:
		return false, fmt.Errorf("jump instruction on variable %v of size %d?", vs, vs.Size())
	}
	if err != nil {
		return false, fmt.Errorf("getting value of variable: %w", err)
	}

	// Now lets determine if the branch is always taken or never taken.
	var jump bool
	switch op := branch.OpCode.JumpOp(); op {
	case asm.JEq, asm.JNE:
		jump = value == branch.Constant
		if op == asm.JNE {
			jump = !jump
		}
	// JGT/JLE/JLT/JGE are unsigned comparisons in BPF; compare as uint64.
	case asm.JGT, asm.JLE:
		jump = uint64(value) > uint64(branch.Constant)
		if op == asm.JLE {
			jump = !jump
		}
	case asm.JLT, asm.JGE:
		jump = uint64(value) < uint64(branch.Constant)
		if op == asm.JGE {
			jump = !jump
		}
	// JSGT/JSLE/JSLT/JSGE are the signed variants; compare as int64.
	case asm.JSGT, asm.JSLE:
		jump = value > branch.Constant
		if op == asm.JSLE {
			jump = !jump
		}
	case asm.JSLT, asm.JSGE:
		jump = value < branch.Constant
		if op == asm.JSGE {
			jump = !jump
		}
	case asm.JSet:
		jump = value&branch.Constant != 0
	default:
		return false, fmt.Errorf("unsupported jump instruction: %v", branch)
	}

	return jump, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package analyze
import (
"fmt"
"iter"
"maps"
"slices"
"github.com/cilium/ebpf/asm"
)
// A target is the destination of a jump instruction. It is initially known
// only by its raw instruction offset, and is later resolved to a logical
// index in the instruction stream. In subsequent passes, targets are
// also marked as leaders since they are the start of a new basic block.
type target struct {
	// index is the index of the logical instruction in the instruction stream.
	index int
	// ins is the instruction at that index, i.e. the jump destination itself.
	ins *asm.Instruction
}
// rawTargets is a map of raw instruction offsets to targets. It is used to
// collect jump targets in the first pass of the basic block analysis.
//
// The raw instruction offset is the offset of the instruction in the raw
// bytecode, which is not necessarily the same as its index in
// [asm.Instructions] since some instructions can be larger than the standard
// instruction size (e.g. dword loads).
//
// A nil value marks an offset that has been recorded but not yet resolved to
// a concrete instruction; see [rawTargets.add] and [rawTargets.resolve].
type rawTargets map[asm.RawInstructionOffset]*target
// add records raw as the destination of some jump instruction. An offset that
// is already present is left untouched, so a prior resolution survives.
func (rt rawTargets) add(raw asm.RawInstructionOffset) {
	if _, seen := rt[raw]; seen {
		return
	}
	rt[raw] = nil
}
// resolve associates the raw offset with the target instruction at the given
// logical index. An offset that was already resolved is left as-is.
func (rt rawTargets) resolve(raw asm.RawInstructionOffset, index int, ins *asm.Instruction) {
	if rt[raw] != nil {
		// Already resolved earlier.
		return
	}
	rt[raw] = &target{index: index, ins: ins}
}
// get returns the target recorded for the given raw instruction offset, or
// nil when the offset is unknown (or not yet resolved).
func (rt rawTargets) get(raw asm.RawInstructionOffset) *target {
	t, ok := rt[raw]
	if !ok {
		return nil
	}
	return t
}
// keysSorted returns an iterator over the raw instruction offsets in
// ascending order. Deterministic ordering matters: offsets must be processed
// in the order they appear in the instruction stream for jump targets to be
// resolved correctly.
func (rt rawTargets) keysSorted() iter.Seq[asm.RawInstructionOffset] {
	return func(yield func(asm.RawInstructionOffset) bool) {
		// Sort lazily, when iteration actually starts.
		sorted := slices.Sorted(maps.Keys(rt))
		for _, key := range sorted {
			if !yield(key) {
				return
			}
		}
	}
}
// jumpTarget calculates the destination of a jump instruction from the
// current raw instruction offset and the offset or constant encoded in the
// instruction. It returns the destination offset and a boolean indicating
// whether the instruction branches at all.
//
// Returns false if the instruction does not branch.
func jumpTarget(raw asm.RawInstructionOffset, ins *asm.Instruction) (asm.RawInstructionOffset, bool) {
	op := ins.OpCode
	jump := op.JumpOp()

	// Branches only originate from jump-class instructions. Exits terminate
	// execution, and calls continue at the next instruction, so neither
	// produces a branch target.
	if !op.Class().IsJump() || jump == asm.Exit || jump == asm.Call {
		return 0, false
	}

	// Destination = current raw offset + encoded instruction offset + 1.
	dest := int64(raw) + int64(ins.Offset) + 1

	// A jump32 + JA is a 'long jump' with an offset larger than a u16. This is
	// encoded in the Constant field.
	if op.Class() == asm.Jump32Class && jump == asm.Ja {
		dest = int64(raw) + ins.Constant + 1
	}

	if dest < 0 {
		panic(fmt.Sprintf("negative jump target %d, raw: %d, insn: %s", dest, raw, ins))
	}

	return asm.RawInstructionOffset(dest), true
}
// canFallthrough reports whether execution can continue at the instruction
// following ins. Everything falls through except unconditional jumps and
// exits; a nil instruction never falls through.
func canFallthrough(ins *asm.Instruction) bool {
	if ins == nil {
		return false
	}
	switch ins.OpCode.JumpOp() {
	case asm.Ja, asm.Exit:
		return false
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"sync/atomic"
"github.com/cilium/ebpf"
"golang.org/x/sys/unix"
)
var (
	// preAllocateMapSetting holds the flag applied to map types that support
	// toggling pre-allocation; defaults to pre-allocation disabled.
	preAllocateMapSetting uint32 = unix.BPF_F_NO_PREALLOC
	// noCommonLRUMapSetting holds the flag applied to LRU map types;
	// defaults to the common (shared) LRU backend.
	noCommonLRUMapSetting uint32 = 0
)
// EnableMapPreAllocation enables BPF map pre-allocation on map types that
// support it. This does not take effect on existing map although some map
// types could be recreated later when objCheck() runs.
func EnableMapPreAllocation() {
	// Clearing BPF_F_NO_PREALLOC makes the kernel pre-allocate entries.
	atomic.StoreUint32(&preAllocateMapSetting, 0)
}
// DisableMapPreAllocation disables BPF map pre-allocation as a default
// setting. Some map types enforce a pre-alloc strategy so this does not
// take effect in that case. Also note that this does not take effect on
// existing maps, although they could be recreated later when objCheck() runs.
func DisableMapPreAllocation() {
	atomic.StoreUint32(&preAllocateMapSetting, unix.BPF_F_NO_PREALLOC)
}
// EnableMapDistributedLRU enables the LRU map no-common-LRU feature which
// splits backend memory pools among CPUs to avoid sharing a common backend
// pool where frequent allocation/frees might contend on internal spinlocks.
func EnableMapDistributedLRU() {
	atomic.StoreUint32(&noCommonLRUMapSetting, unix.BPF_F_NO_COMMON_LRU)
}
// DisableMapDistributedLRU disables the LRU map no-common-LRU feature which
// is the default case.
func DisableMapDistributedLRU() {
	atomic.StoreUint32(&noCommonLRUMapSetting, 0)
}
// GetMapMemoryFlags returns the map memory allocation flags the user
// requested for the given map type.
func GetMapMemoryFlags(t ebpf.MapType) uint32 {
	switch t {
	case ebpf.LPMTrie:
		// LPM Tries don't support preallocation.
		return unix.BPF_F_NO_PREALLOC
	case ebpf.Hash, ebpf.PerCPUHash, ebpf.HashOfMaps:
		// Preallocation can be toggled for these hash map types.
		return atomic.LoadUint32(&preAllocateMapSetting)
	case ebpf.LRUHash, ebpf.LRUCPUHash:
		// Distributed (no-common) LRU backend memory can be toggled.
		return atomic.LoadUint32(&noCommonLRUMapSetting)
	default:
		return 0
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build linux
package bpf
import (
"errors"
"fmt"
"log/slog"
"os"
"path"
"github.com/cilium/ebpf"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/spanstat"
)
// createMap wraps a call to ebpf.NewMapWithOptions while measuring syscall duration.
func createMap(spec *ebpf.MapSpec, opts *ebpf.MapOptions) (*ebpf.Map, error) {
	if opts == nil {
		opts = &ebpf.MapOptions{}
	}

	// Sample the metric toggle exactly once: if it flipped from disabled to
	// enabled between a pre-call and post-call check, duration would be nil
	// when dereferenced below.
	measure := metrics.BPFSyscallDuration.IsEnabled()
	var duration *spanstat.SpanStat
	if measure {
		duration = spanstat.Start()
	}

	m, err := ebpf.NewMapWithOptions(spec, *opts)

	if measure {
		metrics.BPFSyscallDuration.WithLabelValues(metricOpCreate, metrics.Error2Outcome(err)).Observe(duration.End(err == nil).Total().Seconds())
	}

	return m, err
}
// objCheck compares the properties of the existing (pinned) map m at path
// against the requested type, key/value sizes, capacity and flags. On any
// mismatch the pin is removed from bpffs so the caller can recreate the map,
// and true is returned. Program arrays are never removed (returns false
// despite the mismatch) since they are handled differently by the caller.
func objCheck(logger *slog.Logger, m *ebpf.Map, path string, mapType ebpf.MapType, keySize, valueSize, maxEntries, flags uint32) bool {
	scopedLogger := logger.With(logfields.Path, path)
	mismatch := false

	if m.Type() != mapType {
		scopedLogger.Warn("Map type mismatch for BPF map",
			logfields.Old, m.Type(),
			logfields.New, mapType,
		)
		mismatch = true
	}

	if m.KeySize() != keySize {
		scopedLogger.Warn("Key-size mismatch for BPF map",
			logfields.Old, m.KeySize(),
			logfields.New, keySize,
		)
		mismatch = true
	}

	if m.ValueSize() != valueSize {
		scopedLogger.Warn("Value-size mismatch for BPF map",
			logfields.Old, m.ValueSize(),
			logfields.New, valueSize,
		)
		mismatch = true
	}

	if m.MaxEntries() != maxEntries {
		scopedLogger.Warn("Max entries mismatch for BPF map",
			logfields.Old, m.MaxEntries(),
			logfields.New, maxEntries,
		)
		mismatch = true
	}

	if m.Flags() != flags {
		scopedLogger.Warn("Flags mismatch for BPF map",
			logfields.Old, m.Flags(),
			logfields.New, flags,
		)
		mismatch = true
	}

	if mismatch {
		if m.Type() == ebpf.ProgramArray {
			return false
		}

		scopedLogger.Warn("Removing map to allow for property upgrade (expect map data loss)")

		// Kernel still holds map reference count via attached prog.
		// Only exception is prog array, but that is already resolved
		// differently.
		os.Remove(path)
		return true
	}

	return false
}
// OpenOrCreateMap attempts to load the pinned map at "pinDir/<spec.Name>" if
// the spec is marked as Pinned. Any parent directories of pinDir are
// automatically created. Any pinned maps incompatible with the given spec are
// removed and recreated.
//
// If spec.Pinning is 0, a new Map is always created.
func OpenOrCreateMap(logger *slog.Logger, spec *ebpf.MapSpec, pinDir string) (*ebpf.Map, error) {
	var opts ebpf.MapOptions
	if spec.Pinning != 0 {
		if pinDir == "" {
			return nil, errors.New("cannot pin map to empty pinDir")
		}
		if spec.Name == "" {
			return nil, errors.New("cannot load unnamed map from pin")
		}

		if err := MkdirBPF(pinDir); err != nil {
			return nil, fmt.Errorf("creating map base pinning directory: %w", err)
		}

		opts.PinPath = pinDir
	}

	m, err := createMap(spec, &opts)
	if errors.Is(err, ebpf.ErrMapIncompatible) {
		// Found incompatible map. Open the pin again to find out why.
		// Note: this m deliberately shadows the outer m within this branch.
		m, err := ebpf.LoadPinnedMap(path.Join(pinDir, spec.Name), nil)
		if err != nil {
			return nil, fmt.Errorf("open pin of incompatible map: %w", err)
		}
		defer m.Close()

		logger.Info(
			"Unpinning map with incompatible properties",
			logfields.Path, path.Join(pinDir, spec.Name),
			logfields.Old, []any{
				logfields.Type, m.Type(),
				logfields.KeySize, m.KeySize(),
				logfields.ValueSize, m.ValueSize(),
				logfields.MaxEntries, m.MaxEntries(),
				logfields.Flags, m.Flags(),
			},
			logfields.New, []any{
				logfields.Type, spec.Type,
				logfields.KeySize, spec.KeySize,
				logfields.ValueSize, spec.ValueSize,
				logfields.MaxEntries, spec.MaxEntries,
				logfields.Flags, spec.Flags,
			},
		)

		// Existing map incompatible with spec. Unpin so it can be recreated.
		if err := m.Unpin(); err != nil {
			return nil, err
		}

		return createMap(spec, &opts)
	}

	return m, err
}
// GetMtime returns monotonic time that can be used to compare
// values with ktime_get_ns() BPF helper, e.g. needed to check
// the timeout in sec for BPF entries. We return the raw nsec,
// although that is not quite usable for comparison. Go has
// runtime.nanotime() but doesn't expose it as API.
func GetMtime() (uint64, error) {
	var ts unix.Timespec

	// CLOCK_MONOTONIC matches the clock read by bpf_ktime_get_ns().
	if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
		// Lowercase, contextual error per Go conventions (was "Unable get time").
		return 0, fmt.Errorf("getting monotonic clock time: %w", err)
	}

	return uint64(unix.TimespecToNsec(ts)), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build linux
package bpf
import (
"errors"
"fmt"
"log/slog"
"os"
"path/filepath"
"sync"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/components"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/mountinfo"
)
var (
	// Path to where bpffs is mounted
	bpffsRoot = defaults.BPFFSRoot

	// Set to true on first get request to detect misorder
	lockedDown = false
	once       sync.Once

	// readMountInfo guards the one-time mount-info scan performed by
	// tcPathFromMountInfo; the discovered prefix is cached below.
	readMountInfo   sync.Once
	mountInfoPrefix string
)
// lockDown marks bpffsRoot as having been read: any later attempt to change
// it via setBPFFSRoot will panic, surfacing misordered initialization.
func lockDown() {
	lockedDown = true
}
// setBPFFSRoot changes the bpffs mount root. It panics when called after the
// root has already been read (locked down), since consumers may have cached
// paths derived from the old value.
func setBPFFSRoot(path string) {
	if lockedDown {
		panic("setBPFFSRoot() call after bpffsRoot was read")
	}
	bpffsRoot = path
}
// BPFFSRoot returns the path where bpffs is mounted, locking it against
// further modification.
func BPFFSRoot() string {
	once.Do(lockDown)
	return bpffsRoot
}
// TCGlobalsPath returns the absolute path to <bpffs>/tc/globals, used for
// legacy map pin paths. Reading the path locks the bpffs root against
// further modification.
func TCGlobalsPath() string {
	once.Do(lockDown)
	return filepath.Join(bpffsRoot, defaults.TCGlobalsPath)
}
// CiliumPath returns the bpffs path to be used for Cilium object pins,
// i.e. <bpffs>/cilium. Reading the path locks the bpffs root against
// further modification.
func CiliumPath() string {
	once.Do(lockDown)
	return filepath.Join(bpffsRoot, "cilium")
}
// MkdirBPF wraps [os.MkdirAll] with the right permission bits for bpffs.
// Use this for ensuring the existence of directories on bpffs.
func MkdirBPF(path string) error {
	return os.MkdirAll(path, 0755)
}
// Remove path ignoring ErrNotExist.
func Remove(path string) error {
err := os.RemoveAll(path)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("removing bpffs directory at %s: %w", path, err)
}
return err
}
// tcPathFromMountInfo returns the tc/globals path for the given map name by
// locating the first bpf filesystem in the host's mount table. The scan runs
// once; its result is cached in mountInfoPrefix. The process is terminated
// if mount info cannot be read or no bpf mount exists.
func tcPathFromMountInfo(logger *slog.Logger, name string) string {
	readMountInfo.Do(func() {
		mountInfos, err := mountinfo.GetMountInfo()
		if err != nil {
			logging.Fatal(logger, "Could not get mount info for map root lookup", logfields.Error, err)
		}

		for _, mountInfo := range mountInfos {
			if mountInfo.FilesystemType == "bpf" {
				mountInfoPrefix = filepath.Join(mountInfo.MountPoint, defaults.TCGlobalsPath)
				return
			}
		}

		logging.Fatal(logger, "Could not find BPF map root")
	})

	return filepath.Join(mountInfoPrefix, name)
}
// MapPath returns a path for a BPF map with a given name. Inside the agent
// the configured bpffs root is used directly; other binaries discover the
// bpf mount from the host's mount table instead.
func MapPath(logger *slog.Logger, name string) string {
	if components.IsCiliumAgent() {
		once.Do(lockDown)
		return filepath.Join(TCGlobalsPath(), name)
	}
	return tcPathFromMountInfo(logger, name)
}
// LocalMapName returns the name for a BPF map that is local to the specified
// ID: the ID is rendered as a zero-padded five-digit suffix of name.
func LocalMapName(name string, id uint16) string {
	suffixed := fmt.Sprintf("%s%05d", name, id)
	return suffixed
}
// LocalMapPath returns the path for a BPF map that is local to the specified
// ID, combining MapPath with the ID-suffixed LocalMapName.
func LocalMapPath(logger *slog.Logger, name string, id uint16) string {
	return MapPath(logger, LocalMapName(name, id))
}
var (
	// mountOnce ensures CheckOrMountFS performs the mount procedure at most once.
	mountOnce sync.Once
)
// mountFS mounts the BPFFS filesystem into the desired mapRoot directory,
// creating the directory if it does not exist. If printWarning is set, a
// banner is logged first explaining the consequences of bpffs not having
// been mounted by the host.
func mountFS(logger *slog.Logger, printWarning bool) error {
	if printWarning {
		logger.Warn("================================= WARNING ==========================================")
		logger.Warn("BPF filesystem is not mounted. This will lead to network disruption when Cilium pods")
		logger.Warn("are restarted. Ensure that the BPF filesystem is mounted in the host.")
		logger.Warn("https://docs.cilium.io/en/stable/operations/system_requirements/#mounted-ebpf-filesystem")
		logger.Warn("====================================================================================")
	}

	logger.Info("Mounting BPF filesystem", logfields.BPFFSRoot, bpffsRoot)

	// Ensure the mount point exists and is a directory.
	mapRootStat, err := os.Stat(bpffsRoot)
	if err != nil {
		if os.IsNotExist(err) {
			if err := MkdirBPF(bpffsRoot); err != nil {
				return fmt.Errorf("unable to create bpf mount directory: %w", err)
			}
		} else {
			return fmt.Errorf("failed to stat the mount path %s: %w", bpffsRoot, err)
		}
	} else if !mapRootStat.IsDir() {
		return fmt.Errorf("%s is a file which is not a directory", bpffsRoot)
	}

	if err := unix.Mount(bpffsRoot, bpffsRoot, "bpf", 0, ""); err != nil {
		return fmt.Errorf("failed to mount %s: %w", bpffsRoot, err)
	}
	return nil
}
// hasMultipleMounts reports whether more than one mount exists at the
// current bpffsRoot.
func hasMultipleMounts() (bool, error) {
	mountInfos, err := mountinfo.GetMountInfo()
	if err != nil {
		return false, err
	}

	count := 0
	for _, mi := range mountInfos {
		if mi.Root == "/" && mi.MountPoint == bpffsRoot {
			count++
			if count > 1 {
				return true, nil
			}
		}
	}
	return false, nil
}
// checkOrMountCustomLocation tries to check or mount the BPF filesystem in the
// given path.
func checkOrMountCustomLocation(logger *slog.Logger, bpfRoot string) error {
	setBPFFSRoot(bpfRoot)

	// Check whether the custom location has a BPFFS mount.
	mounted, bpffsInstance, err := mountinfo.IsMountFS(mountinfo.FilesystemTypeBPFFS, bpfRoot)
	if err != nil {
		return err
	}

	// If the custom location has no mount, let's mount BPFFS there.
	// (bpffsRoot was already set above; the previous redundant second
	// setBPFFSRoot call here has been removed.)
	if !mounted {
		if err := mountFS(logger, true); err != nil {
			return err
		}

		return nil
	}

	// If the custom location already has a mount with some other filesystem than
	// BPFFS, return the error.
	if !bpffsInstance {
		return fmt.Errorf("mount in the custom directory %s has a different filesystem than BPFFS", bpfRoot)
	}

	logger.Info("Detected mounted BPF filesystem", logfields.BPFFSRoot, bpffsRoot)

	return nil
}
// checkOrMountDefaultLocations tries to check or mount the BPF filesystem in
// standard locations, which are:
// - /sys/fs/bpf
// - /run/cilium/bpffs
// There is a procedure of determining which directory is going to be used:
//  1. Checking whether BPFFS filesystem is mounted in /sys/fs/bpf.
//  2. If there is no mount, then mount BPFFS in /sys/fs/bpf and finish there.
//  3. If there is a BPFFS mount, finish there.
//  4. If there is a mount, but with the other filesystem, then it means that most
//     probably Cilium is running inside container which has mounted /sys/fs/bpf
//     from host, but host doesn't have proper BPFFS mount, so that mount is just
//     the empty directory. In that case, mount BPFFS under /run/cilium/bpffs.
func checkOrMountDefaultLocations(logger *slog.Logger) error {
	// Check whether /sys/fs/bpf has a BPFFS mount.
	mounted, bpffsInstance, err := mountinfo.IsMountFS(mountinfo.FilesystemTypeBPFFS, bpffsRoot)
	if err != nil {
		return err
	}

	// If /sys/fs/bpf is not mounted at all, we should mount
	// BPFFS there.
	if !mounted {
		if err := mountFS(logger, false); err != nil {
			return err
		}

		return nil
	}

	if !bpffsInstance {
		// If /sys/fs/bpf has a mount but with some other filesystem
		// than BPFFS, it means that Cilium is running inside container
		// and /sys/fs/bpf is not mounted on host. We should mount BPFFS
		// in /run/cilium/bpffs automatically. This will allow operation
		// of Cilium but will result in unmounting of the filesystem
		// when the pod is restarted. This in turn will cause resources
		// such as the connection tracking table of the BPF programs to
		// be released which will cause all connections into local
		// containers to be dropped. User is going to be warned.
		logger.Warn(fmt.Sprintf("BPF filesystem is going to be mounted automatically "+
			"in %s. However, it probably means that Cilium is running "+
			"inside container and BPFFS is not mounted on the host. "+
			"for more information, see: https://cilium.link/err-bpf-mount",
			defaults.BPFFSRootFallback,
		),
		)

		setBPFFSRoot(defaults.BPFFSRootFallback)

		// The fallback root may itself already carry a mount; only mount
		// BPFFS when it doesn't, and abort when a foreign filesystem
		// occupies it.
		cMounted, cBpffsInstance, err := mountinfo.IsMountFS(mountinfo.FilesystemTypeBPFFS, bpffsRoot)
		if err != nil {
			return err
		}
		if !cMounted {
			if err := mountFS(logger, false); err != nil {
				return err
			}
		} else if !cBpffsInstance {
			logging.Fatal(logger, fmt.Sprintf("%s is mounted but has a different filesystem than BPFFS", defaults.BPFFSRootFallback))
		}
	}

	logger.Info("Detected mounted BPF filesystem", logfields.BPFFSRoot, bpffsRoot)

	return nil
}
// checkOrMountFS mounts bpffs at either the default or a user-supplied
// location, then verifies there is exactly one mount at the resulting root.
func checkOrMountFS(logger *slog.Logger, bpfRoot string) error {
	useDefaults := bpfRoot == "" || bpfRoot == defaults.BPFFSRoot
	if useDefaults {
		if err := checkOrMountDefaultLocations(logger); err != nil {
			return err
		}
	} else if err := checkOrMountCustomLocation(logger, bpfRoot); err != nil {
		return err
	}

	multiple, err := hasMultipleMounts()
	if err != nil {
		return err
	}
	if multiple {
		return fmt.Errorf("multiple mount points detected at %s", bpffsRoot)
	}

	return nil
}
// CheckOrMountFS checks or mounts the BPF filesystem and then
// opens/creates/deletes all maps which have previously been scheduled to be
// opened/created/deleted.
//
// The mount procedure runs at most once per process; failure is fatal and
// terminates the process.
func CheckOrMountFS(logger *slog.Logger, bpfRoot string) {
	mountOnce.Do(func() {
		if err := checkOrMountFS(logger, bpfRoot); err != nil {
			logging.Fatal(logger, "Unable to mount BPF filesystem", logfields.Error, err)
		}
	})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import "github.com/cilium/hive/cell"
// BpfMap defines the base interface every BPF map needs to implement.
//
// Its main purpose is to register a BPF map via value group `bpf-maps`. See [MapOut].
// It is intentionally `any`: no behavior is required, only registration.
type BpfMap any
// MapOut ensures that maps are created before the datapath loader
// is invoked.
type MapOut[T any] struct {
	cell.Out

	// Map is the typed map value, provided to consumers directly.
	Map T
	// BpfMap registers the same value into the `bpf-maps` value group.
	BpfMap BpfMap `group:"bpf-maps"`
}
// NewMapOut wraps m in a MapOut, exposing it both as a typed output and as a
// member of the `bpf-maps` value group.
func NewMapOut[T any](m T) MapOut[T] {
	out := MapOut[T]{Map: m, BpfMap: m}
	return out
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"context"
"errors"
"fmt"
"log/slog"
"os"
"strings"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
"github.com/cilium/cilium/pkg/bpf/analyze"
"github.com/cilium/cilium/pkg/container/set"
"github.com/cilium/cilium/pkg/datapath/config"
"github.com/cilium/cilium/pkg/logging/logfields"
)
const (
	// callsMap is the name of the program-array map holding tail call targets.
	callsMap = "cilium_calls"
)
// LoadCollectionSpec loads the eBPF ELF at the given path and parses it into a
// CollectionSpec. This spec is only a blueprint of the contents of the ELF and
// does not represent any live resources that have been loaded into the kernel.
//
// This is a wrapper around ebpf.LoadCollectionSpec that populates the object's
// calls map with programs marked with the __declare_tail() annotation. It
// performs static reachability analysis of tail call programs. Any unreachable
// tail call program is removed from the spec.
func LoadCollectionSpec(logger *slog.Logger, path string) (*ebpf.CollectionSpec, error) {
	spec, err := ebpf.LoadCollectionSpec(path)
	if err != nil {
		return nil, err
	}

	// Reject specs containing programs without a recognized annotation.
	if err := checkUnspecifiedPrograms(spec); err != nil {
		return nil, fmt.Errorf("checking for unspecified programs: %w", err)
	}

	// Prune tail calls not reachable from any entrypoint before wiring up
	// the calls map, so unreachable programs never get loaded.
	if err := removeUnreachableTailcalls(logger, spec); err != nil {
		return nil, fmt.Errorf("removing unreachable tail calls: %w", err)
	}

	if err := resolveTailCalls(spec); err != nil {
		return nil, fmt.Errorf("resolving tail calls: %w", err)
	}

	return spec, nil
}
// checkUnspecifiedPrograms returns an error if any of the programs in the
// spec lack a concrete program type, i.e. are missing a __section_entry or
// __declare_tail() annotation.
func checkUnspecifiedPrograms(spec *ebpf.CollectionSpec) error {
	for _, prog := range spec.Programs {
		if prog.Type != ebpf.UnspecifiedProgram {
			continue
		}
		return fmt.Errorf("program %s has unspecified type: annotate with __section_entry or __declare_tail()", prog.Name)
	}
	return nil
}
// isEntrypoint returns true if the program is marked with the __section_entry
// annotation, i.e. its section name ends in "/entry".
func isEntrypoint(prog *ebpf.ProgramSpec) bool {
	const entrySuffix = "/entry"
	return strings.HasSuffix(prog.SectionName, entrySuffix)
}
// isTailCall returns true if the program is marked with the __declare_tail()
// annotation, i.e. its section name ends in "/tail".
func isTailCall(prog *ebpf.ProgramSpec) bool {
	const tailSuffix = "/tail"
	return strings.HasSuffix(prog.SectionName, tailSuffix)
}
// tailCallSlot returns the tail call slot for the given program, which must be
// marked with the __declare_tail() annotation. The slot is the index in the
// calls map that the program will be called from.
//
// The slot number is carried in a function tag of the form
// "tail:cilium_calls/<slot>" attached to the program's first instruction.
func tailCallSlot(prog *ebpf.ProgramSpec) (uint32, error) {
	if !isTailCall(prog) {
		return 0, fmt.Errorf("program %s is not a tail call", prog.Name)
	}

	// Function metadata lives on the program's first instruction.
	fn := btf.FuncMetadata(&prog.Instructions[0])
	if fn == nil {
		return 0, fmt.Errorf("program %s has no function metadata", prog.Name)
	}

	for _, tag := range fn.Tags {
		var slot uint32
		// Parse tags like "tail:cilium_calls/7" into the slot number.
		if _, err := fmt.Sscanf(tag, fmt.Sprintf("tail:%s/%%v", callsMap), &slot); err == nil {
			return slot, nil
		}
	}

	return 0, fmt.Errorf("program %s has no tail call slot", prog.Name)
}
// resolveTailCalls populates the calls map with Programs marked with the
// __declare_tail annotation, keyed by their declared slot. Duplicate slots
// are rejected.
func resolveTailCalls(spec *ebpf.CollectionSpec) error {
	// If cilium_calls map is missing, do nothing.
	ms := spec.Maps[callsMap]
	if ms == nil {
		return nil
	}

	if ms.Type != ebpf.ProgramArray {
		return fmt.Errorf("%s is not a program array, got %s", callsMap, ms.Type)
	}

	// Track seen slots to detect two programs declaring the same index.
	slots := make(map[uint32]struct{})

	for name, prog := range spec.Programs {
		if !isTailCall(prog) {
			continue
		}

		slot, err := tailCallSlot(prog)
		if err != nil {
			return fmt.Errorf("getting tail call slot: %w", err)
		}

		if _, ok := slots[slot]; ok {
			return fmt.Errorf("duplicate tail call slot %d", slot)
		}
		slots[slot] = struct{}{}

		// Reference the program by name; it is resolved at load time.
		ms.Contents = append(ms.Contents, ebpf.MapKV{Key: slot, Value: name})
	}

	return nil
}
// removeUnreachableTailcalls removes tail calls that are not reachable from
// entrypoint programs. This is done by traversing the call graph of the
// entrypoint programs and marking all reachable tail calls. Any tail call that
// is not marked is removed from the CollectionSpec.
func removeUnreachableTailcalls(logger *slog.Logger, spec *ebpf.CollectionSpec) error {
	// tail tracks discovery state for a single tail call program.
	type tail struct {
		referenced bool // reachable from an entrypoint, directly or transitively
		visited bool // this program's own instructions have already been scanned
		spec *ebpf.ProgramSpec
	}
	// Build a map of entrypoint programs annotated with __section_entry.
	entrypoints := make(map[string]*ebpf.ProgramSpec)
	for _, prog := range spec.Programs {
		if isEntrypoint(prog) {
			entrypoints[prog.Name] = prog
		}
	}
	// Build a map of tail call slots to ProgramSpecs.
	tailcalls := make(map[uint32]*tail)
	for _, prog := range spec.Programs {
		if !isTailCall(prog) {
			continue
		}
		slot, err := tailCallSlot(prog)
		if err != nil {
			return fmt.Errorf("getting tail call slot: %w", err)
		}
		tailcalls[slot] = &tail{
			spec: prog,
		}
	}
	// Discover all tailcalls that are reachable from the given program.
	// Marks the target's 'referenced' flag for each static tail call found.
	visit := func(prog *ebpf.ProgramSpec, tailcalls map[uint32]*tail) error {
		// We look back from any tailcall, so we expect there to always be 3 instructions ahead of any tail call instr.
		for i := 3; i < len(prog.Instructions); i++ {
			// The `tail_call_static` C function is always used to call tail calls when
			// the map index is known at compile time.
			// Due to inline ASM this generates the following instructions:
			//   Mov R1, Rx
			//   Mov R2, <map>
			//   Mov R3, <index>
			//   call tail_call
			// Find the tail call instruction.
			inst := prog.Instructions[i]
			if !inst.IsBuiltinCall() || inst.Constant != int64(asm.FnTailCall) {
				continue
			}
			// Check that the previous instruction is a mov of the tail call index.
			movIdx := prog.Instructions[i-1]
			if movIdx.OpCode.ALUOp() != asm.Mov || movIdx.Dst != asm.R3 {
				continue
			}
			// Check that the instruction before that is the load of the tail call map.
			movR2 := prog.Instructions[i-2]
			if movR2.OpCode != asm.LoadImmOp(asm.DWord) || movR2.Src != asm.PseudoMapFD {
				continue
			}
			ref := movR2.Reference()
			// Ignore static tail calls made to maps that are not the calls map
			if ref != callsMap {
				logger.Debug(
					"skipping tail call into map other than the calls map",
					logfields.Section, prog.SectionName,
					logfields.Prog, prog.Name,
					logfields.Instruction, i,
					logfields.Reference, ref,
				)
				continue
			}
			// A call into a slot with no corresponding program is a hard error:
			// it would fail silently at runtime.
			tc := tailcalls[uint32(movIdx.Constant)]
			if tc == nil {
				return fmt.Errorf(
					"potential missed tail call in program %s to slot %d at insn %d",
					prog.Name,
					movIdx.Constant,
					i,
				)
			}
			tc.referenced = true
		}
		return nil
	}
	// Discover all tailcalls that are reachable from the entrypoints.
	for _, prog := range entrypoints {
		if err := visit(prog, tailcalls); err != nil {
			return err
		}
	}
	// Keep visiting tailcalls until no more are discovered.
	// This is a fixpoint iteration: tail calls can themselves tail-call others.
reset:
	for _, tailcall := range tailcalls {
		// If a tailcall is referenced by an entrypoint or another tailcall we should visit it
		if tailcall.referenced && !tailcall.visited {
			if err := visit(tailcall.spec, tailcalls); err != nil {
				return err
			}
			tailcall.visited = true
			// Visiting this tail call might have caused tail calls earlier in the list to become referenced, but this
			// loop already skipped them. So reset the loop. If we already visited a tailcall we will ignore them anyway.
			goto reset
		}
	}
	// Remove all tailcalls that are not referenced.
	for _, tailcall := range tailcalls {
		if !tailcall.referenced {
			logger.Debug(
				"unreferenced tail call, deleting",
				logfields.Section, tailcall.spec.SectionName,
				logfields.Prog, tailcall.spec.Name,
			)
			delete(spec.Programs, tailcall.spec.Name)
		}
	}
	return nil
}
// LoadAndAssign loads spec into the kernel and assigns the requested eBPF
// objects to the given object. It is a wrapper around [LoadCollection]. See its
// documentation for more details on the loading process.
func LoadAndAssign(logger *slog.Logger, to any, spec *ebpf.CollectionSpec, opts *CollectionOptions) (func() error, error) {
	fields, err := analyze.Fields(to)
	if err != nil {
		return nil, fmt.Errorf("analyzing fields of %T: %w", to, err)
	}
	opts.Keep = fields

	coll, commit, err := LoadCollection(logger, spec, opts)

	// On verifier rejection, surface the full verifier log on stderr before
	// wrapping the error below.
	var verr *ebpf.VerifierError
	if errors.As(err, &verr) {
		if _, werr := fmt.Fprintf(os.Stderr, "Verifier error: %s\nVerifier log: %+v\n", err, verr); werr != nil {
			return nil, fmt.Errorf("writing verifier log to stderr: %w", werr)
		}
	}
	if err != nil {
		return nil, fmt.Errorf("loading eBPF collection into the kernel: %w", err)
	}

	if err := coll.Assign(to); err != nil {
		return nil, fmt.Errorf("assigning eBPF objects to %T: %w", to, err)
	}
	return commit, nil
}
// CollectionOptions augments ebpf.CollectionOptions with Cilium-specific
// loading behaviour: runtime-config constant substitution, map renames and
// replacements, and reachability pruning.
type CollectionOptions struct {
	ebpf.CollectionOptions
	// Replacements for datapath runtime configs declared using DECLARE_CONFIG.
	// Pass a pointer to a populated object from pkg/datapath/config.
	Constants any
	// Maps to be renamed during loading. Key is the key in CollectionSpec.Maps,
	// value is the new name.
	MapRenames map[string]string
	// MapReplacements passes along the inner map to MapReplacements inside
	// the embedded ebpf.CollectionOptions struct.
	MapReplacements map[string]*Map
	// Set of objects to keep during reachability pruning.
	Keep *set.Set[string]
}
// populateMapReplacements copies the package-level Map replacements into the
// embedded ebpf.CollectionOptions, unwrapping each wrapper to its underlying
// ebpf.Map.
func (co *CollectionOptions) populateMapReplacements() {
	dst := co.CollectionOptions.MapReplacements
	if dst == nil {
		dst = make(map[string]*ebpf.Map)
		co.CollectionOptions.MapReplacements = dst
	}
	for name, wrapped := range co.MapReplacements {
		dst[name] = wrapped.m
	}
}
// LoadCollection loads the given spec into the kernel with the specified opts.
// Returns a function that must be called after the Collection's entrypoints are
// attached to their respective kernel hooks. This function commits pending map
// pins to the bpf file system for maps that were found to be incompatible with
// their pinned counterparts, or for maps with certain flags that modify the
// default pinning behaviour.
//
// When attaching multiple programs from the same ELF in a loop, the returned
// function should only be run after all entrypoints have been attached. For
// example, attach both bpf_host.c:cil_to_netdev and cil_from_netdev before
// invoking the returned function, otherwise missing tail calls will occur.
//
// Any maps marked as pinned in the spec are automatically loaded from the path
// given in opts.Maps.PinPath and will be used instead of creating new ones.
func LoadCollection(logger *slog.Logger, spec *ebpf.CollectionSpec, opts *CollectionOptions) (*ebpf.Collection, func() error, error) {
	if spec == nil {
		return nil, nil, errors.New("can't load nil CollectionSpec")
	}
	if opts == nil {
		opts = &CollectionOptions{}
	}
	opts.populateMapReplacements()
	logger.Debug("Loading Collection into kernel",
		logfields.MapRenames, opts.MapRenames,
		logfields.Constants, fmt.Sprintf("%#v", opts.Constants),
	)
	// Copy spec so the modifications below don't affect the input parameter,
	// allowing the spec to be safely re-used by the caller.
	spec = spec.Copy()
	if err := renameMaps(spec, opts.MapRenames); err != nil {
		return nil, nil, err
	}
	if err := applyConstants(spec, opts.Constants); err != nil {
		return nil, nil, fmt.Errorf("applying variable overrides: %w", err)
	}
	// keep lists the objects retained by pruning; used for verification below.
	keep, err := removeUnusedMaps(spec, opts.Keep)
	if err != nil {
		return nil, nil, fmt.Errorf("pruning unused maps: %w", err)
	}
	// Find and strip all CILIUM_PIN_REPLACE pinning flags before creating the
	// Collection. ebpf-go will reject maps with pins it doesn't recognize.
	toReplace := consumePinReplace(spec)
	// Attempt to load the Collection.
	coll, err := ebpf.NewCollectionWithOptions(spec, opts.CollectionOptions)
	// Collect key names of maps that are not compatible with their pinned
	// counterparts and remove their pinning flags.
	if errors.Is(err, ebpf.ErrMapIncompatible) {
		var incompatible []string
		incompatible, err = incompatibleMaps(spec, opts.CollectionOptions)
		if err != nil {
			return nil, nil, fmt.Errorf("finding incompatible maps: %w", err)
		}
		toReplace = append(toReplace, incompatible...)
		// Retry loading the Collection with necessary pinning flags removed.
		coll, err = ebpf.NewCollectionWithOptions(spec, opts.CollectionOptions)
	}
	if err != nil {
		return nil, nil, err
	}
	// Only verify unused maps when debug logging is enabled.
	if logger.Enabled(context.Background(), slog.LevelDebug) {
		if err := verifyUnusedMaps(coll, keep); err != nil {
			return nil, nil, fmt.Errorf("verifying unused maps: %w", err)
		}
		logger.Debug("Verified no unused maps after loading Collection")
	}
	// Collect Maps that need their bpffs pins replaced. Pull out Map objects
	// before returning the Collection, since commit() still needs to work when
	// the Map is removed from the Collection, e.g. by [ebpf.Collection.Assign].
	pins, err := mapsToReplace(toReplace, spec, coll, opts.CollectionOptions)
	if err != nil {
		return nil, nil, fmt.Errorf("collecting map pins to replace: %w", err)
	}
	// Load successful, return a function that must be invoked after attaching the
	// Collection's entrypoint programs to their respective hooks.
	commit := func() error {
		return commitMapPins(logger, pins)
	}
	return coll, commit, nil
}
// renameMaps applies renames to coll. Every key must refer to an existing map
// in the spec; its Name field is overwritten with the new value.
func renameMaps(coll *ebpf.CollectionSpec, renames map[string]string) error {
	for from, to := range renames {
		spec, ok := coll.Maps[from]
		if !ok || spec == nil {
			return fmt.Errorf("unknown map %q: can't rename to %q", from, to)
		}
		spec.Name = to
	}
	return nil
}
// applyConstants sets the values of BPF C runtime configurables defined using
// the DECLARE_CONFIG macro.
//
// obj must be a struct (or pointer to one) understood by config.StructToMap,
// or nil, in which case this is a no-op. An error is returned if a constant
// does not exist in the spec, is not part of the Cilium config section, or
// cannot be set to the requested value.
func applyConstants(spec *ebpf.CollectionSpec, obj any) error {
	if obj == nil {
		return nil
	}

	constants, err := config.StructToMap(obj)
	if err != nil {
		return fmt.Errorf("converting struct to map: %w", err)
	}

	for name, value := range constants {
		constName := config.ConstantPrefix + name

		v, ok := spec.Variables[constName]
		if !ok {
			return fmt.Errorf("can't set non-existent Variable %s", name)
		}

		// Only variables in the dedicated config section may be overridden.
		if v.MapName() != config.Section {
			// Fixed: the error message previously ended with a stray ", ".
			return fmt.Errorf("can only set Cilium config variables in section %s (got %s:%s)", config.Section, v.MapName(), name)
		}

		if err := v.Set(value); err != nil {
			return fmt.Errorf("setting Variable %s: %w", name, err)
		}
	}

	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"fmt"
"net"
"go4.org/netipx"
cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/types"
)
// Must be in sync with ENDPOINT_KEY_* in <bpf/lib/common.h>
const (
	// EndpointKeyIPv4 marks an EndpointKey carrying an IPv4 address.
	EndpointKeyIPv4 uint8 = 1
	// EndpointKeyIPv6 marks an EndpointKey carrying an IPv6 address.
	EndpointKeyIPv6 uint8 = 2
)
// EndpointKey represents the key value of the endpoints BPF map
//
// Must be in sync with struct endpoint_key in <bpf/lib/common.h>
type EndpointKey struct {
	// represents both IPv6 and IPv4 (in the lowest four bytes)
	IP types.IPv6 `align:"$union0"`
	// Family selects how IP is interpreted: EndpointKeyIPv4 or EndpointKeyIPv6.
	Family uint8 `align:"family"`
	// Key is always set to 0 by NewEndpointKey.
	Key uint8 `align:"key"`
	// ClusterID identifies the cluster the address belongs to.
	ClusterID uint16 `align:"cluster_id"`
}
// NewEndpointKey returns an EndpointKey based on the provided IP address. The
// address family is automatically detected.
func NewEndpointKey(ip net.IP, clusterID uint16) EndpointKey {
	key := EndpointKey{
		Key:       0,
		ClusterID: clusterID,
	}
	if v4 := ip.To4(); v4 != nil {
		// IPv4 occupies the lowest four bytes of the IP field.
		key.Family = EndpointKeyIPv4
		copy(key.IP[:], v4)
	} else {
		key.Family = EndpointKeyIPv6
		copy(key.IP[:], ip)
	}
	return key
}
// ToIP converts the EndpointKey into a net.IP structure. Unknown families
// yield nil.
func (k EndpointKey) ToIP() net.IP {
	if k.Family == EndpointKeyIPv4 {
		return k.IP[:4]
	}
	if k.Family == EndpointKeyIPv6 {
		return k.IP[:]
	}
	return nil
}
// String provides a string representation of the EndpointKey.
func (k EndpointKey) String() string {
	ip := k.ToIP()
	if ip == nil {
		return "nil"
	}
	addrCluster := cmtypes.AddrClusterFrom(
		netipx.MustFromStdIP(ip),
		uint32(k.ClusterID),
	)
	return fmt.Sprintf("%s:%d", addrCluster, k.Key)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"context"
"fmt"
"log/slog"
"sync"
"sync/atomic"
"github.com/cilium/cilium/pkg/container"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// Action describes an action for map buffer events.
type Action uint8

const (
	// MapUpdate describes a map.Update event.
	MapUpdate Action = iota
	// MapDelete describes a map.Delete event.
	MapDelete
	// MapDeleteAll describes a map.DeleteAll event which is aggregated into a single event
	// to minimize memory and subscription buffer usage.
	MapDeleteAll
)

// bpfEventBufferGCControllerGroup groups the per-map controllers that expire
// TTL'd events from event buffers (see initEventsBuffer).
var bpfEventBufferGCControllerGroup = controller.NewGroup("bpf-event-buffer-gc")
// String returns the human-readable name of an Action, or "unknown" for
// values outside the defined set.
func (e Action) String() string {
	if e == MapUpdate {
		return "update"
	}
	if e == MapDelete {
		return "delete"
	}
	if e == MapDeleteAll {
		return "delete-all"
	}
	return "unknown"
}
// Event contains data about a bpf operation event.
type Event struct {
	// Timestamp records when the event entered the buffer.
	Timestamp time.Time
	// action is the map operation (update/delete/delete-all) performed.
	action Action
	// cacheEntry carries the key/value and sync state involved in the event.
	cacheEntry
}
// GetAction returns the string form of the action that produced this event.
func (e *Event) GetAction() string {
	action := e.action
	return action.String()
}
// GetKey returns the string representation of the event key, or "<nil>" when
// the event carries no key.
func (e Event) GetKey() string {
	if k := e.cacheEntry.Key; k != nil {
		return k.String()
	}
	return "<nil>"
}
// GetValue returns the string representation of the event value. Nil values
// (such as with deletes) are rendered as the canonical "<nil>".
func (e Event) GetValue() string {
	if v := e.cacheEntry.Value; v != nil {
		return v.String()
	}
	return "<nil>"
}
// GetLastError returns the last error recorded for this event's cache entry.
func (e Event) GetLastError() error {
	return e.LastError
}
// GetDesiredAction returns the desired action enum recorded for this event.
func (e Event) GetDesiredAction() DesiredAction {
	return e.DesiredAction
}
// initEventsBuffer attaches a ring-buffered event store of maxSize entries to
// the map. When eventsTTL is positive, a periodic controller compacts the
// buffer, dropping events older than the TTL.
func (m *Map) initEventsBuffer(maxSize int, eventsTTL time.Duration) {
	b := &eventsBuffer{
		logger: m.Logger,
		buffer: container.NewRingBuffer(maxSize),
		eventTTL: eventsTTL,
	}
	if b.eventTTL > 0 {
		m.Logger.Debug("starting bpf map event buffer GC controller")
		// One GC controller per map, keyed by map name.
		mapControllers.UpdateController(
			fmt.Sprintf("bpf-event-buffer-gc-%s", m.name),
			controller.ControllerParams{
				Group: bpfEventBufferGCControllerGroup,
				DoFunc: func(_ context.Context) error {
					m.Logger.Debug(
						"clearing bpf map events older than TTL",
						logfields.TTL, b.eventTTL,
					)
					// Compact keeps entries for which the predicate is true.
					b.buffer.Compact(func(e any) bool {
						event, ok := e.(*Event)
						if !ok {
							m.Logger.Error("Failed to compact the event buffer", logfields.Error, wrongObjTypeErr(e))
							return false
						}
						return time.Since(event.Timestamp) < b.eventTTL
					})
					return nil
				},
				RunInterval: b.eventTTL,
			},
		)
	}
	m.events = b
}
// eventsBuffer stores a buffer of events for auditing and debugging
// purposes.
type eventsBuffer struct {
	// logger receives warnings/errors about buffer and subscription handling.
	logger *slog.Logger
	// buffer is the bounded ring buffer holding *Event values.
	buffer *container.RingBuffer
	// eventTTL is the max age of retained events; zero disables expiry.
	eventTTL time.Duration
	// subsLock guards subscriptions.
	subsLock lock.RWMutex
	// subscriptions are the live follower handles fed by add().
	subscriptions []*Handle
}
// Handle allows for handling event streams safely outside of this package.
// The key design consideration for event streaming is that it is non-blocking.
// The eventsBuffer takes care of closing handles when their consumer is not reading
// off the buffer (or is not reading off it fast enough).
type Handle struct {
	// c delivers events to the subscriber; buffered (eventSubChanBufferSize).
	c chan *Event
	// closed is set once c has been closed.
	closed atomic.Bool
	// closer guarantees close-once semantics for c.
	closer *sync.Once
	// err records why the handle was closed, nil for a clean Close().
	err error
}
// C returns the subscription channel as receive-only. The channel is closed
// by Close(); consumers must never close it themselves.
func (h *Handle) C() <-chan *Event {
	return h.c
}
// Close safely closes the handle without an error; repeated calls are no-ops.
func (h *Handle) Close() {
	h.close(nil)
}
// close shuts the event channel exactly once, recording err as the close
// reason and flagging the handle as closed.
func (h *Handle) close(err error) {
	h.closer.Do(func() {
		defer h.closed.Store(true)
		close(h.c)
		h.err = err
	})
}
// isClosed reports whether close() has completed on this handle.
func (h *Handle) isClosed() bool {
	return h.closed.Load()
}
// isFull reports whether the subscription channel has no free buffer slots.
func (h *Handle) isFull() bool {
	return cap(h.c)-len(h.c) <= 0
}
// This configures how big buffers are for channels used for streaming events from
// eventsBuffer.
//
// To prevent blocking bpf.Map operations, subscribed events are buffered per client handle.
// How fast subscribers will need to process events will depend on the event throughput.
// In this case, our throughput will be expected to be not above 100 events a second.
// Therefore the consumer will have 10ms to process each event. The channel is also
// given a constant buffer size in the case where events arrive at once (i.e. all 100 events
// arriving at the top of the second).
//
// NOTE: Although using timers/timed-contexts seems like an obvious choice for this use case,
// the timer.After implementation actually uses a large amount of memory. To reduce memory spikes
// in high throughput cases, we instead just use a sufficiently buffered channel.
const (
	// eventSubChanBufferSize is the per-subscriber channel buffer; a full
	// channel causes the subscription to be closed (see eventsBuffer.add).
	eventSubChanBufferSize = 32
	// maxConcurrentEventSubs bounds the number of live subscriptions per map.
	maxConcurrentEventSubs = 32
)
// hasSubCapacity reports whether another subscription can be registered
// without exceeding maxConcurrentEventSubs.
//
// Fix: the previous comparison used <=, which let the caller append one
// subscription past the maximum (len == max passed the check, and the
// subsequent append pushed the count to max+1).
func (eb *eventsBuffer) hasSubCapacity() bool {
	eb.subsLock.RLock()
	defer eb.subsLock.RUnlock()
	return len(eb.subscriptions) < maxConcurrentEventSubs
}
// dumpAndSubscribe optionally replays the buffered events through callback
// and, when follow is set, registers and returns a new subscription Handle.
// A nil Handle with nil error is returned when follow is false.
func (eb *eventsBuffer) dumpAndSubscribe(callback EventCallbackFunc, follow bool) (*Handle, error) {
	if follow && !eb.hasSubCapacity() {
		return nil, fmt.Errorf("exceeded max number of concurrent map event subscriptions %d", maxConcurrentEventSubs)
	}

	if callback != nil {
		eb.dumpWithCallback(callback)
	}

	if !follow {
		return nil, nil
	}

	handle := &Handle{
		c:      make(chan *Event, eventSubChanBufferSize),
		closer: new(sync.Once),
	}

	eb.subsLock.Lock()
	eb.subscriptions = append(eb.subscriptions, handle)
	eb.subsLock.Unlock()

	return handle, nil
}
// DumpAndSubscribe dumps existing buffer, if callback is not nil. Followed by creating a
// subscription to the maps events buffer and returning the handle.
// These actions are done together so as to prevent possible missed events between the handoff
// of the callback and sub handle creation.
func (m *Map) DumpAndSubscribe(callback EventCallbackFunc, follow bool) (*Handle, error) {
	// Hold the map's read lock across dump+subscribe so no event can slip in
	// between the two steps; dumpAndSubscribe takes its own write lock for
	// mutating the subscriber list.
	m.lock.RLock()
	defer m.lock.RUnlock()

	if !m.eventsBufferEnabled {
		return nil, fmt.Errorf("map events not enabled for map %q", m.name)
	}
	return m.events.dumpAndSubscribe(callback, follow)
}
// IsEventsEnabled reports whether this map was configured with an event
// buffer (see WithEvents).
func (m *Map) IsEventsEnabled() bool {
	return m.eventsBufferEnabled
}
// add records e in the ring buffer and fans it out to all live subscriptions.
// Delivery is non-blocking: a subscriber whose channel buffer is full is
// closed with an error instead of stalling map operations. After fan-out, the
// subscription list is rewritten to contain only the handles that accepted
// the event.
//
// NOTE(review): eb.subscriptions is read here without holding subsLock, while
// dumpAndSubscribe appends under the lock — confirm callers serialize add()
// against subscription changes (e.g. via the owning Map's lock).
func (eb *eventsBuffer) add(e *Event) {
	eb.buffer.Add(e)
	var activeSubs []*Handle
	activeSubsLock := &lock.Mutex{}
	wg := &sync.WaitGroup{}
	for i, sub := range eb.subscriptions {
		if sub.isClosed() { // sub will be removed.
			continue
		}
		wg.Add(1)
		// One goroutine per subscriber so a slow/full one cannot delay others.
		go func(sub *Handle, i int) {
			defer wg.Done()
			if sub.isFull() {
				err := fmt.Errorf("timed out waiting to send sub map event")
				eb.logger.Warn(
					"subscription channel buffer was full, closing subscription",
					logfields.Error, err,
					logfields.SubscriptionID, i,
				)
				sub.close(err)
			} else {
				sub.c <- e
				activeSubsLock.Lock()
				activeSubs = append(activeSubs, sub)
				activeSubsLock.Unlock()
			}
		}(sub, i)
	}
	wg.Wait()
	// Replace the subscription list with the survivors of this fan-out.
	eb.subsLock.Lock()
	defer eb.subsLock.Unlock()
	eb.subscriptions = activeSubs
}
// wrongObjTypeErr builds the error reported when a non-*Event value is found
// in the event ring buffer, which indicates a programming error.
func wrongObjTypeErr(i any) error {
	return fmt.Errorf("BUG: wrong object type in event ring buffer: %T", i)
}
// eventIsValid reports whether e is an *Event that has not outlived the
// buffer's TTL. A TTL of zero disables expiry.
func (eb *eventsBuffer) eventIsValid(e any) bool {
	event, ok := e.(*Event)
	if !ok {
		eb.logger.Error("Could not dump contents of events buffer", logfields.Error, wrongObjTypeErr(e))
		return false
	}
	if eb.eventTTL == 0 {
		return true
	}
	return time.Since(event.Timestamp) <= eb.eventTTL
}
// EventCallbackFunc is used to dump events from a event buffer.
type EventCallbackFunc func(*Event)

// dumpWithCallback replays every still-valid buffered event through callback.
func (eb *eventsBuffer) dumpWithCallback(callback EventCallbackFunc) {
	eb.buffer.IterateValid(eb.eventIsValid, func(e any) {
		if event, ok := e.(*Event); ok {
			callback(event)
			return
		}
		eb.logger.Error("Could not dump contents of events buffer", logfields.Error, wrongObjTypeErr(e))
	})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/link"
)
// UpdateLink loads a pinned bpf_link at the given pin path and updates its
// program.
//
// Returns [os.ErrNotExist] if the pin is not found.
//
// Updating the link can fail if it is defunct (the hook it points to no longer
// exists).
func UpdateLink(pin string, prog *ebpf.Program) error {
	pinned, err := link.LoadPinnedLink(pin, &ebpf.LoadPinOptions{})
	if err != nil {
		return fmt.Errorf("opening pinned link %s: %w", pin, err)
	}
	defer pinned.Close()

	if err := pinned.Update(prog); err != nil {
		return fmt.Errorf("updating link %s: %w", pin, err)
	}
	return nil
}
// UnpinLink loads and unpins a bpf_link at the given pin path.
//
// Returns [os.ErrNotExist] if the pin is not found.
func UnpinLink(pin string) error {
	pinned, err := link.LoadPinnedLink(pin, &ebpf.LoadPinOptions{})
	if err != nil {
		return fmt.Errorf("opening pinned link %s: %w", pin, err)
	}
	defer pinned.Close()

	if err := pinned.Unpin(); err != nil {
		return fmt.Errorf("unpinning link %s: %w", pin, err)
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"regexp"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/time"
)
const (
	// maxSyncErrors is the maximum consecutive errors syncing before the
	// controller bails out
	maxSyncErrors = 512
	// errorResolverSchedulerMinInterval is the minimum interval for the
	// error resolver to be scheduled. This minimum interval ensures not to
	// overschedule if a large number of updates fail in a row.
	errorResolverSchedulerMinInterval = 5 * time.Second
	// errorResolverSchedulerDelay is the delay to update the controller
	// after determination that a run is needed. The delay allows to
	// schedule the resolver after series of updates have failed.
	errorResolverSchedulerDelay = 200 * time.Millisecond
)

var (
	// mapControllers manages the per-map controllers used for error
	// resolution and event buffer GC.
	mapControllers = controller.NewManager()
)
// DesiredAction is the action to be performed on the BPF map
type DesiredAction uint8

const (
	// OK indicates that no further action is required and the entry is in
	// sync
	OK DesiredAction = iota
	// Insert indicates that the entry needs to be created or updated
	Insert
	// Delete indicates that the entry needs to be deleted
	Delete
)

// String returns the human-readable name of the desired action, or "unknown"
// for values outside the defined set.
func (d DesiredAction) String() string {
	if d == OK {
		return "sync"
	}
	if d == Insert {
		return "to-be-inserted"
	}
	if d == Delete {
		return "to-be-deleted"
	}
	return "unknown"
}
// commonNameRegexps strips well-known ID/version suffixes from Cilium map
// names. Ordered from most to least specific so the most precise rule wins.
var commonNameRegexps = []*regexp.Regexp{
	regexp.MustCompile(`^(cilium_)(.+)_v[0-9]+_reserved_[0-9]+$`),
	regexp.MustCompile(`^(cilium_)(.+)_reserved_[0-9]+$`),
	regexp.MustCompile(`^(cilium_)(.+)_netdev_ns_[0-9]+$`),
	regexp.MustCompile(`^(cilium_)(.+)_overlay_[0-9]+$`),
	regexp.MustCompile(`^(cilium_)(.+)_v[0-9]+_[0-9]+$`),
	regexp.MustCompile(`^(cilium_)(.+)_[0-9]+$`),
	regexp.MustCompile(`^(cilium_)(.+)+$`),
}

// extractCommonName returns the part of name shared by all maps of the same
// kind, dropping the "cilium_" prefix and any endpoint/version suffix. Names
// matching none of the known patterns are returned unchanged.
func extractCommonName(name string) string {
	for _, re := range commonNameRegexps {
		replaced := re.ReplaceAllString(name, `$2`)
		if replaced != name {
			return replaced
		}
	}
	return name
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build linux
package bpf
import (
"context"
"errors"
"fmt"
"io/fs"
"iter"
"log/slog"
"math"
"os"
"path"
"reflect"
"strings"
"github.com/cilium/ebpf"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/spanstat"
"github.com/cilium/cilium/pkg/time"
)
var (
	// ErrMaxLookup is returned when the maximum number of map element lookups has
	// been reached.
	ErrMaxLookup = errors.New("maximum number of lookups reached")

	// bpfMapSyncControllerGroup groups the per-map error-resolution
	// controllers (see scheduleErrorResolver).
	bpfMapSyncControllerGroup = controller.NewGroup("bpf-map-sync")
)
// MapKey is implemented by all BPF map key types. The String form is used
// for cache indexing and event logging.
type MapKey interface {
	fmt.Stringer

	// New must return a pointer to a new MapKey.
	New() MapKey
}

// MapValue is implemented by all BPF map value types.
type MapValue interface {
	fmt.Stringer

	// New must return a pointer to a new MapValue.
	New() MapValue
}
// MapPerCPUValue is the same as MapValue, but for per-CPU maps. Implement to be
// able to fetch map values from all CPUs.
type MapPerCPUValue interface {
	MapValue

	// NewSlice must return a pointer to a slice of structs that implement MapValue.
	NewSlice() any
}
// cacheEntry is the userspace shadow of one map entry, tracking the desired
// kernel state and the outcome of the last sync attempt.
type cacheEntry struct {
	Key MapKey
	Value MapValue

	// DesiredAction is the pending operation (OK/Insert/Delete) for this entry.
	DesiredAction DesiredAction
	// LastError records the most recent sync failure, nil on success.
	LastError error
}
// Map wraps an eBPF map with optional userspace caching, event buffering,
// sync-error retry and pressure metrics.
type Map struct {
	// Logger is scoped with the map's path and name.
	Logger *slog.Logger

	// m is the kernel handle; nil until the map has been created/opened.
	m *ebpf.Map

	// spec will be nil after the map has been created
	spec *ebpf.MapSpec

	// key and value are prototypes used to decode map entries.
	key MapKey
	value MapValue

	// name is the base name of the map; path its bpffs pin location.
	name string
	path string
	// lock guards the cache and map operations.
	lock lock.RWMutex

	// cachedCommonName is the common portion of the name excluding any
	// endpoint ID
	cachedCommonName string

	// enableSync is true when synchronization retries have been enabled.
	enableSync bool

	// withValueCache is true when map cache has been enabled
	withValueCache bool

	// cache as key/value entries when map cache is enabled or as key-only when
	// pressure metric is enabled
	cache map[string]*cacheEntry

	// errorResolverLastScheduled is the timestamp when the error resolver
	// was last scheduled
	errorResolverLastScheduled time.Time

	// outstandingErrors states whether there are outstanding errors, occurred while
	// syncing an entry with the kernel, that need to be resolved. This variable exists
	// to avoid iterating over the full cache to check if reconciliation is necessary,
	// but it is possible that it gets out of sync if an error is automatically
	// resolved while performing a subsequent Update/Delete operation on the same key.
	outstandingErrors bool

	// pressureGauge is a metric that tracks the pressure on this map
	pressureGauge *metrics.GaugeWithThreshold

	// is true when events buffer is enabled.
	eventsBufferEnabled bool

	// contains optional event buffer which stores last n bpf map events.
	events *eventsBuffer

	// group is the metric group name for this map, it classifies maps of the same
	// type that share the same metric group.
	group string
}
// Type returns the map's type, preferring the loaded kernel object over the
// spec, and UnspecifiedMap when neither is available.
func (m *Map) Type() ebpf.MapType {
	switch {
	case m.m != nil:
		return m.m.Type()
	case m.spec != nil:
		return m.spec.Type
	default:
		return ebpf.UnspecifiedMap
	}
}
// nopDecoder satisfies encoding.BinaryUnmarshaler while discarding all input.
// It is used as a zero-cost sink for batch lookups that only count elements.
type nopDecoder []struct{}

func (nopDecoder) UnmarshalBinary([]byte) error { return nil }
// BatchCount the number of elements in the map using a batch lookup.
// Only usable for hash, lru-hash and lpm-trie maps.
//
// Keys/values are decoded into no-op sinks, so only the element count is
// produced. On ENOSPC (LRU bucket larger than the batch buffer) the chunk
// size is doubled and the lookup retried up to defaultBatchedRetries times.
func (m *Map) BatchCount() (count int, err error) {
	switch m.Type() {
	case ebpf.Hash, ebpf.LRUHash, ebpf.LPMTrie:
		break
	default:
		return 0, fmt.Errorf("unsupported map type %s, must be one either hash or lru-hash types", m.Type())
	}

	chunkSize := startingChunkSize(int(m.MaxEntries()))

	// Since we don't care about the actual data we just use a no-op binary
	// decoder.
	keys := make(nopDecoder, chunkSize)
	vals := make(nopDecoder, chunkSize)
	maxRetries := defaultBatchedRetries

	var cursor ebpf.MapBatchCursor
	for {
		for retry := range maxRetries {
			// Attempt to read batch into buffer.
			c, batchErr := m.BatchLookup(&cursor, keys, vals, nil)
			count += c

			switch {
			// Lookup batch on LRU hash map may fail if the buffer passed is not big enough to
			// accommodate the largest bucket size in the LRU map. See full comment in
			// [BatchIterator.IterateAll]
			case errors.Is(batchErr, unix.ENOSPC):
				// NOTE(review): when the final retry also hits ENOSPC, err is
				// set but the outer loop continues and may later return with
				// this stale err even after a successful lookup — confirm
				// whether an immediate return was intended here.
				if retry == maxRetries-1 {
					err = batchErr
				} else {
					chunkSize *= 2
				}
				keys = make(nopDecoder, chunkSize)
				vals = make(nopDecoder, chunkSize)
				continue
			case errors.Is(batchErr, ebpf.ErrKeyNotExist):
				// End of map reached; count is complete.
				return
			case batchErr != nil:
				// If we're not done, and we didn't hit a ENOSPC then stop iteration and record
				// the error.
				err = fmt.Errorf("failed to iterate map: %w", batchErr)
				return
			}

			// Do the next batch
			break
		}
	}
}
// KeySize returns the key size in bytes, preferring the loaded kernel object
// over the spec, and zero when neither is available.
func (m *Map) KeySize() uint32 {
	switch {
	case m.m != nil:
		return m.m.KeySize()
	case m.spec != nil:
		return m.spec.KeySize
	default:
		return 0
	}
}
// ValueSize returns the value size in bytes, preferring the loaded kernel
// object over the spec, and zero when neither is available.
func (m *Map) ValueSize() uint32 {
	switch {
	case m.m != nil:
		return m.m.ValueSize()
	case m.spec != nil:
		return m.spec.ValueSize
	default:
		return 0
	}
}
// MaxEntries returns the map's capacity, preferring the loaded kernel object
// over the spec, and zero when neither is available.
func (m *Map) MaxEntries() uint32 {
	switch {
	case m.m != nil:
		return m.m.MaxEntries()
	case m.spec != nil:
		return m.spec.MaxEntries
	default:
		return 0
	}
}
// Flags returns the map creation flags, preferring the loaded kernel object
// over the spec, and zero when neither is available.
func (m *Map) Flags() uint32 {
	switch {
	case m.m != nil:
		return m.m.Flags()
	case m.spec != nil:
		return m.spec.Flags
	default:
		return 0
	}
}
// hasPerCPUValue reports whether the map stores one value per possible CPU.
func (m *Map) hasPerCPUValue() bool {
	switch m.Type() {
	case ebpf.PerCPUHash, ebpf.PerCPUArray, ebpf.LRUCPUHash, ebpf.PerCPUCGroupStorage:
		return true
	default:
		return false
	}
}
// updateMetrics publishes the map's capacity under its metric group. Maps
// without a group are skipped.
func (m *Map) updateMetrics() {
	if m.group != "" {
		metrics.UpdateMapCapacity(m.group, m.MaxEntries())
	}
}
// NewMap creates a new Map instance - object representing a BPF map
func NewMap(name string, mapType ebpf.MapType, mapKey MapKey, mapValue MapValue,
	maxEntries int, flags uint32) *Map {

	// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
	logger := logging.DefaultSlogLogger.With(
		logfields.BPFMapPath, name,
		logfields.BPFMapName, name,
	)

	// Key/value sizes are derived from the prototype types via reflection.
	base := path.Base(name)
	spec := &ebpf.MapSpec{
		Type:       mapType,
		Name:       base,
		KeySize:    uint32(reflect.TypeOf(mapKey).Elem().Size()),
		ValueSize:  uint32(reflect.TypeOf(mapValue).Elem().Size()),
		MaxEntries: uint32(maxEntries),
		Flags:      flags,
	}

	return &Map{
		Logger: logger,
		spec:   spec,
		name:   base,
		key:    mapKey,
		value:  mapValue,
		group:  name,
	}
}
// NewMapWithInnerSpec creates a new Map instance - object representing a BPF
// map-of-maps whose entries use the given inner map spec.
//
// NOTE(review): unlike NewMap, the group field is not set here — confirm
// whether map-of-maps are intentionally excluded from capacity metrics.
func NewMapWithInnerSpec(name string, mapType ebpf.MapType, mapKey MapKey, mapValue MapValue,
	maxEntries int, flags uint32, innerSpec *ebpf.MapSpec) *Map {

	// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
	defaultSlogLogger := logging.DefaultSlogLogger

	// Key/value sizes are derived from the prototype types via reflection.
	keySize := reflect.TypeOf(mapKey).Elem().Size()
	valueSize := reflect.TypeOf(mapValue).Elem().Size()

	return &Map{
		Logger: defaultSlogLogger.With(
			logfields.BPFMapPath, name,
			logfields.BPFMapName, name,
		),
		spec: &ebpf.MapSpec{
			Type: mapType,
			Name: path.Base(name),
			KeySize: uint32(keySize),
			ValueSize: uint32(valueSize),
			MaxEntries: uint32(maxEntries),
			Flags: flags,
			InnerMap: innerSpec,
		},
		name: path.Base(name),
		key: mapKey,
		value: mapValue,
	}
}
// commonName returns the map name stripped of endpoint IDs and suffixes,
// memoizing the result on first use.
func (m *Map) commonName() string {
	if m.cachedCommonName == "" {
		m.cachedCommonName = extractCommonName(m.name)
	}
	return m.cachedCommonName
}
// NonPrefixedName returns the map name without the metrics namespace prefix.
func (m *Map) NonPrefixedName() string {
	prefix := metrics.Namespace + "_"
	return strings.TrimPrefix(m.name, prefix)
}
// scheduleErrorResolver schedules a periodic resolver controller that scans
// all BPF map caches for unresolved errors and attempts to resolve them. On
// error of resolution, the controller is-rescheduled in an expedited manner
// with an exponential back-off.
//
// m.lock must be held for writing
func (m *Map) scheduleErrorResolver() {
	m.outstandingErrors = true

	// Rate-limit scheduling: skip if a resolver run was scheduled within the
	// minimum interval.
	if time.Since(m.errorResolverLastScheduled) <= errorResolverSchedulerMinInterval {
		return
	}

	m.errorResolverLastScheduled = time.Now()

	// Delay the controller update so that a burst of failing updates results
	// in a single resolver run after the burst has passed.
	go func() {
		time.Sleep(errorResolverSchedulerDelay)
		mapControllers.UpdateController(m.controllerName(),
			controller.ControllerParams{
				Group: bpfMapSyncControllerGroup,
				DoFunc: m.resolveErrors,
				RunInterval: errorResolverSchedulerMinInterval,
			},
		)
	}()
}
// WithCache enables use of a cache. This will store all entries inserted from
// user space in a local cache (map) and will indicate the status of each
// individual entry.
func (m *Map) WithCache() *Map {
	if m.cache == nil {
		m.cache = make(map[string]*cacheEntry)
	}
	m.withValueCache = true
	m.enableSync = true
	return m
}
// WithEvents enables use of the event buffer, if the buffer is enabled.
// This stores all map events (i.e. add/update/delete) in a bounded event buffer.
// If eventTTL is not zero, then events that are older than the TTL
// will periodically be removed from the buffer.
// Enabling events will use approximately 100MB for every million capacity
// in maxSize.
//
// TODO: The IPCache map have many periodic update events added by a controller for entries such as the 0.0.0.0/0 range.
// These fill the event buffer with possibly unnecessary events.
// We should either provide an option to aggregate these events, ignore them from the ipcache event buffer or store them in a separate buffer.
func (m *Map) WithEvents(c option.BPFEventBufferConfig) *Map {
	if !c.Enabled {
		return m
	}
	m.Logger.Debug(
		"enabling events buffer",
		logfields.Size, c.MaxSize,
		logfields.TTL, c.TTL,
	)
	m.eventsBufferEnabled = true
	m.initEventsBuffer(c.MaxSize, c.TTL)
	return m
}
// WithGroupName sets the group name of this map.
func (m *Map) WithGroupName(group string) *Map {
	m.group = group
	return m
}
// WithPressureMetricThreshold enables the tracking of a metric that measures
// the pressure of this map. This metric is only reported if over the
// threshold. Passing a nil registry leaves pressure tracking disabled.
func (m *Map) WithPressureMetricThreshold(registry *metrics.Registry, threshold float64) *Map {
	if registry == nil {
		return m
	}
	// When pressure metric is enabled, we keep track of map keys in cache
	// so that the cache length can serve as the map's occupancy estimate.
	if m.cache == nil {
		m.cache = map[string]*cacheEntry{}
	}
	m.pressureGauge = registry.NewBPFMapPressureGauge(m.NonPrefixedName(), threshold)
	return m
}
// WithPressureMetric enables tracking and reporting of this map's pressure,
// reporting at any occupancy (threshold 0).
func (m *Map) WithPressureMetric(registry *metrics.Registry) *Map {
	const reportAlways = 0.0
	return m.WithPressureMetricThreshold(registry, reportAlways)
}
// UpdatePressureMetricWithSize updates map pressure metric using the given map size.
// The reported value is size divided by the map's maximum entry count.
func (m *Map) UpdatePressureMetricWithSize(size int32) {
	if m.pressureGauge == nil {
		return
	}

	// Do a lazy check of MetricsConfig as it is not available at map static
	// initialization. If pressure metrics were disabled after setup, tear
	// down the gauge (and the cache, unless it is needed for value caching).
	if !metrics.BPFMapPressure {
		if !m.withValueCache {
			m.cache = nil
		}
		m.pressureGauge = nil
		return
	}

	pvalue := float64(size) / float64(m.MaxEntries())
	m.pressureGauge.Set(pvalue)
}
// updatePressureMetric refreshes the pressure gauge from the current cache
// length, which approximates the number of entries in the kernel map.
func (m *Map) updatePressureMetric() {
	// Skipping pressure metric gauge updates for LRU map as the cache size
	// does not accurately represent the actual map size.
	if m.spec != nil && m.spec.Type == ebpf.LRUHash {
		return
	}
	m.UpdatePressureMetricWithSize(int32(len(m.cache)))
}
// FD returns the file descriptor of the underlying BPF map.
func (m *Map) FD() int {
	return m.m.FD()
}

// Name returns the basename of this map.
func (m *Map) Name() string {
	return m.name
}

// Path returns the path to this map on the filesystem, deriving it from the
// map name if no explicit path has been set yet.
func (m *Map) Path() (string, error) {
	if err := m.setPathIfUnset(); err != nil {
		return "", err
	}

	return m.path, nil
}
// Unpin attempts to unpin (remove) the map from the filesystem.
func (m *Map) Unpin() error {
	pinPath, err := m.Path()
	if err != nil {
		return err
	}
	return os.RemoveAll(pinPath)
}

// UnpinIfExists tries to unpin (remove) the map only if it exists.
func (m *Map) UnpinIfExists() error {
	found, err := m.exist()
	switch {
	case err != nil:
		return err
	case !found:
		return nil
	}
	return m.Unpin()
}
// controllerName returns the name of the sync controller for this map.
func (m *Map) controllerName() string {
	return "bpf-map-sync-" + m.name
}
// OpenMap opens the map at pinPath. pinPath must be an absolute path. The
// resulting Map is registered in the global map registry.
func OpenMap(pinPath string, key MapKey, value MapValue) (*Map, error) {
	if !path.IsAbs(pinPath) {
		return nil, fmt.Errorf("pinPath must be absolute: %s", pinPath)
	}

	em, err := ebpf.LoadPinnedMap(pinPath, nil)
	if err != nil {
		return nil, err
	}

	// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
	defaultSlogLogger := logging.DefaultSlogLogger
	logger := defaultSlogLogger.With(
		logfields.BPFMapPath, pinPath,
		logfields.BPFMapName, path.Base(pinPath),
	)

	m := &Map{
		Logger: logger,
		m:      em,
		name:   path.Base(pinPath),
		path:   pinPath,
		key:    key,
		value:  value,
	}

	m.updateMetrics()
	registerMap(logger, pinPath, m)

	return m, nil
}
// setPathIfUnset derives the pin path from the map name when no explicit
// path has been set. It fails when neither a path nor a name is available.
func (m *Map) setPathIfUnset() error {
	if m.path != "" {
		return nil
	}
	if m.name == "" {
		return fmt.Errorf("either path or name must be set")
	}
	m.path = MapPath(m.Logger, m.name)
	return nil
}
// Recreate removes any pin at the Map's pin path, recreates and re-pins it.
// It fails if the map is already open.
func (m *Map) Recreate() error {
	m.lock.Lock()
	defer m.lock.Unlock()

	if m.m != nil {
		return fmt.Errorf("map already open: %s", m.name)
	}

	if err := m.setPathIfUnset(); err != nil {
		return err
	}

	// A missing pin is fine; anything else is a real failure.
	if err := os.Remove(m.path); err != nil && !errors.Is(err, fs.ErrNotExist) {
		return fmt.Errorf("removing pinned map %s: %w", m.name, err)
	}

	m.Logger.Info(
		"Removed map pin, recreating and re-pinning map",
	)

	return m.openOrCreate(true)
}
// IsOpen returns true if the map has been opened.
//
// NOTE(review): this acquires the write lock rather than a read lock,
// presumably to serialize with open/close — confirm whether RLock would
// suffice here.
func (m *Map) IsOpen() bool {
	m.lock.Lock()
	defer m.lock.Unlock()
	return m.m != nil
}
// OpenOrCreate attempts to open the Map, or if it does not yet exist, create
// the Map. If the existing map's attributes such as map type, key/value size,
// capacity, etc. do not match the Map's attributes, then the map will be
// deleted and reopened without any attempt to retain its previous contents.
// If the map is marked as non-persistent, it will always be recreated.
//
// Returns an error if opening or creating the map fails.
func (m *Map) OpenOrCreate() error {
	m.lock.Lock()
	defer m.lock.Unlock()

	return m.openOrCreate(true)
}
// CreateUnpinned creates the map without pinning it to the file system.
//
// TODO(tb): Remove this when all map creation takes MapSpec.
func (m *Map) CreateUnpinned() error {
	m.lock.Lock()
	defer m.lock.Unlock()

	return m.openOrCreate(false)
}

// Create is similar to OpenOrCreate, but closes the map after creating or
// opening it.
func (m *Map) Create() error {
	if err := m.OpenOrCreate(); err != nil {
		return err
	}
	return m.Close()
}
// openOrCreate opens the pinned map or creates it from m.spec, optionally
// pinning it by name. The MapSpec is consumed on success and must be present.
//
// m.lock must be held for writing.
func (m *Map) openOrCreate(pin bool) error {
	if m.m != nil {
		return nil
	}

	if m.spec == nil {
		return fmt.Errorf("attempted to create map %s without MapSpec", m.name)
	}

	if err := m.setPathIfUnset(); err != nil {
		return err
	}

	// Apply memory-related flags appropriate for the (inner) map type.
	m.spec.Flags |= GetMapMemoryFlags(m.spec.Type)

	if m.spec.InnerMap != nil {
		m.spec.InnerMap.Flags |= GetMapMemoryFlags(m.spec.InnerMap.Type)
	}

	if pin {
		m.spec.Pinning = ebpf.PinByName
	}

	em, err := OpenOrCreateMap(m.Logger, m.spec, path.Dir(m.path))
	if err != nil {
		return err
	}

	m.updateMetrics()
	registerMap(m.Logger, m.path, m)

	// Consume the MapSpec.
	m.spec = nil

	// Retain the Map.
	m.m = em

	return nil
}
// Open opens the BPF map. All calls to Open() are serialized due to acquiring
// m.lock
func (m *Map) Open() error {
	m.lock.Lock()
	defer m.lock.Unlock()

	return m.open()
}

// open opens the BPF map. It is identical to Open() but should be used when
// m.lock is already held. open() may only be used if m.lock is held for
// writing.
func (m *Map) open() error {
	// Already open; nothing to do.
	if m.m != nil {
		return nil
	}

	if err := m.setPathIfUnset(); err != nil {
		return err
	}

	em, err := ebpf.LoadPinnedMap(m.path, nil)
	if err != nil {
		return fmt.Errorf("loading pinned map %s: %w", m.path, err)
	}

	m.updateMetrics()
	registerMap(m.Logger, m.path, m)

	m.m = em

	return nil
}
// Close releases the underlying map handle, stops the sync controller if one
// was enabled, and removes the map from the global registry. It is safe to
// call on a map that is not open.
func (m *Map) Close() error {
	m.lock.Lock()
	defer m.lock.Unlock()

	if m.enableSync {
		mapControllers.RemoveController(m.controllerName())
	}

	if m.m != nil {
		m.m.Close()
		m.m = nil
	}

	unregisterMap(m.Logger, m.path, m)

	return nil
}
// NextKey fetches the key following 'key' into nextKeyOut, recording the
// syscall duration metric when enabled. A nil key fetches the first key in
// the map.
func (m *Map) NextKey(key, nextKeyOut any) error {
	var duration *spanstat.SpanStat
	if metrics.BPFSyscallDuration.IsEnabled() {
		duration = spanstat.Start()
	}

	err := m.m.NextKey(key, nextKeyOut)

	if metrics.BPFSyscallDuration.IsEnabled() {
		metrics.BPFSyscallDuration.WithLabelValues(metricOpGetNextKey, metrics.Error2Outcome(err)).Observe(duration.End(err == nil).Total().Seconds())
	}

	return err
}
// DumpCallback is invoked once per map entry during a dump.
type DumpCallback func(key MapKey, value MapValue)

// DumpWithCallback iterates over the Map and calls the given DumpCallback for
// each map entry. With the current implementation, it is safe for callbacks to
// retain the values received, as they are guaranteed to be new instances.
//
// To dump per-cpu maps, use DumpPerCPUWithCallback.
func (m *Map) DumpWithCallback(cb DumpCallback) error {
	if cb == nil {
		return errors.New("empty callback")
	}

	if err := m.Open(); err != nil {
		return err
	}

	m.lock.RLock()
	defer m.lock.RUnlock()

	// Don't need deep copies here, only fresh pointers.
	mk := m.key.New()
	mv := m.value.New()

	i := m.m.Iterate()
	for i.Next(mk, mv) {
		cb(mk, mv)

		// Allocate fresh key/value for the next entry so the callback may
		// retain what it was handed.
		mk = m.key.New()
		mv = m.value.New()
	}

	return i.Err()
}
// DumpPerCPUCallback is called by DumpPerCPUWithCallback with the map key and
// the slice of all values from all CPUs.
type DumpPerCPUCallback func(key MapKey, values any)

// DumpPerCPUWithCallback iterates over the Map and calls the given
// DumpPerCPUCallback for each map entry, passing the slice with all values
// from all CPUs. With the current implementation, it is safe for callbacks
// to retain the values received, as they are guaranteed to be new instances.
func (m *Map) DumpPerCPUWithCallback(cb DumpPerCPUCallback) error {
	if cb == nil {
		return errors.New("empty callback")
	}

	if !m.hasPerCPUValue() {
		return fmt.Errorf("map %s is not a per-CPU map", m.name)
	}

	// The value type must know how to allocate a per-CPU slice.
	v, ok := m.value.(MapPerCPUValue)
	if !ok {
		return fmt.Errorf("map %s value type does not implement MapPerCPUValue", m.name)
	}

	if err := m.Open(); err != nil {
		return err
	}

	m.lock.RLock()
	defer m.lock.RUnlock()

	// Don't need deep copies here, only fresh pointers.
	mk := m.key.New()
	mv := v.NewSlice()

	i := m.m.Iterate()
	for i.Next(mk, mv) {
		cb(mk, mv)

		// Allocate fresh key/values so the callback may retain them.
		mk = m.key.New()
		mv = v.NewSlice()
	}

	return i.Err()
}
// DumpWithCallbackIfExists is similar to DumpWithCallback, but is a no-op
// when the map does not exist on the filesystem.
func (m *Map) DumpWithCallbackIfExists(cb DumpCallback) error {
	found, err := m.exist()
	if err != nil || !found {
		return err
	}
	return m.DumpWithCallback(cb)
}
// DumpReliablyWithCallback is similar to DumpWithCallback, but performs
// additional tracking of the current and recently seen keys, so that if an
// element is removed from the underlying kernel map during the dump, the dump
// can continue from a recently seen key rather than restarting from scratch.
// In addition, it caps the maximum number of map entry iterations at 4 times
// the maximum map size. If this limit is reached, ErrMaxLookup is returned.
//
// The caller must provide a callback for handling each entry, and a stats
// object initialized via a call to NewDumpStats(). The callback function must
// not invoke any map operations that acquire the Map.lock.
func (m *Map) DumpReliablyWithCallback(cb DumpCallback, stats *DumpStats) error {
	if cb == nil {
		return errors.New("empty callback")
	}

	if stats == nil {
		return errors.New("stats is nil")
	}

	var (
		prevKey      = m.key.New()
		currentKey   = m.key.New()
		nextKey      = m.key.New()
		value        = m.value.New()
		prevKeyValid = false
	)

	stats.start()
	defer stats.finish()

	if err := m.Open(); err != nil {
		return err
	}

	// Acquire a (write) lock here as callers can invoke map operations in the
	// DumpCallback that need a (write) lock.
	// See PR for more details. - https://github.com/cilium/cilium/pull/38590.
	m.lock.Lock()
	defer m.lock.Unlock()

	if m.m == nil {
		// We currently don't prevent open maps from being closed.
		// See GH issue - https://github.com/cilium/cilium/issues/39287.
		return errors.New("map is closed")
	}

	// Get the first map key.
	if err := m.NextKey(nil, currentKey); err != nil {
		stats.Lookup = 1
		if errors.Is(err, ebpf.ErrKeyNotExist) {
			// Empty map, nothing to iterate.
			stats.Completed = true
			return nil
		}
		// NOTE(review): any other error falls through to the loop below with
		// currentKey left as a fresh zero key — confirm this is intentional.
	}

	// maxLookup is an upper bound limit to prevent backtracking forever
	// when iterating over the map's elements (the map might be concurrently
	// updated while being iterated)
	maxLookup := stats.MaxEntries * 4

	// This loop stops when all elements have been iterated (Map.NextKey() returns
	// ErrKeyNotExist) OR, in order to avoid hanging if
	// the map is continuously updated, when maxLookup has been reached
	for stats.Lookup = 1; stats.Lookup <= maxLookup; stats.Lookup++ {
		// currentKey was set by the first m.NextKey() above. We know it existed in
		// the map, but it may have been deleted by a concurrent map operation.
		//
		// If currentKey is no longer in the map, nextKey may be the first key in
		// the map again. Continue with nextKey only if we still find currentKey in
		// the Lookup() after the call to m.NextKey(), this way we know nextKey is
		// NOT the first key in the map and iteration hasn't reset.
		nextKeyErr := m.NextKey(currentKey, nextKey)

		if err := m.m.Lookup(currentKey, value); err != nil {
			stats.LookupFailed++

			// Restarting from a invalid key starts the iteration again from the beginning.
			// If we have a previously found key, try to restart from there instead
			if prevKeyValid {
				currentKey = prevKey
				// Restart from a given previous key only once, otherwise if the prevKey is
				// concurrently deleted we might loop forever trying to look it up.
				prevKeyValid = false
				stats.KeyFallback++
			} else {
				// Depending on exactly when currentKey was deleted from the
				// map, nextKey may be the actual key element after the deleted
				// one, or the first element in the map.
				currentKey = nextKey

				// To avoid having nextKey and currentKey pointing at the same memory
				// we allocate a new key for nextKey. Without this currentKey and nextKey
				// would be the same pointer value and would get double iterated on the next
				// iterations m.NextKey(...) call.
				nextKey = m.key.New()
				stats.Interrupted++
			}
			continue
		}

		cb(currentKey, value)

		if nextKeyErr != nil {
			if errors.Is(nextKeyErr, ebpf.ErrKeyNotExist) {
				stats.Completed = true
				return nil // end of map, we're done iterating
			}
			return nextKeyErr
		}

		// Prepare keys to move to the next iteration.
		prevKey = currentKey
		currentKey = nextKey
		nextKey = m.key.New()
		prevKeyValid = true
	}

	return ErrMaxLookup
}
// BatchIterator provides a typed wrapper *Map that allows for batched iteration
// of bpf maps.
type BatchIterator[KT, VT any, KP KeyPointer[KT], VP ValuePointer[VT]] struct {
	m *Map
	// err holds the error from the most recent IterateAll run, if any.
	err error

	// keys and vals are the batch lookup buffers, sized to chunkSize.
	keys []KT
	vals []VT

	// chunkSize is the current batch buffer length; it is grown on ENOSPC.
	chunkSize int
	// maxDumpRetries overrides defaultBatchedRetries when non-zero.
	maxDumpRetries uint32

	// Iteration stats
	batchSize int

	// opts are optional BPF_MAP_LOOKUP_BATCH options.
	opts *ebpf.BatchOptions
}
// NewBatchIterator returns an iterator that allows for iterating a map using
// the bpf batch api.
// This automatically handles concerns such as batch sizing and handling errors
// when end of map is reached.
// Following iteration, any unresolved errors encountered when iterating
// the bpf map can be accessed via the Err() function.
// The pointer type of KT & VT must implement Map{Key,Value}, respectively.
//
// Subsequent iterations via IterateAll reset all internal state and begin
// iteration over.
//
// Example usage:
//
//	m := NewMap("cilium_test",
//		ebpf.Hash,
//		&TestKey{},   // *TestKey implements MapKey.
//		&TestValue{}, // *TestValue implements MapValue.
//		mapSize,
//		BPF_F_NO_PREALLOC,
//	)
//
//	iter := NewBatchIterator[TestKey, TestValue](m)
//	for k, v := range iter.IterateAll(context.TODO()) {
//		// ...
//	}
func NewBatchIterator[KT any, VT any, KP KeyPointer[KT], VP ValuePointer[VT]](m *Map) *BatchIterator[KT, VT, KP, VP] {
	return &BatchIterator[KT, VT, KP, VP]{
		m: m,
	}
}
// KeyPointer is a generic interface that provides the constraint that the
// pointer of a type T is a MapKey.
type KeyPointer[T any] interface {
	MapKey
	*T
}

// ValuePointer is a generic interface that provides the constraint that the
// pointer of a type T is a MapValue.
type ValuePointer[T any] interface {
	MapValue
	*T
}
// Err returns errors encountered during the previous iteration when
// IterateAll(...) is called.
//
// If the iterator is reused, the error will be reset.
func (bi BatchIterator[KT, VT, KP, VP]) Err() error {
	// Receiver renamed from "kvs" to "bi" for consistency with the other
	// BatchIterator methods (e.g. maxBatchedRetries, IterateAll).
	return bi.err
}
// maxBatchedRetries returns the configured maximum number of ENOSPC retries
// per batch, falling back to defaultBatchedRetries when unset.
func (bi BatchIterator[KT, VT, KP, VP]) maxBatchedRetries() int {
	if bi.maxDumpRetries > 0 {
		return int(bi.maxDumpRetries)
	}
	return defaultBatchedRetries
}

// defaultBatchedRetries is the default number of batch lookup retries.
const defaultBatchedRetries = 3
// BatchIteratorOpt customizes a BatchIterator prior to iteration.
type BatchIteratorOpt[KT any, VT any, KP KeyPointer[KT], VP ValuePointer[VT]] func(*BatchIterator[KT, VT, KP, VP]) *BatchIterator[KT, VT, KP, VP]

// WithEBPFBatchOpts returns a batch iterator option that allows for overriding
// BPF_MAP_LOOKUP_BATCH options.
func WithEBPFBatchOpts[KT, VT any, KP KeyPointer[KT], VP ValuePointer[VT]](opts *ebpf.BatchOptions) BatchIteratorOpt[KT, VT, KP, VP] {
	return func(in *BatchIterator[KT, VT, KP, VP]) *BatchIterator[KT, VT, KP, VP] {
		in.opts = opts
		return in
	}
}
// WithMaxRetries returns a batch iterator option that allows overriding the default
// max batch retries.
//
// Unless the starting chunk size is set to be the map size, it is possible for iteration
// to fail with ENOSPC if the allocated chunk array is not big enough to accommodate
// the bpf map's underlying hashtable bucket size.
//
// If this happens, BatchIterator will automatically attempt to double the batch size and
// retry the iteration from the same place - up to a number of retries.
func WithMaxRetries[KT, VT any, KP KeyPointer[KT], VP ValuePointer[VT]](retries uint32) BatchIteratorOpt[KT, VT, KP, VP] {
	return func(in *BatchIterator[KT, VT, KP, VP]) *BatchIterator[KT, VT, KP, VP] {
		in.maxDumpRetries = retries
		return in
	}
}
// WithStartingChunkSize returns a batch iterator option that allows overriding
// the dynamically chosen starting chunk size. Non-positive sizes fall back to
// a minimum chunk size of 8.
func WithStartingChunkSize[KT, VT any, KP KeyPointer[KT], VP ValuePointer[VT]](size int) BatchIteratorOpt[KT, VT, KP, VP] {
	return func(in *BatchIterator[KT, VT, KP, VP]) *BatchIterator[KT, VT, KP, VP] {
		if size > 0 {
			in.chunkSize = size
		} else {
			in.chunkSize = 8
		}
		return in
	}
}
// CountAll is a helper function that returns the count of all elements in a
// batched iterator, along with any error the iteration produced.
func CountAll[KT, VT any, KP KeyPointer[KT], VP ValuePointer[VT]](ctx context.Context, iter *BatchIterator[KT, VT, KP, VP]) (int, error) {
	count := 0
	for range iter.IterateAll(ctx) {
		count++
	}
	return count, iter.Err()
}
// startingChunkSize approximates a good initial batch buffer length for a map
// with maxEntries: the expected bucket fill sqrt(2*n), rounded up to the next
// power of two.
func startingChunkSize(maxEntries int) int {
	expectedBucket := math.Sqrt(float64(2 * maxEntries))
	exponent := math.Ceil(math.Log2(expectedBucket))
	return int(math.Pow(2, exponent))
}
// IterateAll returns an iterate Seq2 type which can be used to iterate a map
// using the batched API.
// In the case of the iteration failing due to insufficient batch buffer size,
// this will attempt to grow the buffer by a factor of 2 (up to a default: 3 amount
// of retries) and re-attempt the iteration.
// If the number of failures exceeds max retries, then iteration will stop and an error
// will be returned via Err().
//
// All other errors will result in immediate termination of iterator.
//
// If the iteration fails, then the Err() function will return the error that caused the failure.
func (bi *BatchIterator[KT, VT, KP, VP]) IterateAll(ctx context.Context, opts ...BatchIteratorOpt[KT, VT, KP, VP]) iter.Seq2[KP, VP] {
	// Batch lookup is only supported on hash-like map types.
	switch bi.m.Type() {
	case ebpf.Hash, ebpf.LRUHash, ebpf.LPMTrie:
		break
	default:
		bi.err = fmt.Errorf("unsupported map type %s, must be one either hash or lru-hash types", bi.m.Type())
		return func(yield func(KP, VP) bool) {}
	}

	bi.chunkSize = startingChunkSize(int(bi.m.MaxEntries()))
	for _, opt := range opts {
		if opt != nil {
			bi = opt(bi)
		}
	}

	// reset values
	bi.err = nil
	bi.batchSize = 0
	bi.keys = make([]KT, bi.chunkSize)
	bi.vals = make([]VT, bi.chunkSize)

	processed := 0
	var cursor ebpf.MapBatchCursor
	return func(yield func(KP, VP) bool) {
		if bi.Err() != nil {
			return
		}

	iterate:
		for {
			// Bail out promptly on context cancellation.
			if ctx.Err() != nil {
				bi.err = ctx.Err()
				return
			}

		retry:
			for retry := range bi.maxBatchedRetries() {
				// Attempt to read batch into buffer.
				c, batchErr := bi.m.BatchLookup(&cursor, bi.keys, bi.vals, nil)
				bi.batchSize = c
				// ErrKeyNotExist signals the final (possibly partial) batch.
				done := errors.Is(batchErr, ebpf.ErrKeyNotExist)

				// Lookup batch on LRU hash map may fail if the buffer passed is not big enough to
				// accommodate the largest bucket size in the LRU map [1]
				// Because bucket size, in general, cannot be known, we approximate a good starting
				// buffer size from the approximation of how many entries there should be in the map
				// before expect to see a hash map collision: sqrt(max_entries * 2)
				//
				// If we receive ENOSPC failures, we will try to recover by growing the batch buffer
				// size (up to some max number of retries - default: 3) and retrying the iteration.
				//
				// [1] https://elixir.bootlin.com/linux/v6.12.6/source/kernel/bpf/hashtab.c#L1807-L1809
				//
				// Note: If this failure happens during the bpf syscall, it is expected that the underlying
				// cursor will not have been swapped - meaning that we can retry the iteration at the same cursor.
				if errors.Is(batchErr, unix.ENOSPC) {
					if retry == bi.maxBatchedRetries()-1 {
						bi.err = batchErr
					} else {
						// Double the buffers and retry at the same cursor.
						bi.chunkSize *= 2
						bi.keys = make([]KT, bi.chunkSize)
						bi.vals = make([]VT, bi.chunkSize)
					}
					continue retry
				} else if !done && batchErr != nil {
					// If we're not done, and we didn't hit a ENOSPC then stop iteration and record
					// the error.
					bi.err = fmt.Errorf("failed to iterate map: %w", batchErr)
					return
				}

				// Yield all received pairs.
				for i := range bi.batchSize {
					processed++
					if !yield(&bi.keys[i], &bi.vals[i]) {
						break iterate
					}
				}

				if done {
					break iterate
				}
				break retry // finish retry loop for this batch.
			}
		}
	}
}
// Dump fills the given hash (map[string][]string) with all data stored in
// the BPF map, keyed by the string form of each map key.
func (m *Map) Dump(hash map[string][]string) error {
	return m.DumpWithCallback(func(key MapKey, value MapValue) {
		// No need to deep copy since we are creating strings.
		k := key.String()
		hash[k] = append(hash[k], value.String())
	})
}
// BatchLookup performs one BPF_MAP_LOOKUP_BATCH step on the underlying map,
// writing keys/values into keysOut/valuesOut and returning the count read.
func (m *Map) BatchLookup(cursor *ebpf.MapBatchCursor, keysOut, valuesOut any, opts *ebpf.BatchOptions) (int, error) {
	return m.m.BatchLookup(cursor, keysOut, valuesOut, opts)
}
// DumpIfExists dumps the contents of the map into hash via Dump() if the map
// file exists; otherwise it is a no-op.
func (m *Map) DumpIfExists(hash map[string][]string) error {
	found, err := m.exist()
	if err != nil || !found {
		return err
	}
	return m.Dump(hash)
}
// Lookup returns a freshly allocated value for the given key, opening the map
// if necessary. Records the syscall duration metric when enabled.
func (m *Map) Lookup(key MapKey) (MapValue, error) {
	if err := m.Open(); err != nil {
		return nil, err
	}

	m.lock.RLock()
	defer m.lock.RUnlock()

	var duration *spanstat.SpanStat
	if metrics.BPFSyscallDuration.IsEnabled() {
		duration = spanstat.Start()
	}

	value := m.value.New()
	err := m.m.Lookup(key, value)

	if metrics.BPFSyscallDuration.IsEnabled() {
		metrics.BPFSyscallDuration.WithLabelValues(metricOpLookup, metrics.Error2Outcome(err)).Observe(duration.End(err == nil).Total().Seconds())
	}

	if err != nil {
		return nil, err
	}

	return value, nil
}
// Update writes key/value into the BPF map, opening it if necessary. On
// completion (success or failure) the cache and event buffer are updated via
// the deferred closure below; a failed update leaves the cache entry marked
// Insert so the error resolver can retry it.
func (m *Map) Update(key MapKey, value MapValue) error {
	var err error

	m.lock.Lock()
	defer m.lock.Unlock()

	// This deferred closure observes the final value of err, so it must run
	// after the update attempt below.
	defer func() {
		desiredAction := OK
		if err != nil {
			// Mark the entry as pending insertion so resolveErrors retries it.
			desiredAction = Insert
		}
		entry := &cacheEntry{
			Key:           key,
			Value:         value,
			DesiredAction: desiredAction,
			LastError:     err,
		}
		m.addToEventsLocked(MapUpdate, *entry)

		if m.cache == nil {
			return
		}

		if m.withValueCache {
			if err != nil {
				m.scheduleErrorResolver()
			}
			m.cache[key.String()] = &cacheEntry{
				Key:           key,
				Value:         value,
				DesiredAction: desiredAction,
				LastError:     err,
			}
			m.updatePressureMetric()
		} else if err == nil {
			// Key-only cache (e.g. for pressure metrics): track presence only.
			m.cache[key.String()] = nil
			m.updatePressureMetric()
		}
	}()

	if err = m.open(); err != nil {
		return err
	}

	err = m.m.Update(key, value, ebpf.UpdateAny)

	if metrics.BPFMapOps.IsEnabled() {
		metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpUpdate, metrics.Error2Outcome(err)).Inc()
	}

	if err != nil {
		return fmt.Errorf("update map %s: %w", m.Name(), err)
	}

	return nil
}
// deleteMapEvent is run at every delete map event.
// If cache is enabled, it will update the cache to reflect the delete.
// As well, if event buffer is enabled, it adds a new event to the buffer.
func (m *Map) deleteMapEvent(key MapKey, err error) {
	m.addToEventsLocked(MapDelete, cacheEntry{
		Key:           key,
		DesiredAction: Delete,
		LastError:     err,
	})
	m.deleteCacheEntry(key, err)
}

// deleteAllMapEvent records a MapDeleteAll event in the event buffer, if
// the buffer is enabled.
func (m *Map) deleteAllMapEvent() {
	m.addToEventsLocked(MapDeleteAll, cacheEntry{})
}
// deleteCacheEntry evaluates the specified error, if nil the map key is
// removed from the cache to indicate successful deletion. If non-nil, the map
// key entry in the cache is updated to indicate deletion failure with the
// specified error.
//
// Caller must hold m.lock for writing
func (m *Map) deleteCacheEntry(key MapKey, err error) {
	if m.cache == nil {
		return
	}

	k := key.String()
	if err == nil {
		delete(m.cache, k)
	} else if !m.withValueCache {
		// Key-only caches do not track per-entry errors.
		return
	} else {
		entry, ok := m.cache[k]
		if !ok {
			// Create a placeholder entry so the failed deletion is retried.
			m.cache[k] = &cacheEntry{
				Key: key,
			}
			entry = m.cache[k]
		}

		entry.DesiredAction = Delete
		entry.LastError = err
		m.scheduleErrorResolver()
	}
}
// delete deletes the map entry corresponding to the given key. If ignoreMissing
// is set to true and the entry was not found, the error metric is not
// incremented for missing entries and nil error is returned.
//
// Caller must hold m.lock for writing.
func (m *Map) delete(key MapKey, ignoreMissing bool) (_ bool, err error) {
	defer func() {
		m.deleteMapEvent(key, err)
		// Refresh the pressure gauge unconditionally: a successful delete
		// removes the cached entry, and a failed delete may insert a pending
		// Delete entry into the cache (see deleteCacheEntry), so the cache
		// size can change on either path. Previously the gauge was refreshed
		// only on error, leaving it stale after successful deletions.
		m.updatePressureMetric()
	}()

	if err = m.open(); err != nil {
		return false, err
	}

	var duration *spanstat.SpanStat
	if metrics.BPFSyscallDuration.IsEnabled() {
		duration = spanstat.Start()
	}

	err = m.m.Delete(key)

	if metrics.BPFSyscallDuration.IsEnabled() {
		metrics.BPFSyscallDuration.WithLabelValues(metricOpDelete, metrics.Error2Outcome(err)).Observe(duration.End(err == nil).Total().Seconds())
	}

	if errors.Is(err, ebpf.ErrKeyNotExist) && ignoreMissing {
		// Error and metrics handling is skipped in case ignoreMissing is set and
		// the map key did not exist. This removes false positives in the delete
		// metrics and skips the deferred cleanup of nonexistent entries. This
		// situation occurs at least in the context of cleanup of NAT mappings from
		// CT GC.
		return false, nil
	}

	if metrics.BPFMapOps.IsEnabled() {
		// err can be nil or any error other than ebpf.ErrKeyNotExist.
		metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpDelete, metrics.Error2Outcome(err)).Inc()
	}

	if err != nil {
		return false, fmt.Errorf("unable to delete element %s from map %s: %w", key, m.name, err)
	}

	return true, nil
}
// SilentDelete deletes the map entry corresponding to the given key.
// If a map entry is not found this returns (false, nil).
func (m *Map) SilentDelete(key MapKey) (deleted bool, err error) {
	m.lock.Lock()
	defer m.lock.Unlock()

	return m.delete(key, true)
}

// Delete deletes the map entry corresponding to the given key. A missing
// entry is reported as an error.
func (m *Map) Delete(key MapKey) error {
	m.lock.Lock()
	defer m.lock.Unlock()

	_, err := m.delete(key, false)
	return err
}

// DeleteLocked deletes the map entry for the given key.
//
// This method must be called from within a DumpCallback to avoid deadlocks,
// as it assumes the m.lock is already acquired.
func (m *Map) DeleteLocked(key MapKey) error {
	_, err := m.delete(key, false)

	return err
}
// DeleteAll deletes all entries of a map by traversing the map and deleting individual
// entries. Note that if entries are added while the traversal is in progress,
// such entries may survive the deletion process.
func (m *Map) DeleteAll() error {
	m.lock.Lock()
	defer m.lock.Unlock()
	defer m.updatePressureMetric()
	m.Logger.Debug("deleting all entries in map")

	if m.withValueCache {
		// Mark all entries for deletion, upon successful deletion,
		// entries will be removed or the LastError will be updated
		for _, entry := range m.cache {
			entry.DesiredAction = Delete
			entry.LastError = fmt.Errorf("deletion pending")
		}
	}

	if err := m.open(); err != nil {
		return err
	}

	mk := m.key.New()
	// Raw byte buffer: the value content is irrelevant for deletion.
	mv := make([]byte, m.ValueSize())

	defer m.deleteAllMapEvent()

	i := m.m.Iterate()
	for i.Next(mk, &mv) {
		err := m.m.Delete(mk)

		m.deleteCacheEntry(mk, err)

		if err != nil {
			return err
		}
	}

	err := i.Err()
	if err != nil {
		m.Logger.Warn(
			"Unable to correlate iteration key with cache entry. Inconsistent cache.",
			logfields.Error, err,
			logfields.Key, mk,
		)
	}

	return err
}
// ClearAll zeroes the value of every entry in the map by writing an empty
// value over each key. It is unsupported for maps with an event buffer or
// value cache, since those would go stale.
func (m *Map) ClearAll() error {
	if m.eventsBufferEnabled || m.withValueCache {
		return fmt.Errorf("clear map: events buffer and value cache are not supported")
	}

	m.lock.Lock()
	defer m.lock.Unlock()
	defer m.updatePressureMetric()

	if err := m.open(); err != nil {
		return err
	}

	mk := m.key.New()
	var mv any
	if m.hasPerCPUValue() {
		mv = m.value.(MapPerCPUValue).NewSlice()
	} else {
		mv = m.value.New()
	}

	// empty is the zero value the entries are overwritten with.
	empty := reflect.Indirect(reflect.ValueOf(mv)).Interface()

	i := m.m.Iterate()
	for i.Next(mk, mv) {
		err := m.m.Update(mk, empty, ebpf.UpdateAny)

		if metrics.BPFMapOps.IsEnabled() {
			metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpUpdate, metrics.Error2Outcome(err)).Inc()
		}

		if err != nil {
			return err
		}
	}

	return i.Err()
}
// GetModel returns a BPF map in the representation served via the API.
// With a value cache, the model is built from the cache; otherwise the
// kernel map is dumped directly.
func (m *Map) GetModel() *models.BPFMap {
	mapModel := &models.BPFMap{
		Path: m.path,
	}

	// NOTE(review): len(m.cache) is read before taking the lock below —
	// presumably acceptable as a capacity hint only; confirm.
	mapModel.Cache = make([]*models.BPFMapEntry, 0, len(m.cache))
	if m.withValueCache {
		m.lock.RLock()
		defer m.lock.RUnlock()
		for k, entry := range m.cache {
			model := &models.BPFMapEntry{
				Key:           k,
				DesiredAction: entry.DesiredAction.String(),
			}
			if entry.LastError != nil {
				model.LastError = entry.LastError.Error()
			}
			if entry.Value != nil {
				model.Value = entry.Value.String()
			}
			mapModel.Cache = append(mapModel.Cache, model)
		}
		return mapModel
	}

	stats := NewDumpStats(m)
	filterCallback := func(key MapKey, value MapValue) {
		mapModel.Cache = append(mapModel.Cache, &models.BPFMapEntry{
			Key:   key.String(),
			Value: value.String(),
		})
	}

	// Best-effort dump: a partial model is returned on error.
	m.DumpReliablyWithCallback(filterCallback, stats)
	return mapModel
}
// addToEventsLocked appends a map event to the event buffer, if enabled.
// Caller must hold m.lock.
func (m *Map) addToEventsLocked(action Action, entry cacheEntry) {
	if !m.eventsBufferEnabled {
		return
	}
	m.events.add(&Event{
		action:     action,
		Timestamp:  time.Now(),
		cacheEntry: entry,
	})
}
// resolveErrors is scheduled by scheduleErrorResolver() and runs periodically.
// It resolves up to maxSyncErrors discrepancies between cache and BPF map in
// the kernel.
func (m *Map) resolveErrors(ctx context.Context) error {
	started := time.Now()

	m.lock.Lock()
	defer m.lock.Unlock()

	if m.cache == nil {
		return nil
	}

	if !m.outstandingErrors {
		return nil
	}

	// Count entries still pending an Insert or Delete.
	outstanding := 0
	for _, e := range m.cache {
		switch e.DesiredAction {
		case Insert, Delete:
			outstanding++
		}
	}

	// Errors appear to have already been resolved. This can happen if a subsequent
	// Update/Delete operation acting on the same key succeeded.
	if outstanding == 0 {
		m.outstandingErrors = false
		return nil
	}

	if err := m.open(); err != nil {
		return err
	}

	m.Logger.Debug(
		"Starting periodic BPF map error resolver",
		logfields.Remaining, outstanding,
	)

	resolved := 0
	scanned := 0
	nerr := 0
	for k, e := range m.cache {
		scanned++

		switch e.DesiredAction {
		case OK:
		case Insert:
			// Call into ebpf-go's Map.Update() directly, don't go through the cache.
			err := m.m.Update(e.Key, e.Value, ebpf.UpdateAny)
			if metrics.BPFMapOps.IsEnabled() {
				metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpUpdate, metrics.Error2Outcome(err)).Inc()
			}
			if err == nil {
				e.DesiredAction = OK
				e.LastError = nil
				resolved++
				outstanding--
			} else {
				e.LastError = err
				nerr++
			}
			m.cache[k] = e
			m.addToEventsLocked(MapUpdate, *e)
		case Delete:
			// Holding lock, issue direct delete on map.
			err := m.m.Delete(e.Key)
			if metrics.BPFMapOps.IsEnabled() {
				metrics.BPFMapOps.WithLabelValues(m.commonName(), metricOpDelete, metrics.Error2Outcome(err)).Inc()
			}
			// A missing key counts as a successful deletion.
			if err == nil || errors.Is(err, ebpf.ErrKeyNotExist) {
				delete(m.cache, k)
				resolved++
				outstanding--
			} else {
				e.LastError = err
				nerr++
				m.cache[k] = e
			}
			m.addToEventsLocked(MapDelete, *e)
		}

		// bail out if maximum errors are reached to relax the map lock
		if nerr > maxSyncErrors {
			break
		}
	}

	m.updatePressureMetric()

	m.Logger.Debug(
		"BPF map error resolver completed",
		logfields.Remaining, outstanding,
		logfields.Resolved, resolved,
		logfields.Scanned, scanned,
		logfields.Duration, time.Since(started),
	)

	m.outstandingErrors = outstanding > 0
	if m.outstandingErrors {
		return fmt.Errorf("%d map sync errors", outstanding)
	}

	return nil
}
// CheckAndUpgrade checks the received map's properties (for the map currently
// loaded into the kernel) against the desired properties, and if they do not
// match, deletes the map.
//
// Returns true if the map was upgraded.
func (m *Map) CheckAndUpgrade(desired *Map) bool {
	// Compare against the desired flags including the memory flags implied
	// by the desired map type.
	flags := desired.Flags() | GetMapMemoryFlags(desired.Type())

	return objCheck(
		m.Logger,
		m.m,
		m.path,
		desired.Type(),
		desired.KeySize(),
		desired.ValueSize(),
		desired.MaxEntries(),
		flags,
	)
}
// exist reports whether the map's pin file is present on the filesystem.
// Any stat failure (including a missing file) is reported as "not present".
func (m *Map) exist() (bool, error) {
	pinPath, err := m.Path()
	if err != nil {
		return false, err
	}

	_, statErr := os.Stat(pinPath)
	return statErr == nil, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build linux
package bpf
import (
"log/slog"
"path"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
)
var (
	// mutex serializes access to mapRegister below.
	mutex lock.RWMutex
	// mapRegister indexes every registered map by its bpffs pin path.
	mapRegister = map[string]*Map{}
)
// registerMap records m in the global registry under the given bpffs path.
func registerMap(logger *slog.Logger, path string, m *Map) {
	mutex.Lock()
	mapRegister[path] = m
	mutex.Unlock()

	logger.Debug("Registered BPF map", logfields.Path, path)
}
// unregisterMap drops the registration at path from the global registry.
// The m argument is unused but kept for symmetry with registerMap.
func unregisterMap(logger *slog.Logger, path string, m *Map) {
	mutex.Lock()
	delete(mapRegister, path)
	mutex.Unlock()

	logger.Debug("Unregistered BPF map", logfields.Path, path)
}
// GetMap returns the registered map with the given name or absolute path
func GetMap(logger *slog.Logger, name string) *Map {
	mutex.RLock()
	defer mutex.RUnlock()

	// Relative names are resolved to their bpffs pin path first, since the
	// registry is keyed by absolute path.
	key := name
	if !path.IsAbs(key) {
		key = MapPath(logger, key)
	}
	return mapRegister[key]
}
// GetOpenMaps returns a slice of all open BPF maps. This is identical to
// calling GetMap() on all open maps.
func GetOpenMaps() []*models.BPFMap {
	// Snapshot the registry first and release the mutex: taking Map.lock
	// (inside GetModel) while holding the registry mutex is not permitted.
	mutex.RLock()
	open := make([]*Map, 0, len(mapRegister))
	for _, m := range mapRegister {
		open = append(open, m)
	}
	mutex.RUnlock()

	out := make([]*models.BPFMap, len(open))
	for i, m := range open {
		out[i] = m.GetModel()
	}
	return out
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"context"
"github.com/cilium/hive/job"
"github.com/cilium/statedb"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/time"
)
// Operation labels used with metrics.BPFMapOps to attribute map syscalls.
const (
	metricOpCreate     = "create"
	metricOpUpdate     = "update"
	metricOpLookup     = "lookup"
	metricOpDelete     = "delete"
	metricOpGetNextKey = "getNextKey"
)
const (
	// tablePressureMetricsInterval is how often the pressure gauge of a
	// table-backed BPF map is refreshed.
	tablePressureMetricsInterval = 30 * time.Second // Interval for updating the pressure gauge
)
// mapPressureMetricsOps is the subset of Map operations needed to compute
// the pressure metric of a BPF map.
type mapPressureMetricsOps interface {
	IsOpen() bool
	NonPrefixedName() string
	MaxEntries() uint32
}
// RegisterTablePressureMetricsJob adds a timer job to track the map pressure of a BPF map
// where the desired state is stored in a StateDB table.
//
// Example usage:
//
//	type myBPFMap struct { *bpf.Map }
//	cell.Invoke(
//		bpf.RegisterTablePressureMetricsJob[MyObj, myBPFMap],
//	)
func RegisterTablePressureMetricsJob[Obj any, Map mapPressureMetricsOps](g job.Group, registry *metrics.Registry, db *statedb.DB, table statedb.Table[Obj], m Map) {
	mapName := m.NonPrefixedName()
	var gauge *metrics.GaugeWithThreshold
	update := func(context.Context) error {
		if !m.IsOpen() {
			// Map not opened, do nothing.
			return nil
		}
		// Create the gauge lazily on first use.
		if gauge == nil {
			gauge = registry.NewBPFMapPressureGauge(mapName, 0.0)
		}
		// Pressure = desired entries (table size) / map capacity.
		gauge.Set(float64(table.NumObjects(db.ReadTxn())) / float64(m.MaxEntries()))
		return nil
	}
	g.Add(job.Timer("pressure-metric-"+mapName, update, tablePressureMetricsInterval))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"context"
"encoding"
"errors"
"iter"
"reflect"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/statedb"
"github.com/cilium/statedb/reconciler"
"k8s.io/apimachinery/pkg/util/sets"
)
// ErrMapNotOpened is returned when the MapOps is used with a BPF map that is not open yet.
// In general this should be avoided and the map should be opened in a start hook before
// the reconciler. If the map won't be used then the reconciler should not be started.
// Compare with errors.Is.
var ErrMapNotOpened = errors.New("BPF map has not been opened")
// KeyValue is the interface that a BPF map value object must implement.
//
// The object can either store the key and value directly in struct form
// and use StructBinaryMarshaler{}, or it can implement conversion to binary
// form on the fly by implementing BinaryMarshaler by hand.
type KeyValue interface {
	// BinaryKey returns the marshaler for the BPF map key.
	BinaryKey() encoding.BinaryMarshaler
	// BinaryValue returns the marshaler for the BPF map value.
	BinaryValue() encoding.BinaryMarshaler
}
// StructBinaryMarshaler implements a BinaryMarshaler for a struct of
// primitive fields. Same caveats apply as with cilium/ebpf when using a
// struct as key or value.
// Example usage:
//
//	func (x *X) Key() encoding.BinaryMarshaler {
//		return StructBinaryMarshaler{x}
//	}
type StructBinaryMarshaler struct {
	Target any // pointer to struct
}
// MarshalBinary returns the raw memory of the struct pointed to by Target as
// a byte slice. The slice aliases the struct's memory and must not outlive it.
func (m StructBinaryMarshaler) MarshalBinary() ([]byte, error) {
	target := reflect.ValueOf(m.Target)
	size := target.Type().Elem().Size()
	return unsafe.Slice((*byte)(target.UnsafePointer()), int(size)), nil
}
// mapOps implements reconciler.Operations for keeping a bpf Map in sync with
// a desired-state table of KV objects.
type mapOps[KV KeyValue] struct {
	m *Map
}
// NewMapOps creates the reconciler operations for the given map, with
// desired-state entries of type KV.
func NewMapOps[KV KeyValue](m *Map) reconciler.Operations[KV] {
	return &mapOps[KV]{m: m}
}
// withMap runs do with the underlying ebpf.Map while holding the map's read
// lock, or returns ErrMapNotOpened if the map has not been opened yet.
func (ops *mapOps[KV]) withMap(do func(m *ebpf.Map) error) error {
	ops.m.lock.RLock()
	defer ops.m.lock.RUnlock()

	ebpfMap := ops.m.m
	if ebpfMap == nil {
		return ErrMapNotOpened
	}
	return do(ebpfMap)
}
// Delete implements reconciler.Operations. Deleting a key that does not
// exist in the map is treated as success.
func (ops *mapOps[KV]) Delete(ctx context.Context, txn statedb.ReadTxn, _ statedb.Revision, entry KV) error {
	return ops.withMap(func(m *ebpf.Map) error {
		// Use the map handle provided by withMap instead of reaching
		// through ops.m.m again (same map, but consistent with Update/Prune).
		err := m.Delete(entry.BinaryKey())
		if errors.Is(err, ebpf.ErrKeyNotExist) {
			// Silently ignore deletions of non-existing keys.
			return nil
		}
		return err
	})
}
// keyIterator walks the keys of an ebpf.Map via NextKeyBytes, returning at
// most maxEntries keys to avoid iterating forever when the map is mutated
// concurrently.
type keyIterator struct {
	m          *ebpf.Map
	nextKey    []byte // last key returned; nil before the first Next call
	err        error  // first error encountered, surfaced via Err
	maxEntries uint32 // number of keys this iterator is still willing to return
}
// Err returns the first error encountered during iteration, if any.
func (it *keyIterator) Err() error {
	return it.err
}
// Next returns the next key in the map, or nil when iteration has finished,
// failed (check Err) or the maxEntries budget is exhausted.
func (it *keyIterator) Next() []byte {
	if it.maxEntries == 0 {
		return nil
	}
	// NextKeyBytes(nil) returns the first key in the map, so the very first
	// call (it.nextKey == nil) needs no special casing.
	key, err := it.m.NextKeyBytes(it.nextKey)
	it.err = err
	if key == nil || err != nil {
		return nil
	}
	it.nextKey = key
	it.maxEntries--
	return key
}
// toStringKey renders the binary key of kv as a Go string so that it can be
// used as a set element. The MarshalBinary error is deliberately ignored;
// key marshaling is assumed not to fail here — a failure would yield an
// empty key (TODO confirm marshalers used with mapOps are infallible).
func (ops *mapOps[KV]) toStringKey(kv KV) string {
	key, _ := kv.BinaryKey().MarshalBinary()
	return string(key)
}
// Prune BPF map values that do not exist in the table.
func (ops *mapOps[KV]) Prune(ctx context.Context, txn statedb.ReadTxn, objs iter.Seq2[KV, statedb.Revision]) error {
	return ops.withMap(func(m *ebpf.Map) error {
		// Build the set of keys that should remain in the map.
		wanted := sets.New[string]()
		for obj := range objs {
			wanted.Insert(ops.toStringKey(obj))
		}

		// We need to collect the keys to prune first, as it is not safe to
		// delete entries while iterating
		keysToPrune := [][]byte{}
		it := &keyIterator{m, nil, nil, m.MaxEntries()}
		for key := it.Next(); key != nil; key = it.Next() {
			if !wanted.Has(string(key)) {
				keysToPrune = append(keysToPrune, key)
			}
		}

		var errs []error
		for _, key := range keysToPrune {
			if err := m.Delete(key); err != nil {
				errs = append(errs, err)
			}
		}
		// Include any iteration error; errors.Join drops nils.
		errs = append(errs, it.Err())
		return errors.Join(errs...)
	})
}
// Update the BPF map value to match with the object in the desired state table.
func (ops *mapOps[KV]) Update(ctx context.Context, txn statedb.ReadTxn, _ statedb.Revision, entry KV) error {
	put := func(m *ebpf.Map) error {
		return m.Put(entry.BinaryKey(), entry.BinaryValue())
	}
	return ops.withMap(put)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"errors"
"fmt"
"log/slog"
"os"
"path"
"github.com/cilium/ebpf"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// Cilium-specific pinning flags used in bpf C code to request specific pinning
// behaviour from the agent.
const (
	// PinReplace matches CILIUM_PIN_REPLACE.
	PinReplace = ebpf.PinType(1 << 4)
)
// consumePinReplace returns the key names of MapSpecs in spec with the
// CILIUM_PIN_REPLACE pinning flag set. Clears the CILIUM_PIN_REPLACE flag.
func consumePinReplace(spec *ebpf.CollectionSpec) []string {
	var replaced []string
	for key, ms := range spec.Maps {
		if ms.Pinning != PinReplace {
			continue
		}
		ms.Pinning = 0
		replaced = append(replaced, key)
	}
	return replaced
}
// incompatibleMaps returns the key names of MapSpecs in spec with the
// LIBBPF_PIN_BY_NAME pinning flag that are incompatible with their pinned
// counterparts. Removes the LIBBPF_PIN_BY_NAME flag. opts.Maps.PinPath must be
// specified.
//
// The slice of strings returned contains the keys used in Collection.Maps and
// CollectionSpec.Maps, which can differ from the Map's Name field.
func incompatibleMaps(spec *ebpf.CollectionSpec, opts ebpf.CollectionOptions) ([]string, error) {
	if opts.Maps.PinPath == "" {
		return nil, errors.New("missing opts.Maps.PinPath")
	}

	var incompatible []string
	for key, ms := range spec.Maps {
		if ms.Pinning != ebpf.PinByName {
			continue
		}

		pinned, err := ebpf.LoadPinnedMap(path.Join(opts.Maps.PinPath, ms.Name), nil)
		if errors.Is(err, unix.ENOENT) {
			// No pinned counterpart; nothing to compare against.
			continue
		}
		if err != nil {
			return nil, fmt.Errorf("opening map %s from pin: %w", ms.Name, err)
		}
		compatible := ms.Compatible(pinned) == nil
		// The handle was only needed for the compatibility check; close it
		// in both cases. (Previously the incompatible path leaked the fd.)
		pinned.Close()
		if compatible {
			continue
		}

		// Strip the pin flag so ebpf-go creates a fresh map instead of
		// reusing the incompatible pin.
		ms.Pinning = 0
		incompatible = append(incompatible, key)
	}
	return incompatible, nil
}
// toPin pairs a created ebpf.Map with the bpffs path it should be pinned at.
type toPin struct {
	m    *ebpf.Map
	path string
}
// mapsToReplace returns a slice of paths and ebpf.Maps corresponding to the
// entries provided in toReplace. Commit the returned pins after attaching the
// Collection's programs to the required hooks using [commitMapPins].
//
// This has two main purposes: replacing pins of incompatible maps that are
// upgraded or downgraded by a different version of the agent, as well as
// replacing pins of maps that should never be reused/repopulated by the loader,
// like tail call maps.
//
// Letting the loader repopulate an existing tail call map will transition the
// program through invalid states. For example, code can be moved from one tail
// call to another, making some instructions execute twice or not at all
// depending on the order the tail calls were inserted.
func mapsToReplace(toReplace []string, spec *ebpf.CollectionSpec, coll *ebpf.Collection, opts ebpf.CollectionOptions) ([]toPin, error) {
	if len(toReplace) > 0 && opts.Maps.PinPath == "" {
		return nil, errors.New("empty Maps.PinPath in CollectionOptions")
	}

	// We need both Map and MapSpec as the pin path is derived from MapSpec.Name,
	// and can be modified before creating the Collection. Maps are often renamed
	// to give them unique bpffs pin paths. MapInfo is/was truncated to 20 chars,
	// so we need the MapSpec.Name.
	var pins []toPin
	for _, key := range toReplace {
		m, haveMap := coll.Maps[key]
		if !haveMap {
			return nil, fmt.Errorf("Map %s not found in Collection", key)
		}
		ms, haveSpec := spec.Maps[key]
		if !haveSpec {
			return nil, fmt.Errorf("MapSpec %s not found in CollectionSpec", key)
		}
		if m.IsPinned() {
			return nil, fmt.Errorf("Map %s was already pinned by ebpf-go: LIBBPF_PIN_BY_NAME and CILIUM_PIN_REPLACE are mutually exclusive", ms.Name)
		}
		pins = append(pins, toPin{m: m, path: path.Join(opts.Maps.PinPath, ms.Name)})
	}
	return pins, nil
}
// commitMapPins removes the given map pins and replaces them with the
// corresponding Maps. This is to be called after the Collection's programs have
// been attached to the required hooks. Any existing pins are overwritten.
func commitMapPins(logger *slog.Logger, pins []toPin) error {
	for _, p := range pins {
		// A missing pin is fine; anything else is fatal.
		err := os.Remove(p.path)
		if err != nil && !errors.Is(err, os.ErrNotExist) {
			return fmt.Errorf("removing map pin at %s: %w", p.path, err)
		}
		if err := p.m.Pin(p.path); err != nil {
			return fmt.Errorf("pinning map to %s: %w", p.path, err)
		}
		logger.Debug("Replaced map pin", logfields.Pin, p.path)
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build linux
package bpf
import (
"github.com/cilium/cilium/pkg/time"
)
// DumpStats tracks statistics over the dump of a map.
type DumpStats struct {
	// Started is the timestamp when the gc run was started.
	Started time.Time
	// Finished is the timestamp when the gc run completed.
	Finished time.Time
	// Lookup is the number of key lookups performed.
	Lookup uint32
	// LookupFailed is the number of key lookups that failed.
	LookupFailed uint32
	// PrevKeyUnavailable is the number of times the previous key was not
	// available.
	PrevKeyUnavailable uint32
	// KeyFallback is the number of times the current key became invalid
	// while traversing and we had to fall back to the previous key.
	KeyFallback uint32
	// MaxEntries is the maximum number of entries in the gc table.
	MaxEntries uint32
	// Interrupted is the number of times the gc run was interrupted and
	// had to start from scratch.
	Interrupted uint32
	// Completed is true when the gc run has been completed.
	Completed bool
}
// NewDumpStats returns a new stats structure for collecting dump statistics,
// pre-filled with the map's capacity.
func NewDumpStats(m *Map) *DumpStats {
	stats := &DumpStats{}
	stats.MaxEntries = m.MaxEntries()
	return stats
}
// start starts the dump by recording the current time in Started.
func (d *DumpStats) start() {
	d.Started = time.Now()
}
// finish finishes the dump by recording the current time in Finished.
func (d *DumpStats) finish() {
	d.Finished = time.Now()
}
// Duration returns the duration of the dump (Finished - Started).
func (d *DumpStats) Duration() time.Duration {
	return d.Finished.Sub(d.Started)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bpf
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
"github.com/cilium/cilium/pkg/bpf/analyze"
"github.com/cilium/cilium/pkg/container/set"
)
// poisonedMapLoad is a special value that is used to replace map load
// instructions that reference an unused map. It is recognizable in verifier
// logs should a poisoned load ever be executed.
const poisonedMapLoad = 0xdeadc0de
// removeUnusedMaps analyzes the given spec to detect which parts of the code
// will be unreachable given the VariableSpecs. It then removes any MapSpecs
// that are not used by any Program.
//
// keep may be nil; the returned set holds all map names that were retained
// (the same set instance that was passed in, when non-nil).
func removeUnusedMaps(spec *ebpf.CollectionSpec, keep *set.Set[string]) (*set.Set[string], error) {
	if keep == nil {
		k := set.NewSet[string]()
		keep = &k
	}
	// VariableSpec's underlying maps always need to remain part of the
	// CollectionSpec, even if the code doesn't reference them.
	for _, v := range spec.Variables {
		keep.Insert(v.MapName())
	}
	// When populating a map-in-map with contents (other maps) defined at
	// compile time, we need to ensure the inner maps are not pruned
	// since they will not be directly referenced in the code.
	for _, m := range spec.Maps {
		if m.Type != ebpf.ArrayOfMaps && m.Type != ebpf.HashOfMaps {
			continue
		}
		for _, c := range m.Contents {
			if inner, ok := c.Value.(string); ok {
				keep.Insert(inner)
			}
		}
	}
	for name, prog := range spec.Programs {
		// Load Blocks computed after compilation, or compute new ones.
		bl, err := analyze.MakeBlocks(prog.Instructions)
		if err != nil {
			return nil, fmt.Errorf("computing Blocks for Program %s: %w", prog.Name, err)
		}
		// Analyze reachability given the VariableSpecs provided at load time.
		bl, err = analyze.Reachability(bl, prog.Instructions, analyze.VariableSpecs(spec.Variables))
		if err != nil {
			return nil, fmt.Errorf("reachability analysis for program %s: %w", name, err)
		}
		// Record which maps are still referenced after reachability analysis.
		for ins, live := range bl.LiveInstructions(prog.Instructions) {
			if !ins.IsLoadFromMap() {
				continue
			}
			if live {
				// Mark the map as used, so it won't be pruned from the CollectionSpec.
				keep.Insert(ins.Reference())
			} else {
				// Remove all map references from unreachable instructions. Replace the map
				// pointer load instruction with a new instruction with a recognizable
				// poison value, without a metadata reference to the MapSpec. This will
				// prevent LoadAndAssign from pulling in the map unconditionally during
				// lazy-loading.
				//
				// If, for whatever reason, we caused a false positive and the program
				// attempts to use this value as map pointer, it should be clear from
				// the verifier log.
				*ins = asm.LoadImm(ins.Dst, poisonedMapLoad, asm.DWord)
			}
		}
	}
	// Delete unused MapSpecs so ebpf-go doesn't create them when using
	// LoadCollection.
	for name := range spec.Maps {
		if !keep.Has(name) {
			delete(spec.Maps, name)
		}
	}
	return keep, nil
}
// verifyUnusedMaps makes sure that all Maps appearing in the Collection are
// actually used by at least one Program in the Collection after the verifier
// has done its dead code elimination.
//
// This validates Cilium's user space dead code elimination logic, which removes
// unused MapSpecs from the CollectionSpec before loading it into the kernel.
//
// It should only be invoked in debug mode, since it's expensive to run.
func verifyUnusedMaps(coll *ebpf.Collection, ignore *set.Set[string]) error {
	mapsByID := make(map[ebpf.MapID]string)
	unused := set.NewSet[string]()
	for name, m := range coll.Maps {
		info, err := m.Info()
		if err != nil {
			return fmt.Errorf("getting map info for %s: %w", name, err)
		}
		// Previously named 'bool', shadowing the predeclared identifier.
		id, ok := info.ID()
		if !ok {
			return fmt.Errorf("no map ID for map %s", name)
		}
		mapsByID[id] = name
		// If the map is in the ignore set, always consider it used. This is for
		// maps like .rodata that are never removed from the CollectionSpec since
		// they are referenced by VariableSpecs.
		if ignore == nil || !ignore.Has(name) {
			unused.Insert(name)
		}
	}
	for progName, prog := range coll.Programs {
		info, err := prog.Info()
		if err != nil {
			return fmt.Errorf("getting info for program %s: %w", progName, err)
		}
		insns, err := info.Instructions()
		if err != nil {
			return fmt.Errorf("getting instructions for program %s: %w", progName, err)
		}
		// Find all live maps after the verifier's dead code elimination.
		for _, ins := range insns {
			if !ins.IsLoadFromMap() {
				continue
			}
			id := ebpf.MapID(ins.Constant)
			mapName, found := mapsByID[id]
			if !found {
				// Report the *program* name here; the old code shadowed it
				// with the (empty) map name and printed the wrong value.
				return fmt.Errorf("program %s references map with unknown ID %d", progName, id)
			}
			// Map appears in the instruction stream, so it's being used by the Program.
			unused.Remove(mapName)
		}
	}
	if unused.Len() > 0 {
		return fmt.Errorf("unused maps after dead code elimination: %s", unused.AsSlice())
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package byteorder
import (
"net"
"net/netip"
)
// NetIPv4ToHost32 converts an net.IP to a uint32 in host byte order. ip
// must be a IPv4 address, otherwise the function will panic.
func NetIPv4ToHost32(ip net.IP) uint32 {
	v4 := ip.To4()
	_ = v4[3] // Panic early if ip is not a 4-byte IPv4 address.
	return Native.Uint32(v4)
}
// NetIPAddrToHost32 converts a netip.Addr to a uint32 in host byte order.
// ip must be an IPv4 address; As4 panics otherwise.
func NetIPAddrToHost32(ip netip.Addr) uint32 {
	ipv4 := ip.As4()
	return Native.Uint32(ipv4[:])
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build 386 || amd64 || arm || arm64 || mips64le || ppc64le || riscv64 || wasm || loong64
package byteorder
import (
"encoding/binary"
"math/bits"
)
// Native is the byte order of the host CPU; every architecture in this
// file's build constraint is little-endian.
var Native binary.ByteOrder = binary.LittleEndian
// HostToNetwork16 converts u from host (little-endian) to network byte order.
func HostToNetwork16(u uint16) uint16 {
	return bits.ReverseBytes16(u)
}

// HostToNetwork32 converts u from host (little-endian) to network byte order.
func HostToNetwork32(u uint32) uint32 {
	return bits.ReverseBytes32(u)
}

// HostToNetwork64 converts u from host (little-endian) to network byte order.
func HostToNetwork64(u uint64) uint64 {
	return bits.ReverseBytes64(u)
}

// NetworkToHost16 converts u from network to host (little-endian) byte order.
func NetworkToHost16(u uint16) uint16 {
	return bits.ReverseBytes16(u)
}

// NetworkToHost32 converts u from network to host (little-endian) byte order.
func NetworkToHost32(u uint32) uint32 {
	return bits.ReverseBytes32(u)
}

// NetworkToHost64 converts u from network to host (little-endian) byte order.
func NetworkToHost64(u uint64) uint64 {
	return bits.ReverseBytes64(u)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cgroups
import (
"log/slog"
"sync"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/logging/logfields"
)
var (
	// cgroupRoot is the path to where cgroup v2 is (or will be) mounted.
	cgroupRoot = defaults.DefaultCgroupRoot

	// cgrpMountOnce ensures only a single mount attempt is ever made.
	cgrpMountOnce sync.Once
)
// setCgroupRoot will set the path to mount cgroupv2.
// Not guarded by a lock — presumably only reached via the cgrpMountOnce
// path in CheckOrMountCgrpFS; confirm before adding new callers.
func setCgroupRoot(path string) {
	cgroupRoot = path
}
// GetCgroupRoot returns the path for the cgroupv2 mount.
func GetCgroupRoot() string {
	return cgroupRoot
}
// CheckOrMountCgrpFS this checks if the cilium cgroup2 root mount point is
// mounted and if not mounts it. If mapRoot is "" it will mount the default
// location. It is harmless to have multiple cgroupv2 root mounts so unlike
// BPFFS case we simply mount at the cilium default regardless if the system
// has another mount created by systemd or otherwise.
func CheckOrMountCgrpFS(logger *slog.Logger, mapRoot string) {
	cgrpMountOnce.Do(func() {
		root := mapRoot
		if root == "" {
			root = cgroupRoot
		}
		if err := cgrpCheckOrMountLocation(root); err != nil {
			logger.Warn(
				"Failed to mount cgroupv2. Any functionality that needs cgroup (e.g.: socket-based LB) will not work.",
				logfields.Error, err,
			)
			return
		}
		logger.Info(
			"Mounted cgroupv2 filesystem",
			logfields.Location, root,
		)
	})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cgroups
import (
"fmt"
"os"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/mountinfo"
)
// mountCgroup mounts the Cgroup v2 filesystem into the desired cgroupRoot directory.
func mountCgroup() error {
	st, err := os.Stat(cgroupRoot)
	switch {
	case err == nil:
		// Path exists: it must be a directory to mount over.
		if !st.IsDir() {
			return fmt.Errorf("%s is a file which is not a directory", cgroupRoot)
		}
	case os.IsNotExist(err):
		if err := os.MkdirAll(cgroupRoot, 0755); err != nil {
			return fmt.Errorf("Unable to create cgroup mount directory: %w", err)
		}
	default:
		return fmt.Errorf("Failed to stat the mount path %s: %w", cgroupRoot, err)
	}

	if err := unix.Mount("none", cgroupRoot, "cgroup2", 0, ""); err != nil {
		return fmt.Errorf("failed to mount %s: %w", cgroupRoot, err)
	}
	return nil
}
// cgrpCheckOrMountLocation tries to check or mount the cgroup filesystem in the
// given path. (Comment previously referred to a "checkOrMountCustomLocation"
// function that does not exist here.)
func cgrpCheckOrMountLocation(cgroupRoot string) error {
	setCgroupRoot(cgroupRoot)

	// Check whether the custom location has a mount.
	mounted, cgroupInstance, err := mountinfo.IsMountFS(mountinfo.FilesystemTypeCgroup2, cgroupRoot)
	if err != nil {
		return err
	}

	// If the custom location has no mount, let's mount there.
	if !mounted {
		return mountCgroup()
	} else if !cgroupInstance {
		return fmt.Errorf("Mount in the custom directory %s has a different filesystem than cgroup2", cgroupRoot)
	}
	return nil
}
// GetCgroupID returns the cgroup id for the cgroup at cgroupPath by reading
// it out of the path's file handle.
func GetCgroupID(cgroupPath string) (uint64, error) {
	handle, _, err := unix.NameToHandleAt(unix.AT_FDCWD, cgroupPath, 0)
	if err != nil {
		return 0, fmt.Errorf("NameToHandleAt failed: %w", err)
	}
	// The first 8 bytes of the handle hold the cgroup id in native endianness.
	return nl.NativeEndian().Uint64(handle.Bytes()[:8]), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package manager
import (
"log/slog"
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
)
// Cell provides access to the cgroup manager.
var Cell = cell.Module(
	"cgroup-manager",
	"CGroup Manager",

	// The CGroupManager implementation (or a no-op fallback).
	cell.Provide(newCGroupManager),
	// REST API handler for dumping cgroup metadata.
	cell.Provide(newGetCgroupDumpMetadataRestApiHandler),
)
// cgroupManagerParams are the dependencies injected into newCGroupManager.
type cgroupManagerParams struct {
	cell.In

	Logger      *slog.Logger
	Lifecycle   cell.Lifecycle
	AgentConfig *option.DaemonConfig
}
// newCGroupManager constructs the CGroupManager. A no-op implementation is
// returned when socket-LB tracing is disabled or no cgroup path provider is
// available.
func newCGroupManager(params cgroupManagerParams) CGroupManager {
	if !params.AgentConfig.EnableSocketLBTracing {
		return &noopCGroupManager{}
	}

	pathProvider, err := getCgroupPathProvider()
	if err != nil {
		params.Logger.Info(
			"Failed to setup socket load-balancing tracing with Hubble. See the kubeproxy-free guide for more details.",
			logfields.Error, err,
		)
		return &noopCGroupManager{}
	}

	cm := newManager(params.Logger, cgroupImpl{}, pathProvider, podEventsChannelSize)
	params.Lifecycle.Append(cell.Hook{
		OnStart: func(cell.HookContext) error {
			// The event loop runs until Close() closes the shutdown channel.
			go cm.processPodEvents()
			return nil
		},
		OnStop: func(cell.HookContext) error {
			cm.Close()
			return nil
		},
	})
	params.Logger.Info("Cgroup metadata manager is enabled")
	return cm
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package manager
import (
"log/slog"
"maps"
"os"
"slices"
"strings"
"github.com/cilium/cilium/pkg/cgroups"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
nodetypes "github.com/cilium/cilium/pkg/node/types"
)
// Channel buffer size for pod events in order to not block callers.
// Declared as a var rather than a const — presumably so tests can tune it;
// confirm against the test code.
var podEventsChannelSize = 20
// Pod events processed by CgroupManager
const (
	podAddEvent = iota
	podUpdateEvent
	podDeleteEvent
	// podGetMetadataEvent asks the event loop for one container's pod metadata.
	podGetMetadataEvent
	// podDumpMetadataEvent asks the event loop for a full metadata snapshot.
	podDumpMetadataEvent
)
// CGroupManager is the interface exposed to consumers of pod/container
// cgroup metadata.
type CGroupManager interface {
	OnAddPod(pod *v1.Pod)
	OnUpdatePod(oldPod, newPod *v1.Pod)
	OnDeletePod(pod *v1.Pod)
	// GetPodMetadataForContainer returns pod metadata for the given container
	// cgroup id in case of success, or nil otherwise.
	GetPodMetadataForContainer(cgroupId uint64) *PodMetadata
	// DumpPodMetadata returns the metadata of all tracked pods and containers.
	DumpPodMetadata() []*FullPodMetadata
}
// CgroupManager maintains Kubernetes and low-level metadata (cgroup path and
// cgroup id) for local pods and their containers. In order to do that, it defines
// and implements callback functions that are called on Kubernetes pod watcher events.
// It also exposes APIs to read the saved metadata.
//
// The manager's internals are synchronized via a channel, and must not be
// accessed/updated outside this channel.
//
// During initialization, the manager checks for a valid cgroup path pathProvider.
// If it fails to find a pathProvider, it will ignore all the subsequent pod events.
type cgroupManager struct {
	logger *slog.Logger
	// Map of pod metadata indexed by their UIDs
	podMetadataById map[podUID]*podMetadata
	// Map of container metadata indexed by their cgroup ids
	containerMetadataByCgrpId map[uint64]*containerMetadata
	// Buffered channel to receive pod events
	podEvents chan podEvent
	// Tracks completed pod asynchronous events. Only used for testing.
	podEventsDone chan podEventStatus
	// Cgroup path provider
	pathProvider cgroupPathProvider
	// Channel to shut down manager
	shutdown chan struct{}
	// Interface to do cgroups related operations
	cgroupsChecker cgroup
	// Cache indexed by cgroup id to store pod metadata
	metadataCache map[uint64]PodMetadata
	// Lock to protect metadata cache
	metadataCacheLock lock.RWMutex
}
// PodMetadata stores selected metadata of a pod populated via Kubernetes watcher events.
type PodMetadata struct {
	Name      string
	Namespace string
	IPs       []string
}
// FullPodMetadata stores selected metadata of a pod and associated containers.
type FullPodMetadata struct {
	Name      string
	Namespace string
	Containers []*cgroupMetadata
	IPs       []string
}
// cgroupMetadata stores the cgroup id and path of a single container.
type cgroupMetadata struct {
	CgroupId   uint64
	CgroupPath string
}
// OnAddPod queues an add event for pods scheduled on this node.
func (m *cgroupManager) OnAddPod(pod *v1.Pod) {
	// Only pods running on this node are tracked.
	if pod.Spec.NodeName != nodetypes.GetName() {
		return
	}
	m.podEvents <- podEvent{eventType: podAddEvent, pod: pod}
}
// OnUpdatePod queues an update event for pods scheduled on this node.
func (m *cgroupManager) OnUpdatePod(oldPod, newPod *v1.Pod) {
	// Only pods running on this node are tracked.
	if newPod.Spec.NodeName != nodetypes.GetName() {
		return
	}
	m.podEvents <- podEvent{eventType: podUpdateEvent, pod: newPod, oldPod: oldPod}
}
// OnDeletePod queues a delete event for pods scheduled on this node.
func (m *cgroupManager) OnDeletePod(pod *v1.Pod) {
	// Only pods running on this node are tracked.
	if pod.Spec.NodeName != nodetypes.GetName() {
		return
	}
	m.podEvents <- podEvent{eventType: podDeleteEvent, pod: pod}
}
// GetPodMetadataForContainer returns pod metadata for the given container
// cgroup id, consulting the cache first and falling back to the event loop.
// Returns nil when no metadata is known for the cgroup id.
func (m *cgroupManager) GetPodMetadataForContainer(cgroupId uint64) *PodMetadata {
	m.metadataCacheLock.RLock()
	cached, hit := m.metadataCache[cgroupId]
	m.metadataCacheLock.RUnlock()
	if hit {
		return &cached
	}

	out := make(chan *PodMetadata)
	m.podEvents <- podEvent{
		cgroupId:       cgroupId,
		eventType:      podGetMetadataEvent,
		podMetadataOut: out,
	}
	// We either receive pod metadata, or zero value when the channel is closed.
	return <-out
}
// DumpPodMetadata asks the event loop for a snapshot of all tracked pod and
// container metadata.
func (m *cgroupManager) DumpPodMetadata() []*FullPodMetadata {
	out := make(chan []*FullPodMetadata)
	m.podEvents <- podEvent{
		eventType:      podDumpMetadataEvent,
		allMetadataOut: out,
	}
	return <-out
}
// Close should only be called once from daemon close.
// Closing the shutdown channel terminates the processPodEvents goroutine.
func (m *cgroupManager) Close() {
	close(m.shutdown)
}
// podUID is the string form of a Kubernetes pod UID.
type podUID = string

// podMetadata is the internal, mutable per-pod record kept by the manager.
type podMetadata struct {
	name      string
	namespace string
	ips       []string
	// containers holds the ids (runtime prefix trimmed) of the pod's known
	// running containers.
	containers map[string]struct{}
}
// containerMetadata links a container's cgroup id and path back to its pod.
type containerMetadata struct {
	cgroupId   uint64
	cgroupPath string
	podId      string
}
// podEvent is a message sent to the manager's event loop. Which fields are
// set depends on eventType; the *Out channels carry replies for the
// get/dump request events.
type podEvent struct {
	pod            *v1.Pod
	oldPod         *v1.Pod
	cgroupId       uint64
	eventType      int
	podMetadataOut chan *PodMetadata
	allMetadataOut chan []*FullPodMetadata
}
// podEventStatus reports a completed asynchronous pod event; only consumed
// by tests via cgroupManager.podEventsDone.
type podEventStatus struct {
	name      string
	namespace string
	eventType int
}
// fs abstracts file stat operations — presumably a test seam; it is not
// referenced elsewhere in this portion of the package.
type fs interface {
	Stat(name string) (os.FileInfo, error)
}
// cgroup abstracts cgroup id lookups so tests can substitute the syscall-
// backed implementation.
type cgroup interface {
	GetCgroupID(cgroupPath string) (uint64, error)
}
// cgroupImpl is the production cgroup implementation backed by the cgroups
// package.
type cgroupImpl struct{}

func (c cgroupImpl) GetCgroupID(cgroupPath string) (uint64, error) {
	return cgroups.GetCgroupID(cgroupPath)
}
// newManager builds a cgroupManager with empty state and a pod event channel
// of the given buffer size. processPodEvents must be started separately.
func newManager(logger *slog.Logger, cg cgroup, pathProvider cgroupPathProvider, channelSize int) *cgroupManager {
	return &cgroupManager{
		logger:                    logger,
		cgroupsChecker:            cg,
		pathProvider:              pathProvider,
		podMetadataById:           map[string]*podMetadata{},
		containerMetadataByCgrpId: map[uint64]*containerMetadata{},
		metadataCache:             map[uint64]PodMetadata{},
		podEvents:                 make(chan podEvent, channelSize),
		shutdown:                  make(chan struct{}),
	}
}
// processPodEvents is the manager's event loop. It serializes all access to
// the manager's internal state and runs until Close() is called.
func (m *cgroupManager) processPodEvents() {
	// notifyDone reports a completed asynchronous pod event; only consumed
	// by tests (podEventsDone is nil in production).
	notifyDone := func(pod *v1.Pod, eventType int) {
		if m.podEventsDone != nil {
			m.podEventsDone <- podEventStatus{
				name:      pod.Name,
				namespace: pod.Namespace,
				eventType: eventType,
			}
		}
	}

	for {
		select {
		case ev := <-m.podEvents:
			switch ev.eventType {
			case podAddEvent, podUpdateEvent:
				m.updatePodMetadata(ev.pod, ev.oldPod)
				notifyDone(ev.pod, ev.eventType)
			case podDeleteEvent:
				m.deletePodMetadata(ev.pod)
				notifyDone(ev.pod, ev.eventType)
			case podGetMetadataEvent:
				m.getPodMetadata(ev.cgroupId, ev.podMetadataOut)
			case podDumpMetadataEvent:
				m.dumpPodMetadata(ev.allMetadataOut)
			}
		case <-m.shutdown:
			if m.podEventsDone != nil {
				close(m.podEventsDone)
			}
			return
		}
	}
}
// updatePodMetadata creates or refreshes the cached metadata for pod.
// oldPod is the previous pod version on update events (nil on add).
// Must only be called from the processPodEvents goroutine.
func (m *cgroupManager) updatePodMetadata(pod, oldPod *v1.Pod) {
	id := string(pod.ObjectMeta.UID)
	pm, ok := m.podMetadataById[id]
	if !ok {
		// Fill in pod static metadata.
		pm = &podMetadata{
			name:      pod.Name,
			namespace: pod.Namespace,
		}
		m.podMetadataById[id] = pm
	}
	// Parses as: (oldPod != nil && status unchanged) || pod has no IPs yet.
	// In either case there is nothing further to update.
	if oldPod != nil && oldPod.Status.DeepEqual(&pod.Status) || len(pod.Status.PodIPs) == 0 {
		return
	}
	// Only update the metadata that can change. This excludes pod's name,
	// namespace, id, and qos class.
	podIPs := pod.Status.PodIPs
	pm.ips = make([]string, len(podIPs))
	for i := range podIPs {
		pm.ips[i] = podIPs[i].IP
	}
	// Get metadata for pod's containers that are in the running state. Containers
	// can get re-created, and their ids can change. Update the new containers.
	// Pod's metadata including its containers map will be deleted when the pod
	// is deleted.
	numContainers := len(pod.Status.ContainerStatuses)
	if pm.containers == nil && numContainers > 0 {
		pm.containers = make(map[string]struct{})
	}
	currContainers := make(map[string]struct{}, numContainers)
	for _, c := range pod.Status.ContainerStatuses {
		var cId string
		if cId = c.ContainerID; cId == "" || c.State.Running == nil {
			continue
		}
		// The container ID field is of the form: <container-runtime>://<containerID>
		// Example:containerd://e275d1a37782ab30008aa3ae6666cccefe53b3a14a2ab5a8dc459939107c8c0e
		_, after, found := strings.Cut(cId, "//")
		if !found || after == "" {
			m.logger.Error(
				"unexpected container ID",
				logfields.K8sPodName, pod.Name,
				logfields.K8sNamespace, pod.Namespace,
				logfields.ContainerID, cId,
			)
			continue
		}
		cId = after
		if _, ok := pm.containers[cId]; ok {
			currContainers[cId] = struct{}{}
			// Container cgroup path doesn't change as long as the container id
			// is the same.
			continue
		}
		pm.containers[cId] = struct{}{}
		currContainers[cId] = struct{}{}
		// Container could've been gone, so don't log any errors.
		cgrpPath, err := m.pathProvider.getContainerPath(id, cId, pod.Status.QOSClass)
		if err != nil {
			m.logger.Debug(
				"failed to get container metadata",
				logfields.Error, err,
				logfields.K8sPodName, pod.Name,
				logfields.K8sNamespace, pod.Namespace,
				logfields.ContainerID, cId,
			)
			continue
		}
		cgrpId, err := m.cgroupsChecker.GetCgroupID(cgrpPath)
		if err != nil {
			m.logger.Debug(
				"failed to get cgroup id",
				logfields.Error, err,
				logfields.K8sPodName, pod.Name,
				logfields.K8sNamespace, pod.Namespace,
				logfields.ContainerID, cId,
			)
			continue
		}
		m.containerMetadataByCgrpId[cgrpId] = &containerMetadata{
			cgroupId:   cgrpId,
			cgroupPath: cgrpPath,
			podId:      id,
		}
	}
	// Clean up any pod's old containers.
	if oldPod != nil {
		for _, c := range oldPod.Status.ContainerStatuses {
			// Pod status fields other than containers can be updated so check for
			// containers that were deleted.
			// NOTE(review): currContainers and pm.containers are keyed by the
			// trimmed container id (runtime "<rt>://" prefix cut off), while
			// c.ContainerID here still carries the prefix. The lookup and the
			// delete below therefore appear to never match — confirm whether
			// stale entries in pm.containers are cleaned up as intended.
			if _, ok := currContainers[c.ContainerID]; !ok {
				delete(pm.containers, c.ContainerID)
			}
		}
		// Purge the metadata cache, and let it be re-populated when needed.
		m.metadataCacheLock.Lock()
		for i, metadata := range m.metadataCache {
			if metadata.Name == oldPod.Name && metadata.Namespace == oldPod.Namespace {
				delete(m.metadataCache, i)
			}
		}
		m.metadataCacheLock.Unlock()
	}
}
// deletePodMetadata removes all state tracked for the given pod: its
// containers, their cached metadata entries, and the pod record itself.
// Must only be called from the processPodEvents goroutine.
func (m *cgroupManager) deletePodMetadata(pod *v1.Pod) {
	podId := string(pod.ObjectMeta.UID)
	if _, tracked := m.podMetadataById[podId]; !tracked {
		return
	}

	for cgrpId, cm := range m.containerMetadataByCgrpId {
		if cm.podId != podId {
			continue
		}
		delete(m.containerMetadataByCgrpId, cgrpId)
		m.metadataCacheLock.Lock()
		delete(m.metadataCache, cgrpId)
		m.metadataCacheLock.Unlock()
	}
	delete(m.podMetadataById, podId)
}
// getPodMetadata resolves pod metadata for the given container cgroup id,
// caches the result, and delivers it on podMetadataOut. The channel is always
// closed before returning; on a lookup miss nothing is sent.
func (m *cgroupManager) getPodMetadata(cgroupId uint64, podMetadataOut chan *PodMetadata) {
	defer close(podMetadataOut)
	cm, found := m.containerMetadataByCgrpId[cgroupId]
	if !found {
		return
	}
	pm, found := m.podMetadataById[cm.podId]
	if !found {
		return
	}
	podMetadata := PodMetadata{
		Name:      pm.name,
		Namespace: pm.namespace,
	}
	// Copy the IPs so the cached/returned value doesn't alias internal state.
	podMetadata.IPs = append(podMetadata.IPs, pm.ips...)
	m.metadataCacheLock.Lock()
	m.metadataCache[cgroupId] = podMetadata
	m.metadataCacheLock.Unlock()
	podMetadataOut <- &podMetadata
}
// dumpPodMetadata aggregates all known container cgroup entries by pod and
// sends the resulting list on allMetadataOut, closing the channel when done.
func (m *cgroupManager) dumpPodMetadata(allMetadataOut chan []*FullPodMetadata) {
	defer close(allMetadataOut)
	byPod := make(map[string]*FullPodMetadata)
	for _, cm := range m.containerMetadataByCgrpId {
		pm, found := m.podMetadataById[cm.podId]
		if !found {
			// Container entry without a matching pod record; skip it.
			m.logger.Debug(
				"Pod metadata not found",
				logfields.CGroupID, cm.cgroupId,
			)
			continue
		}
		entry := byPod[cm.podId]
		if entry == nil {
			// First container seen for this pod: create the aggregate entry.
			entry = &FullPodMetadata{
				Name:      pm.name,
				Namespace: pm.namespace,
			}
			entry.IPs = append(entry.IPs, pm.ips...)
			byPod[cm.podId] = entry
		}
		entry.Containers = append(entry.Containers, &cgroupMetadata{
			CgroupId:   cm.cgroupId,
			CgroupPath: cm.cgroupPath,
		})
	}
	allMetadataOut <- slices.Collect(maps.Values(byPod))
}
// Compile-time assertion that noopCGroupManager implements CGroupManager.
var _ CGroupManager = &noopCGroupManager{}

// noopCGroupManager is a no-op CGroupManager implementation; every method
// does nothing or returns the zero value.
type noopCGroupManager struct{}

// OnAddPod does nothing.
func (n *noopCGroupManager) OnAddPod(pod *v1.Pod) {
}

// OnDeletePod does nothing.
func (n *noopCGroupManager) OnDeletePod(pod *v1.Pod) {
}

// OnUpdatePod does nothing.
func (n *noopCGroupManager) OnUpdatePod(oldPod *v1.Pod, newPod *v1.Pod) {
}

// GetPodMetadataForContainer always returns nil.
func (n *noopCGroupManager) GetPodMetadataForContainer(cgroupId uint64) *PodMetadata {
	return nil
}

// DumpPodMetadata always returns nil.
func (n *noopCGroupManager) DumpPodMetadata() []*FullPodMetadata {
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package manager
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/cilium/cilium/pkg/cgroups"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
)
var (
	// example default cgroup path in kubernetes environments
	// /kubepods/burstable/pod1858680e-b044-4fd5-9dd4-f137e30e2180/e275d1a37782ab30008aa3ae6666cccefe53b3a14a2ab5a8dc459939107c8c0
	defaultCgroupBasePath = "/kubepods"
	// example cgroup path in environments with systemd cgroup driver
	// /kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ac48755_3968_48e4_b9dc_6d4b69f3bb42.slice/cri-containerd-3baf66ee56a52a8765c3deb2444315411a888fa3e2f8f7ddd75e9ded3c34425e.scope
	systemdCgroupBasePath = "/kubepods.slice"
	// example cgroup path in nested environments like kind
	// /kubelet/kubepods/pod4841248b-fc2f-41f4-9981-a685bf840ab5/d8f227cc24940cfdce8d8e601f3b92242ac9661b0e83f0ea57fdea1cb6bc93ec
	nestedCgroupBasePath = "/kubelet" + "/kubepods"
	// example cgroup path in nested environments with systemd cgroup driver
	// /kubelet.slice/kubelet-kubepods.slice/kubelet-kubepods-besteffort.slice/kubepods-besteffort-pod9ac48755_3968_48e4_b9dc_6d4b69f3bb42.slice/cri-containerd-3baf66ee56a52a8765c3deb2444315411a888fa3e2f8f7ddd75e9ded3c34425e.scope
	nestedSystemdCgroupBasePath = "/kubelet.slice/kubelet-kubepods.slice/"
	// List of cgroup providers for different environments, probed in order;
	// the first provider whose base path exists on the host wins.
	providers = []cgroupPathProvider{
		newDefaultProvider(),
		newSystemdProvider(),
		newNestedProvider(),
		newNestedSystemdProvider(),
	}
	// Prefix added to container cgroup sub-path by containerd runtime
	containerdPrefix = "cri-containerd-"
	// Prefix added to container cgroup sub-path by crio runtime
	crioPrefix = "crio-"
	// Prefix added to container cgroup sub-path by docker runtime
	dockerPrefix = "docker-"
	// List of container runtime prefixes that can appear in container cgroup paths in systemd environments.
	containerRuntimePrefixes = []string{containerdPrefix, crioPrefix, dockerPrefix}
	// Suffix added to cgroup sub-paths for systemd
	systemdSuffix = ".slice"
	// Suffix added to container cgroup sub-paths for systemd
	systemdEndSuffix = ".scope"
	// File system interface for dependency injection in unit tests
	// (see initProviderTest).
	fschecker fs = fsImpl{}
)
// getCgroupPathProvider probes the known cgroup layouts in order and returns
// the first provider whose base path exists on this host.
func getCgroupPathProvider() (cgroupPathProvider, error) {
	for _, candidate := range providers {
		_, err := candidate.getBasePath()
		if err != nil {
			continue
		}
		return candidate, nil
	}
	return nil, fmt.Errorf("no valid cgroup path provider found")
}
// cgroupPathProvider abstracts a specific host cgroup layout (cgroupfs vs
// systemd driver, nested vs not) and can construct validated cgroup paths.
type cgroupPathProvider interface {
	// getBasePath returns the provider's base cgroup path if it exists on
	// this host, or an error otherwise.
	getBasePath() (string, error)
	// getContainerPath returns the validated cgroup path for the container
	// with the given id inside the given pod and QoS class.
	getContainerPath(podId string, containerId string, qos v1.PodQOSClass) (string, error)
}
// defaultProvider handles the plain cgroupfs layout (/kubepods/...).
type defaultProvider struct {
	basePath string
}

// systemdProvider handles the systemd cgroup driver layout (/kubepods.slice/...).
type systemdProvider struct {
	basePath string
}

// nestedProvider handles nested environments like kind (/kubelet/kubepods/...).
type nestedProvider struct {
	basePath string
}

// nestedSystemProvider handles nested environments with the systemd cgroup
// driver (/kubelet.slice/kubelet-kubepods.slice/...).
// NOTE(review): name is presumably a typo for nestedSystemdProvider; kept
// as-is since renaming would touch its methods and constructor.
type nestedSystemProvider struct {
	basePath string
}
// newDefaultProvider returns a provider for the plain cgroupfs layout.
func newDefaultProvider() defaultProvider {
	return defaultProvider{basePath: defaultCgroupBasePath}
}

// newSystemdProvider returns a provider for the systemd cgroup driver layout.
func newSystemdProvider() systemdProvider {
	return systemdProvider{basePath: systemdCgroupBasePath}
}

// newNestedProvider returns a provider for nested (e.g. kind) environments.
func newNestedProvider() nestedProvider {
	return nestedProvider{basePath: nestedCgroupBasePath}
}

// newNestedSystemdProvider returns a provider for nested environments using
// the systemd cgroup driver.
func newNestedSystemdProvider() nestedSystemProvider {
	return nestedSystemProvider{basePath: nestedSystemdCgroupBasePath}
}
// getBasePath returns the validated absolute base cgroup path for the
// default cgroupfs layout.
func (cp defaultProvider) getBasePath() (string, error) {
	return validateCgroupPath(cp.basePath)
}

// getContainerPath builds and validates the container cgroup path for the
// default layout: <base>[/<qos>]/pod<podId>/<containerId>.
func (cp defaultProvider) getContainerPath(podId string, containerId string, qos v1.PodQOSClass) (string, error) {
	return getDefaultContainerPathCommon(cp.basePath, podId, containerId, qos)
}
// getBasePath returns the validated absolute base cgroup path for the
// systemd cgroup driver layout.
func (cp systemdProvider) getBasePath() (string, error) {
	return validateCgroupPath(cp.basePath)
}

// getContainerPath builds and validates the systemd-style container cgroup
// path rooted at the "kubepods" slice.
func (cp systemdProvider) getContainerPath(podId string, containerId string, qos v1.PodQOSClass) (string, error) {
	return getSystemdContainerPathCommon([]string{"kubepods"}, podId, containerId, qos)
}
// getBasePath returns the validated absolute base cgroup path for nested
// (e.g. kind) environments.
func (cp nestedProvider) getBasePath() (string, error) {
	return validateCgroupPath(cp.basePath)
}

// getContainerPath builds and validates the container cgroup path for the
// nested layout, which shares the default cgroupfs path structure.
func (cp nestedProvider) getContainerPath(podId string, containerId string, qos v1.PodQOSClass) (string, error) {
	return getDefaultContainerPathCommon(cp.basePath, podId, containerId, qos)
}
// getBasePath returns the validated absolute base cgroup path for nested
// environments using the systemd cgroup driver.
func (cp nestedSystemProvider) getBasePath() (string, error) {
	return validateCgroupPath(cp.basePath)
}

// getContainerPath builds and validates the systemd-style container cgroup
// path rooted at the nested "kubelet"/"kubepods" slices.
func (cp nestedSystemProvider) getContainerPath(podId string, containerId string, qos v1.PodQOSClass) (string, error) {
	return getSystemdContainerPathCommon([]string{"kubelet", "kubepods"}, podId, containerId, qos)
}
// getSystemdContainerPathCommon builds the systemd-style cgroup path for a
// container: the pod slice path (via toSystemd) joined with a runtime-prefixed
// container scope. Each known container runtime prefix is probed until one
// yields a path that exists; the last validation error is returned otherwise.
func getSystemdContainerPathCommon(subPaths []string, podId string, containerId string, qos v1.PodQOSClass) (string, error) {
	podIdStr := fmt.Sprintf("pod%s", podId)
	// Guaranteed pods live directly under the kubepods slice; other QoS
	// classes have an intermediate per-QoS slice component.
	components := subPaths
	if qos != v1.PodQOSGuaranteed {
		components = append(components, strings.ToLower(string(qos)))
	}
	components = append(components, podIdStr)
	path, err := toSystemd(components)
	if err != nil {
		return "", fmt.Errorf("unable to construct cgroup path: %w", err)
	}
	// Probe each container runtime prefix until a validated path is found.
	var ret string
	for _, prefix := range containerRuntimePrefixes {
		containerSubPath := prefix + containerId + systemdEndSuffix
		ret, err = validateCgroupPath(filepath.Join(path, containerSubPath))
		if err == nil {
			break
		}
	}
	return ret, err
}
// validateCgroupPath prepends the host cgroup root to the given relative path
// and verifies that the resulting path exists.
// Returns the absolute path on success; otherwise an error that wraps the
// underlying stat failure (previously the stat error was discarded, making
// failures impossible to diagnose).
func validateCgroupPath(path string) (string, error) {
	fullPath := cgroups.GetCgroupRoot() + path
	if _, err := fschecker.Stat(fullPath); err != nil {
		return "", fmt.Errorf("no valid cgroup path found: %w", err)
	}
	return fullPath, nil
}
// getBaseCgroupPathForQos appends the lowercase QoS class component to the
// base path; Guaranteed pods have no per-QoS component.
func getBaseCgroupPathForQos(path string, qos v1.PodQOSClass) string {
	if qos != v1.PodQOSGuaranteed {
		return filepath.Join(path, strings.ToLower(string(qos)))
	}
	return path
}
// getDefaultContainerPathCommon builds and validates the cgroupfs-style
// container path: <base>[/<qos>]/pod<podId>/<containerId>.
func getDefaultContainerPathCommon(path string, podId string, containerId string, qos v1.PodQOSClass) (string, error) {
	base := getBaseCgroupPathForQos(path, qos)
	return validateCgroupPath(filepath.Join(base, "pod"+podId, containerId))
}
// initProviderTest sets up dependency injection for unit testing by replacing
// the package-level filesystem checker with the given implementation.
func initProviderTest(fsProvider fs) {
	fschecker = fsProvider
}
// fsImpl is the production filesystem implementation backing the fs
// interface; it delegates directly to the os package.
type fsImpl struct{}

// Stat calls os.Stat on the given path.
func (f fsImpl) Stat(name string) (os.FileInfo, error) {
	return os.Stat(name)
}
// Following helpers are adapted from: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/cm/cgroup_manager_linux.go.

// toSystemd converts the given cgroup name to a systemd name.
// For example, the name {"kubepods", "burstable", "pod1234-abcd-5678-efgh"} becomes
// "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1234_abcd_5678_efgh.slice"
func toSystemd(cgroupName []string) (string, error) {
	escaped := make([]string, 0, len(cgroupName))
	for _, part := range cgroupName {
		escaped = append(escaped, escapeSystemdCgroupName(part))
	}
	result, err := expandSlice(strings.Join(escaped, "-") + systemdSuffix)
	if err != nil {
		return "", fmt.Errorf("error converting cgroup name [%v] to systemd format: %w", cgroupName, err)
	}
	return result, nil
}
// escapeSystemdCgroupName escapes one cgroup name component for systemd by
// replacing every dash with an underscore, since systemd reserves '-' as its
// slice hierarchy separator.
func escapeSystemdCgroupName(part string) string {
	var b strings.Builder
	b.Grow(len(part))
	for i := 0; i < len(part); i++ {
		if part[i] == '-' {
			b.WriteByte('_')
		} else {
			b.WriteByte(part[i])
		}
	}
	return b.String()
}
// expandSlice expands a systemd slice name into its full hierarchical path.
// systemd represents slice nesting with '-', so test-a-b.slice becomes
// /test.slice/test-a.slice/test-a-b.slice.
func expandSlice(slice string) (string, error) {
	const suffix = ".slice"
	// The name must end in ".slice" (and be more than just the suffix) and
	// must not contain path separators.
	if len(slice) < len(suffix) || !strings.HasSuffix(slice, suffix) || strings.Contains(slice, "/") {
		return "", fmt.Errorf("invalid slice name: %s", slice)
	}
	sliceName := strings.TrimSuffix(slice, suffix)
	// "-.slice" denotes the root slice.
	if sliceName == "-" {
		return "/", nil
	}
	var b strings.Builder
	prefix := ""
	for _, component := range strings.Split(sliceName, "-") {
		// Empty components (leading, trailing or doubled dashes) are invalid.
		if component == "" {
			return "", fmt.Errorf("invalid slice name: %s", slice)
		}
		// Each level repeats all ancestor components joined by dashes.
		b.WriteString("/")
		b.WriteString(prefix)
		b.WriteString(component)
		b.WriteString(suffix)
		prefix += component + "-"
	}
	return b.String(), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package manager
import (
"github.com/go-openapi/runtime/middleware"
"github.com/cilium/cilium/api/v1/models"
daemonrestapi "github.com/cilium/cilium/api/v1/server/restapi/daemon"
)
// getCgroupDumpMetadataRestApiHandler serves the cgroup dump-metadata REST
// endpoint by querying the configured CGroupManager.
type getCgroupDumpMetadataRestApiHandler struct {
	cgroupManager CGroupManager
}
// newGetCgroupDumpMetadataRestApiHandler returns a REST handler for the
// GetCgroupDumpMetadata endpoint backed by the given CGroupManager.
func newGetCgroupDumpMetadataRestApiHandler(cgroupManager CGroupManager) daemonrestapi.GetCgroupDumpMetadataHandler {
	return &getCgroupDumpMetadataRestApiHandler{
		cgroupManager: cgroupManager,
	}
}
// Handle converts the manager's pod metadata dump into the API model and
// returns it as a 200 response.
func (h *getCgroupDumpMetadataRestApiHandler) Handle(params daemonrestapi.GetCgroupDumpMetadataParams) middleware.Responder {
	payload := models.CgroupDumpMetadata{}
	for _, pm := range h.cgroupManager.DumpPodMetadata() {
		var containers []*models.CgroupContainerMetadata
		for _, cm := range pm.Containers {
			containers = append(containers, &models.CgroupContainerMetadata{
				CgroupID:   cm.CgroupId,
				CgroupPath: cm.CgroupPath,
			})
		}
		payload.PodMetadatas = append(payload.PodMetadatas, &models.CgroupPodMetadata{
			Name:       pm.Name,
			Namespace:  pm.Namespace,
			Containers: containers,
			Ips:        pm.IPs,
		})
	}
	return daemonrestapi.NewGetCgroupDumpMetadataOK().WithPayload(&payload)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cidr
import (
"bytes"
"fmt"
"net"
"slices"
)
// NewCIDR returns a new CIDR using a net.IPNet; a nil input yields nil.
func NewCIDR(ipnet *net.IPNet) *CIDR {
	if ipnet != nil {
		return &CIDR{ipnet}
	}
	return nil
}
// NewCIDRSlice converts a slice of *net.IPNet into a slice of *CIDR,
// preserving order. A nil input yields a nil result.
func NewCIDRSlice(ipnets []*net.IPNet) []*CIDR {
	if ipnets == nil {
		return nil
	}
	out := make([]*CIDR, 0, len(ipnets))
	for _, ipnet := range ipnets {
		out = append(out, NewCIDR(ipnet))
	}
	return out
}
// CIDRsToIPNets extracts the underlying *net.IPNet from each CIDR, preserving
// order. A nil input yields a nil result.
func CIDRsToIPNets(cidrs []*CIDR) []*net.IPNet {
	if cidrs == nil {
		return nil
	}
	out := make([]*net.IPNet, 0, len(cidrs))
	for _, cidr := range cidrs {
		out = append(out, cidr.IPNet)
	}
	return out
}
// CIDR is a network CIDR representation based on net.IPNet. It embeds the
// *net.IPNet, so all of its methods are promoted.
type CIDR struct {
	*net.IPNet
}
// String returns the CIDR in standard notation. A nil receiver yields the
// same representation as a nil *net.IPNet.
func (c *CIDR) String() string {
	if c != nil {
		return c.IPNet.String()
	}
	return (*net.IPNet)(nil).String()
}
// DeepEqual is a deep-equality function, deeply comparing the receiver with
// other. in must be non-nil; a nil other is never equal.
func (in *CIDR) DeepEqual(other *CIDR) bool {
	if other == nil {
		return false
	}
	switch {
	case in.IPNet == nil && other.IPNet == nil:
		return true
	case in.IPNet == nil || other.IPNet == nil:
		return false
	}
	if !in.IPNet.IP.Equal(other.IPNet.IP) {
		return false
	}
	// Masks are equal when they have the same prefix length and total size.
	inOnes, inBits := in.IPNet.Mask.Size()
	otherOnes, otherBits := other.IPNet.Mask.Size()
	return inOnes == otherOnes && inBits == otherBits
}
// DeepCopy creates a deep copy of a CIDR; a nil receiver yields nil.
func (n *CIDR) DeepCopy() *CIDR {
	if n == nil {
		return nil
	}
	cpy := &CIDR{}
	n.DeepCopyInto(cpy)
	return cpy
}
// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDR) DeepCopyInto(out *CIDR) {
	*out = *in
	if in.IPNet == nil {
		return
	}
	// Build a fresh IPNet so out shares no backing storage with in.
	ipnet := new(net.IPNet)
	if in.IPNet.IP != nil {
		ipnet.IP = make(net.IP, len(in.IPNet.IP))
		copy(ipnet.IP, in.IPNet.IP)
	}
	if in.IPNet.Mask != nil {
		ipnet.Mask = make(net.IPMask, len(in.IPNet.Mask))
		copy(ipnet.Mask, in.IPNet.Mask)
	}
	out.IPNet = ipnet
}
// Equal returns true if the receiver's CIDR equals the other CIDR.
// Two nil values are considered equal.
func (n *CIDR) Equal(o *CIDR) bool {
	if n != nil && o != nil {
		return Equal(n.IPNet, o.IPNet)
	}
	return n == o
}
// Equal returns true if the n and o net.IPNet CIDRs are Equal.
func Equal(n, o *net.IPNet) bool {
if n == nil || o == nil {
return n == o
}
if n == o {
return true
}
return n.IP.Equal(o.IP) &&
bytes.Equal(n.Mask, o.Mask)
}
// ZeroNet generates a zero net.IPNet object for the given address family.
// Unknown families yield nil.
func ZeroNet(family int) *net.IPNet {
	var ip net.IP
	var bits int
	switch family {
	case FAMILY_V4:
		ip, bits = net.IPv4zero, 8*net.IPv4len
	case FAMILY_V6:
		ip, bits = net.IPv6zero, 8*net.IPv6len
	default:
		return nil
	}
	return &net.IPNet{IP: ip, Mask: net.CIDRMask(0, bits)}
}
// ContainsAll returns true if 'ipNets1' contains all net.IPNet of 'ipNets2'.
// An empty or nil ipNets2 trivially returns true.
func ContainsAll(ipNets1, ipNets2 []*net.IPNet) bool {
	inFirst := func(target *net.IPNet) bool {
		return slices.ContainsFunc(ipNets1, func(candidate *net.IPNet) bool {
			return Equal(target, candidate)
		})
	}
	for _, n2 := range ipNets2 {
		if !inFirst(n2) {
			return false
		}
	}
	return true
}
// ParseCIDR parses the CIDR string using net.ParseCIDR.
func ParseCIDR(str string) (*CIDR, error) {
	_, ipnet, err := net.ParseCIDR(str)
	if err == nil {
		return NewCIDR(ipnet), nil
	}
	return nil, err
}
// MustParseCIDR parses the CIDR string using net.ParseCIDR and panics if the
// CIDR cannot be parsed.
func MustParseCIDR(str string) *CIDR {
	c, err := ParseCIDR(str)
	if err == nil {
		return c
	}
	panic(fmt.Sprintf("Unable to parse CIDR '%s': %s", str, err))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cidr
// createIPNetMap indexes the given CIDRs by their string representation,
// skipping nil entries.
func createIPNetMap(list []*CIDR) map[string]*CIDR {
	result := make(map[string]*CIDR, len(list))
	for _, cidr := range list {
		if cidr == nil {
			continue
		}
		result[cidr.String()] = cidr
	}
	return result
}
// listMissingIPNets returns the non-nil CIDRs from 'candidates' that are not
// present in the 'existing' map (keyed by CIDR string).
func listMissingIPNets(existing map[string]*CIDR, candidates []*CIDR) []*CIDR {
	var missing []*CIDR
	for _, c := range candidates {
		if c == nil {
			continue
		}
		if _, found := existing[c.String()]; !found {
			missing = append(missing, c)
		}
	}
	return missing
}
// DiffCIDRLists compares an old and new list of CIDRs and returns the list of
// removed and added CIDRs.
func DiffCIDRLists(old, new []*CIDR) (add, remove []*CIDR) {
	oldByString := createIPNetMap(old)
	newByString := createIPNetMap(new)
	add = listMissingIPNets(oldByString, new)
	remove = listMissingIPNets(newByString, old)
	return add, remove
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"slices"
"strings"
"text/tabwriter"
"time"
runtime_client "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
clientapi "github.com/cilium/cilium/api/v1/client"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/defaults"
)
// Client is the Cilium API client; it embeds the generated go-swagger
// CiliumAPI client, so all API method groups are promoted.
type Client struct {
	clientapi.CiliumAPI
}
// DefaultSockPath returns default UNIX domain socket path or
// path set using CILIUM_SOCK env variable
func DefaultSockPath() string {
	// Environment variable takes precedence over the compiled-in default.
	path := os.Getenv(defaults.SockPathEnv)
	if path == "" {
		path = defaults.SockPath
	}
	return "unix://" + path
}
func configureTransport(tr *http.Transport, proto, addr string) *http.Transport {
if tr == nil {
tr = &http.Transport{}
}
if proto == "unix" {
// No need for compression in local communications.
tr.DisableCompression = true
tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial(proto, addr)
}
} else {
tr.Proxy = http.ProxyFromEnvironment
tr.DialContext = (&net.Dialer{}).DialContext
}
return tr
}
// NewDefaultClient creates a client with default parameters connecting to UNIX domain socket.
// It is equivalent to NewClient(""), which resolves the socket path from
// CILIUM_SOCK or the compiled-in default.
func NewDefaultClient() (*Client, error) {
	return NewClient("")
}
// NewDefaultClientWithTimeout creates a client with default parameters connecting to UNIX
// domain socket and waits for cilium-agent availability.
// It retries both client construction and an agent health probe every 500ms
// until the overall timeout elapses.
func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) {
	timeoutAfter := time.After(timeout)
	var c *Client
	var err error
	for {
		// Bail out once the deadline passes; err carries the most recent
		// failure for context in the returned error.
		select {
		case <-timeoutAfter:
			return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %w", timeout.Seconds(), err)
		default:
		}
		c, err = NewDefaultClient()
		if err != nil {
			// Client construction failed; pause briefly and retry.
			time.Sleep(500 * time.Millisecond)
			continue
		}
		for {
			// Re-check the deadline on every probe attempt.
			select {
			case <-timeoutAfter:
				return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %w", timeout.Seconds(), err)
			default:
			}
			// This is an API call that we do to the cilium-agent to check
			// if it is up and running.
			_, err = c.Daemon.GetConfig(nil)
			if err != nil {
				time.Sleep(500 * time.Millisecond)
				continue
			}
			return c, nil
		}
	}
}
// NewClient creates a client for the given `host`.
// If host is nil then use SockPath provided by CILIUM_SOCK
// or the cilium default SockPath
func NewClient(host string) (*Client, error) {
	clientTrans, err := NewRuntime(WithHost(host))
	if err != nil {
		// Previously a client built around a nil transport was returned
		// together with the error; return nil instead so callers never
		// receive a half-constructed client.
		return nil, err
	}
	return &Client{*clientapi.New(clientTrans, strfmt.Default)}, nil
}
// runtimeOptions collects the configurable parameters for NewRuntime; values
// are set through the With* functional options.
type runtimeOptions struct {
	// host is the target in "<scheme>://<address>" form; empty selects the
	// default UNIX domain socket.
	host string
	// basePath is the API base path; empty selects clientapi.DefaultBasePath.
	basePath string
}
// WithHost returns an option setting the target host for NewRuntime.
func WithHost(host string) func(options *runtimeOptions) {
	return func(options *runtimeOptions) {
		options.host = host
	}
}

// WithBasePath returns an option setting the API base path for NewRuntime.
func WithBasePath(basePath string) func(options *runtimeOptions) {
	return func(options *runtimeOptions) {
		options.basePath = basePath
	}
}
// NewTransport creates an HTTP transport for the given host string of the
// form "<scheme>://<address>" ("unix" or "tcp"). An empty host selects the
// default UNIX domain socket path.
func NewTransport(host string) (*http.Transport, error) {
	if host == "" {
		host = DefaultSockPath()
	}
	// Keep the scheme and address in separate variables: the original code
	// reassigned `host` from strings.Cut, so when no "://" was present the
	// error below printed an empty string instead of the offending input.
	schema, addr, found := strings.Cut(host, "://")
	if !found {
		return nil, fmt.Errorf("invalid host format '%s'", host)
	}
	switch schema {
	case "tcp":
		// Sanity-check the address parses as a URL before use.
		if _, err := url.Parse("tcp://" + addr); err != nil {
			return nil, err
		}
		addr = "http://" + addr
	case "unix":
	}
	return configureTransport(nil, schema, addr), nil
}
// NewRuntime builds a go-openapi runtime configured by the given options,
// defaulting to the standard base path and the local UNIX domain socket.
func NewRuntime(opts ...func(options *runtimeOptions)) (*runtime_client.Runtime, error) {
	cfg := runtimeOptions{}
	for _, apply := range opts {
		apply(&cfg)
	}
	basePath := cfg.basePath
	if basePath == "" {
		basePath = clientapi.DefaultBasePath
	}
	host := cfg.host
	if host == "" {
		host = DefaultSockPath()
	}
	_, hostHeader, found := strings.Cut(host, "://")
	if !found {
		return nil, fmt.Errorf("invalid host format '%s'", host)
	}
	if strings.HasPrefix(host, "unix") {
		// The hostname is unused for local UNIX domain socket communication,
		// but an empty Host header would be rejected by net/http client-side
		// sanitization (https://go.dev/issue/60374), so substitute a
		// placeholder.
		hostHeader = "localhost"
	}
	transport, err := NewTransport(host)
	if err != nil {
		return nil, err
	}
	return runtime_client.NewWithClient(hostHeader, basePath,
		clientapi.DefaultSchemes, &http.Client{Transport: transport}), nil
}
// Hint tries to improve the error message displayed to the user.
// Timeouts get a dedicated message; errors mentioning the agent socket get a
// "is the agent running?" suffix; everything else passes through unchanged.
func Hint(err error) error {
	if err == nil {
		return err
	}
	if errors.Is(err, context.DeadlineExceeded) {
		return fmt.Errorf("Cilium API client timeout exceeded")
	}
	e, unescapeErr := url.PathUnescape(err.Error())
	if unescapeErr != nil {
		// PathUnescape returns "" on failure; previously that error was
		// silently discarded, which could produce an empty hint message.
		// Fall back to the raw error text instead.
		e = err.Error()
	}
	if strings.Contains(err.Error(), defaults.SockPath) {
		return fmt.Errorf("%s\nIs the agent running?", e)
	}
	return err
}
func timeSince(since time.Time) string {
out := "never"
if !since.IsZero() {
t := time.Since(since)
out = t.Truncate(time.Second).String() + " ago"
}
return out
}
// stateUnhealthy reports whether the given status state string denotes a
// warning or failure condition.
func stateUnhealthy(state string) bool {
	switch state {
	case models.StatusStateWarning, models.StatusStateFailure:
		return true
	}
	return false
}
// statusUnhealthy reports whether the given status is in a warning or failure
// state; a nil status counts as healthy.
func statusUnhealthy(s *models.Status) bool {
	if s == nil {
		return false
	}
	return stateUnhealthy(s.State)
}
// FormatStatusResponseBrief writes a one-line status to the writer. If
// everything ok, this is "ok", otherwise a message of the form "error in ..."
func FormatStatusResponseBrief(w io.Writer, sr *models.StatusResponse) {
	var msg string
	// Check the core subsystems in priority order; the first unhealthy one
	// determines the message.
	if statusUnhealthy(sr.Kvstore) {
		msg = fmt.Sprintf("kvstore: %s", sr.Kvstore.Msg)
	} else if statusUnhealthy(sr.ContainerRuntime) {
		msg = fmt.Sprintf("container runtime: %s", sr.ContainerRuntime.Msg)
	} else if sr.Kubernetes != nil && stateUnhealthy(sr.Kubernetes.State) {
		msg = fmt.Sprintf("kubernetes: %s", sr.Kubernetes.Msg)
	} else if statusUnhealthy(sr.Cilium) {
		msg = fmt.Sprintf("cilium: %s", sr.Cilium.Msg)
	} else if sr.Cluster != nil && statusUnhealthy(sr.Cluster.CiliumHealth) {
		msg = fmt.Sprintf("cilium-health: %s", sr.Cluster.CiliumHealth.Msg)
	}
	// Only bother looking at controller failures if everything else is ok
	if msg == "" {
		for _, ctrl := range sr.Controllers {
			if ctrl.Status == nil || ctrl.Status.LastFailureMsg == "" {
				continue
			}
			msg = fmt.Sprintf("controller %s: %s", ctrl.Name, ctrl.Status.LastFailureMsg)
			break
		}
	}
	if msg == "" {
		fmt.Fprintf(w, "OK\n")
		return
	}
	fmt.Fprintf(w, "error in %s\n", msg)
}
// clusterReadiness renders a remote cluster's readiness as "ready" or
// "not-ready".
func clusterReadiness(cluster *models.RemoteCluster) string {
	if cluster.Ready {
		return "ready"
	}
	return "not-ready"
}
// NumReadyClusters counts the remote clusters that report Ready.
func NumReadyClusters(clusters []*models.RemoteCluster) int {
	count := 0
	for _, cluster := range clusters {
		if cluster.Ready {
			count++
		}
	}
	return count
}
// StatusDetails selects which optional sections FormatStatusResponse prints.
type StatusDetails struct {
	// AllAddresses causes all addresses to be printed by FormatStatusResponse.
	AllAddresses bool
	// AllControllers causes all controllers to be printed by FormatStatusResponse.
	AllControllers bool
	// AllNodes causes all nodes to be printed by FormatStatusResponse.
	AllNodes bool
	// AllRedirects causes all redirects to be printed by FormatStatusResponse.
	AllRedirects bool
	// AllClusters causes all clusters to be printed by FormatStatusResponse.
	AllClusters bool
	// BPFMapDetails causes BPF map details to be printed by FormatStatusResponse.
	BPFMapDetails bool
	// KubeProxyReplacementDetails causes BPF kube-proxy details to be printed by FormatStatusResponse.
	KubeProxyReplacementDetails bool
	// ClockSourceDetails causes BPF time-keeping internals to be printed by FormatStatusResponse.
	ClockSourceDetails bool
}
var (
	// StatusNoDetails causes no additional status details to be printed by
	// FormatStatusResponse.
	StatusNoDetails = StatusDetails{}
	// StatusAllDetails causes all status details to be printed by FormatStatusResponse.
	StatusAllDetails = StatusDetails{
		AllAddresses:                true,
		AllControllers:              true,
		AllNodes:                    true,
		AllRedirects:                true,
		AllClusters:                 true,
		BPFMapDetails:               true,
		KubeProxyReplacementDetails: true,
		ClockSourceDetails:          true,
	}
)
// FormatStatusResponse writes a StatusResponse as a string to the writer. The bit mask sd controls
// whether a additional details are printed about a certain aspect of the status. In case there are
// errors, some details may be printed regardless of the value of sd.
func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetails) {
if sr.Kvstore != nil {
fmt.Fprintf(w, "KVStore:\t%s\t%s\n", sr.Kvstore.State, sr.Kvstore.Msg)
}
if sr.ContainerRuntime != nil {
fmt.Fprintf(w, "ContainerRuntime:\t%s\t%s\n",
sr.ContainerRuntime.State, sr.ContainerRuntime.Msg)
}
kubeProxyDevices := ""
if sr.Kubernetes != nil {
fmt.Fprintf(w, "Kubernetes:\t%s\t%s\n", sr.Kubernetes.State, sr.Kubernetes.Msg)
if sr.Kubernetes.State != models.K8sStatusStateDisabled {
slices.Sort(sr.Kubernetes.K8sAPIVersions)
fmt.Fprintf(w, "Kubernetes APIs:\t[\"%s\"]\n", strings.Join(sr.Kubernetes.K8sAPIVersions, "\", \""))
}
}
if sr.KubeProxyReplacement != nil {
devices := ""
if sr.KubeProxyReplacement.Mode != models.KubeProxyReplacementModeFalse {
for i, dev := range sr.KubeProxyReplacement.DeviceList {
kubeProxyDevices += fmt.Sprintf("%s %s", dev.Name, strings.Join(dev.IP, " "))
if dev.Name == sr.KubeProxyReplacement.DirectRoutingDevice {
kubeProxyDevices += " (Direct Routing)"
}
if i+1 != len(sr.KubeProxyReplacement.Devices) {
kubeProxyDevices += ", "
}
}
if len(sr.KubeProxyReplacement.DeviceList) > 0 {
devices = "[" + kubeProxyDevices + "]"
}
}
fmt.Fprintf(w, "KubeProxyReplacement:\t%s\t%s\n",
sr.KubeProxyReplacement.Mode, devices)
}
if sr.HostFirewall != nil {
fmt.Fprintf(w, "Host firewall:\t%s", sr.HostFirewall.Mode)
if sr.HostFirewall.Mode != models.HostFirewallModeDisabled {
fmt.Fprintf(w, "\t[%s]", strings.Join(sr.HostFirewall.Devices, ", "))
}
fmt.Fprintf(w, "\n")
}
if sr.Srv6 != nil {
var fields []string
status := "Disabled"
fields = append(fields, status)
if sr.Srv6.Enabled {
fields[0] = "Enabled"
fields = append(fields, fmt.Sprintf("[encap-mode: %s]", sr.Srv6.Srv6EncapMode))
}
fmt.Fprintf(w, "SRv6:\t%s\n", strings.Join(fields, "\t"))
}
if sr.CniChaining != nil {
fmt.Fprintf(w, "CNI Chaining:\t%s\n", sr.CniChaining.Mode)
}
if sr.CniFile != nil {
fmt.Fprintf(w, "CNI Config file:\t%s\n", sr.CniFile.Msg)
}
if sr.Cilium != nil {
fmt.Fprintf(w, "Cilium:\t%s %s\n", sr.Cilium.State, sr.Cilium.Msg)
}
if sr.Stale != nil {
sortedProbes := make([]string, 0, len(sr.Stale))
for probe := range sr.Stale {
sortedProbes = append(sortedProbes, probe)
}
slices.Sort(sortedProbes)
stalesStr := make([]string, 0, len(sr.Stale))
for _, probe := range sortedProbes {
stalesStr = append(stalesStr, fmt.Sprintf("%q since %s", probe, sr.Stale[probe]))
}
fmt.Fprintf(w, "Stale status:\t%s\n", strings.Join(stalesStr, ", "))
}
if nm := sr.NodeMonitor; nm != nil {
fmt.Fprintf(w, "NodeMonitor:\tListening for events on %d CPUs with %dx%d of shared memory\n",
nm.Cpus, nm.Npages, nm.Pagesize)
if nm.Lost != 0 || nm.Unknown != 0 {
fmt.Fprintf(w, "\t%d events lost, %d unknown notifications\n", nm.Lost, nm.Unknown)
}
} else {
fmt.Fprintf(w, "NodeMonitor:\tDisabled\n")
}
if sr.Cluster != nil {
if sr.Cluster.CiliumHealth != nil {
ch := sr.Cluster.CiliumHealth
fmt.Fprintf(w, "Cilium health daemon:\t%s\t%s\n", ch.State, ch.Msg)
}
}
if sr.Ipam != nil {
fmt.Fprintf(w, "IPAM:\t%s\n", sr.Ipam.Status)
if sd.AllAddresses {
fmt.Fprintf(w, "Allocated addresses:\n")
out := make([]string, 0, len(sr.Ipam.Allocations))
for ip, owner := range sr.Ipam.Allocations {
out = append(out, fmt.Sprintf(" %s (%s)", ip, owner))
}
slices.Sort(out)
for _, line := range out {
fmt.Fprintln(w, line)
}
}
}
if sr.ClusterMesh != nil {
fmt.Fprintf(w, "ClusterMesh:\t%d/%d remote clusters ready, %d global-services\n",
NumReadyClusters(sr.ClusterMesh.Clusters), len(sr.ClusterMesh.Clusters), sr.ClusterMesh.NumGlobalServices)
verbosity := RemoteClustersStatusNotReadyOnly
if sd.AllClusters {
verbosity = RemoteClustersStatusVerbose
}
FormatStatusResponseRemoteClusters(w, sr.ClusterMesh.Clusters, verbosity)
}
if sr.IPV4BigTCP != nil {
status := "Disabled"
if sr.IPV4BigTCP.Enabled {
max := fmt.Sprintf("[%d]", sr.IPV4BigTCP.MaxGSO)
if sr.IPV4BigTCP.MaxGRO != sr.IPV4BigTCP.MaxGSO {
max = fmt.Sprintf("[%d, %d]", sr.IPV4BigTCP.MaxGRO, sr.IPV4BigTCP.MaxGSO)
}
status = fmt.Sprintf("Enabled\t%s", max)
}
fmt.Fprintf(w, "IPv4 BIG TCP:\t%s\n", status)
}
if sr.IPV6BigTCP != nil {
status := "Disabled"
if sr.IPV6BigTCP.Enabled {
max := fmt.Sprintf("[%d]", sr.IPV6BigTCP.MaxGSO)
if sr.IPV6BigTCP.MaxGRO != sr.IPV6BigTCP.MaxGSO {
max = fmt.Sprintf("[%d, %d]", sr.IPV6BigTCP.MaxGRO, sr.IPV6BigTCP.MaxGSO)
}
status = fmt.Sprintf("Enabled\t%s", max)
}
fmt.Fprintf(w, "IPv6 BIG TCP:\t%s\n", status)
}
if sr.BandwidthManager != nil {
var status string
if !sr.BandwidthManager.Enabled {
status = "Disabled"
} else {
status = fmt.Sprintf("EDT with BPF [%s] [%s]",
strings.ToUpper(sr.BandwidthManager.CongestionControl),
strings.Join(sr.BandwidthManager.Devices, ", "))
}
fmt.Fprintf(w, "BandwidthManager:\t%s\n", status)
}
if sr.Routing != nil {
status := "Network: " + sr.Routing.InterHostRoutingMode
if sr.Routing.InterHostRoutingMode == models.RoutingInterHostRoutingModeTunnel {
status = status + " [" + sr.Routing.TunnelProtocol + "]"
}
status = status + "\tHost: " + sr.Routing.IntraHostRoutingMode
fmt.Fprintf(w, "Routing:\t%s\n", status)
}
if sr.AttachMode != "" {
status := "Legacy TC"
if sr.AttachMode == models.AttachModeTcx {
status = "TCX"
}
fmt.Fprintf(w, "Attach Mode:\t%s\n", status)
}
if sr.DatapathMode != "" {
status := "?"
if sr.DatapathMode == models.DatapathModeVeth {
status = "veth"
} else if sr.DatapathMode == models.DatapathModeNetkitDashL2 {
status = "netkit-l2"
} else if sr.DatapathMode == models.DatapathModeNetkit {
status = "netkit"
}
fmt.Fprintf(w, "Device Mode:\t%s\n", status)
}
if sr.Masquerading != nil {
var status string
enabled := func(enabled bool) string {
if enabled {
return "Enabled"
}
return "Disabled"
}
if sr.Masquerading.EnabledProtocols == nil {
status = enabled(sr.Masquerading.Enabled)
} else if !sr.Masquerading.EnabledProtocols.IPV4 && !sr.Masquerading.EnabledProtocols.IPV6 {
status = enabled(false)
} else {
if sr.Masquerading.Mode == models.MasqueradingModeBPF {
if sr.Masquerading.IPMasqAgent {
status = "BPF (ip-masq-agent)"
} else {
status = "BPF"
}
if sr.KubeProxyReplacement != nil {
devStr := ""
for i, dev := range sr.KubeProxyReplacement.DeviceList {
devStr += dev.Name
if i+1 != len(sr.KubeProxyReplacement.DeviceList) {
devStr += ", "
}
}
status += fmt.Sprintf(
"\t[%s]\t%s %s",
devStr,
sr.Masquerading.SnatExclusionCidrV4,
sr.Masquerading.SnatExclusionCidrV6,
)
}
} else if sr.Masquerading.Mode == models.MasqueradingModeIptables {
status = "IPTables"
}
status = fmt.Sprintf("%s [IPv4: %s, IPv6: %s]", status,
enabled(sr.Masquerading.EnabledProtocols.IPV4), enabled(sr.Masquerading.EnabledProtocols.IPV6))
}
fmt.Fprintf(w, "Masquerading:\t%s\n", status)
}
if sd.ClockSourceDetails && sr.ClockSource != nil {
status := sr.ClockSource.Mode
if sr.ClockSource.Mode == models.ClockSourceModeJiffies {
status = fmt.Sprintf("%s\t[%d Hz]",
sr.ClockSource.Mode, sr.ClockSource.Hertz)
}
fmt.Fprintf(w, "Clock Source for BPF:\t%s\n", status)
}
if sr.Controllers != nil {
nFailing, out := 0, []string{" Name\tLast success\tLast error\tCount\tMessage\n"}
for _, ctrl := range sr.Controllers {
status := ctrl.Status
if status == nil {
continue
}
if status.ConsecutiveFailureCount > 0 {
nFailing++
} else if !sd.AllControllers {
continue
}
failSince := timeSince(time.Time(status.LastFailureTimestamp))
successSince := timeSince(time.Time(status.LastSuccessTimestamp))
err := "no error"
if status.LastFailureMsg != "" {
err = status.LastFailureMsg
}
out = append(out, fmt.Sprintf(" %s\t%s\t%s\t%d\t%s\t\n",
ctrl.Name, successSince, failSince, status.ConsecutiveFailureCount, err))
}
nOK := len(sr.Controllers) - nFailing
fmt.Fprintf(w, "Controller Status:\t%d/%d healthy\n", nOK, len(sr.Controllers))
if len(out) > 1 {
tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
slices.Sort(out)
for _, s := range out {
fmt.Fprint(tab, s)
}
tab.Flush()
}
}
if sr.Proxy != nil {
fmt.Fprintf(w, "Proxy Status:\tOK, ip %s, %d redirects active on ports %s, Envoy: %s\n",
sr.Proxy.IP, sr.Proxy.TotalRedirects, sr.Proxy.PortRange, sr.Proxy.EnvoyDeploymentMode)
if sd.AllRedirects && sr.Proxy.TotalRedirects > 0 {
out := make([]string, 0, len(sr.Proxy.Redirects)+1)
for _, r := range sr.Proxy.Redirects {
out = append(out, fmt.Sprintf(" %s\t%s\t%d\n", r.Proxy, r.Name, r.ProxyPort))
}
tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
fmt.Fprint(tab, " Protocol\tRedirect\tProxy Port\n")
slices.Sort(out)
for _, s := range out {
fmt.Fprint(tab, s)
}
tab.Flush()
}
} else {
fmt.Fprintf(w, "Proxy Status:\tNo managed proxy redirect\n")
}
if sr.IdentityRange != nil {
fmt.Fprintf(w, "Global Identity Range:\tmin %d, max %d\n",
sr.IdentityRange.MinIdentity, sr.IdentityRange.MaxIdentity)
} else {
fmt.Fprintf(w, "Global Identity Range:\tUnknown\n")
}
if sr.Hubble != nil {
var fields []string
state := sr.Hubble.State
if sr.Hubble.Msg != "" {
state = fmt.Sprintf("%s %s", state, sr.Hubble.Msg)
}
fields = append(fields, state)
if o := sr.Hubble.Observer; o != nil {
var observer []string
if o.MaxFlows > 0 {
observer = append(observer, fmt.Sprintf("Current/Max Flows: %d/%d (%.2f%%)",
o.CurrentFlows, o.MaxFlows, (float64(o.CurrentFlows)/float64(o.MaxFlows))*100))
}
if o.Uptime > 0 {
observer = append(observer, fmt.Sprintf("Flows/s: %.2f",
float64(o.SeenFlows)/time.Duration(o.Uptime).Seconds()))
}
fields = append(fields, strings.Join(observer, ", "))
}
if sr.HubbleMetrics != nil {
metrics := sr.HubbleMetrics.State
if sr.HubbleMetrics.Msg != "" {
metrics = fmt.Sprintf("%s (%s)", metrics, sr.HubbleMetrics.Msg)
}
fields = append(fields, fmt.Sprintf("Metrics: %s", metrics))
}
fmt.Fprintf(w, "Hubble:\t%s\n", strings.Join(fields, "\t"))
}
if sd.KubeProxyReplacementDetails && sr.Kubernetes != nil && sr.KubeProxyReplacement != nil {
var selection, mode, dsrMode, xdp string
lb := "Disabled"
cIP := "Enabled"
nPort := "Disabled"
if np := sr.KubeProxyReplacement.Features.NodePort; np.Enabled {
selection = np.Algorithm
if selection == models.KubeProxyReplacementFeaturesNodePortAlgorithmMaglev {
selection = fmt.Sprintf("%s (Table Size: %d)", np.Algorithm, np.LutSize)
}
xdp = np.Acceleration
mode = np.Mode
if mode == models.KubeProxyReplacementFeaturesNodePortModeDSR ||
mode == models.KubeProxyReplacementFeaturesNodePortModeHybrid {
dsrMode = np.DsrMode
}
nPort = fmt.Sprintf("Enabled (Range: %d-%d)", np.PortMin, np.PortMax)
lb = "Enabled"
}
affinity := "Disabled"
if sr.KubeProxyReplacement.Features.SessionAffinity.Enabled {
affinity = "Enabled"
}
hPort := "Disabled"
if sr.KubeProxyReplacement.Features.HostPort.Enabled {
hPort = "Enabled"
}
eIP := "Disabled"
if sr.KubeProxyReplacement.Features.ExternalIPs.Enabled {
eIP = "Enabled"
}
socketLB := "Disabled"
if slb := sr.KubeProxyReplacement.Features.SocketLB; slb.Enabled {
socketLB = "Enabled"
}
socketLBTracing := "Disabled"
if st := sr.KubeProxyReplacement.Features.SocketLBTracing; st.Enabled {
socketLBTracing = "Enabled"
}
socketLBCoverage := "Full"
if sr.KubeProxyReplacement.Features.BpfSocketLBHostnsOnly {
socketLBCoverage = "Hostns-only"
}
nat46X64 := "Disabled"
nat46X64GW := "Disabled"
nat46X64SVC := "Disabled"
prefixes := ""
if sr.KubeProxyReplacement.Features.Nat46X64.Enabled {
nat46X64 = "Enabled"
if svc := sr.KubeProxyReplacement.Features.Nat46X64.Service; svc.Enabled {
nat46X64SVC = "Enabled"
}
if gw := sr.KubeProxyReplacement.Features.Nat46X64.Gateway; gw.Enabled {
nat46X64GW = "Enabled"
prefixes = strings.Join(gw.Prefixes, ", ")
}
}
fmt.Fprintf(w, "KubeProxyReplacement Details:\n")
tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
fmt.Fprintf(tab, " Status:\t%s\n", sr.KubeProxyReplacement.Mode)
fmt.Fprintf(tab, " Socket LB:\t%s\n", socketLB)
fmt.Fprintf(tab, " Socket LB Tracing:\t%s\n", socketLBTracing)
fmt.Fprintf(tab, " Socket LB Coverage:\t%s\n", socketLBCoverage)
if kubeProxyDevices != "" {
fmt.Fprintf(tab, " Devices:\t%s\n", kubeProxyDevices)
}
if mode != "" {
fmt.Fprintf(tab, " Mode:\t%s\n", mode)
}
if dsrMode != "" {
fmt.Fprintf(tab, " DSR Dispatch Mode:\t%s\n", dsrMode)
}
if selection != "" {
fmt.Fprintf(tab, " Backend Selection:\t%s\n", selection)
}
fmt.Fprintf(tab, " Session Affinity:\t%s\n", affinity)
if nat46X64 == "Disabled" {
fmt.Fprintf(tab, " NAT46/64 Support:\t%s\n", nat46X64)
} else {
fmt.Fprintf(tab, " NAT46/64 Support:\n")
fmt.Fprintf(tab, " - Services:\t%s\n", nat46X64SVC)
fmt.Fprintf(tab, " - Gateway:\t%s\n", nat46X64GW)
if nat46X64GW == "Enabled" && prefixes != "" {
fmt.Fprintf(tab, " Prefixes:\t%s\n", prefixes)
}
}
if xdp != "" {
fmt.Fprintf(tab, " XDP Acceleration:\t%s\n", xdp)
}
fmt.Fprintf(tab, " Services:\n")
fmt.Fprintf(tab, " - ClusterIP:\t%s\n", cIP)
fmt.Fprintf(tab, " - NodePort:\t%s \n", nPort)
fmt.Fprintf(tab, " - LoadBalancer:\t%s \n", lb)
fmt.Fprintf(tab, " - externalIPs:\t%s \n", eIP)
fmt.Fprintf(tab, " - HostPort:\t%s\n", hPort)
if len(sr.KubeProxyReplacement.Features.Annotations) > 0 {
fmt.Fprintf(tab, " Annotations:\n")
for _, annotation := range sr.KubeProxyReplacement.Features.Annotations {
fmt.Fprintf(tab, " - %s\n", annotation)
}
} else {
fmt.Fprintf(tab, " Annotations:\t(n/a)\n")
}
tab.Flush()
}
if sd.BPFMapDetails && sr.BpfMaps != nil {
dynamicSizingStatus := "off"
ratio := sr.BpfMaps.DynamicSizeRatio
if 0.0 < ratio && ratio <= 1.0 {
dynamicSizingStatus = fmt.Sprintf("on (ratio: %f)", ratio)
}
fmt.Fprintf(w, "BPF Maps:\tdynamic sizing: %s\n", dynamicSizingStatus)
tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
fmt.Fprintf(tab, " Name\tSize\n")
for _, m := range sr.BpfMaps.Maps {
fmt.Fprintf(tab, " %s\t%d\n", m.Name, m.Size)
}
tab.Flush()
}
if sr.Encryption != nil {
var fields []string
if sr.Encryption.Msg != "" {
fields = append(fields, sr.Encryption.Msg)
} else if wg := sr.Encryption.Wireguard; wg != nil {
fields = append(fields, fmt.Sprintf("[NodeEncryption: %s", wg.NodeEncryption))
ifaces := make([]string, 0, len(wg.Interfaces))
for _, i := range wg.Interfaces {
iface := fmt.Sprintf("%s (Pubkey: %s, Port: %d, Peers: %d)",
i.Name, i.PublicKey, i.ListenPort, i.PeerCount)
ifaces = append(ifaces, iface)
}
fields = append(fields, fmt.Sprintf("%s]", strings.Join(ifaces, ", ")))
}
fmt.Fprintf(w, "Encryption:\t%s\t%s\n", sr.Encryption.Mode, strings.Join(fields, ", "))
}
}
// RemoteClustersStatusVerbosity specifies the verbosity when formatting the remote clusters status information.
// It is consumed by FormatStatusResponseRemoteClusters.
type RemoteClustersStatusVerbosity uint

const (
	// RemoteClustersStatusVerbose outputs all remote clusters information.
	RemoteClustersStatusVerbose RemoteClustersStatusVerbosity = iota
	// RemoteClustersStatusBrief outputs a one-line summary only for ready clusters.
	RemoteClustersStatusBrief
	// RemoteClustersStatusNotReadyOnly outputs the remote clusters information for non-ready clusters only.
	RemoteClustersStatusNotReadyOnly
)
// FormatStatusResponseRemoteClusters writes a human-readable summary of the
// given remote clusters to w. The verbosity parameter controls how much is
// printed per cluster: with RemoteClustersStatusBrief, ready clusters get the
// one-line summary only; with RemoteClustersStatusNotReadyOnly, ready
// clusters are skipped entirely; otherwise everything is printed.
func FormatStatusResponseRemoteClusters(w io.Writer, clusters []*models.RemoteCluster, verbosity RemoteClustersStatusVerbosity) {
	for _, cluster := range clusters {
		if verbosity != RemoteClustersStatusNotReadyOnly || !cluster.Ready {
			// One-line summary with counters and last-failure information.
			fmt.Fprintf(w, " %s: %s, %d nodes, %d endpoints, %d identities, %d services, %d MCS-API service exports, %d reconnections (last: %s)\n",
				cluster.Name, clusterReadiness(cluster), cluster.NumNodes,
				cluster.NumEndpoints, cluster.NumIdentities, cluster.NumSharedServices, cluster.NumServiceExports,
				cluster.NumFailures, timeSince(time.Time(cluster.LastFailure)))
			// In brief mode, ready clusters get the summary line only.
			if verbosity == RemoteClustersStatusBrief && cluster.Ready {
				continue
			}
			fmt.Fprintf(w, " └ %s\n", cluster.Status)
			fmt.Fprint(w, " └ remote configuration: ")
			if cluster.Config != nil {
				fmt.Fprintf(w, "expected=%t, retrieved=%t", cluster.Config.Required, cluster.Config.Retrieved)
				// ServiceExportsEnabled is a tri-state: nil means the remote
				// cluster does not support service exports at all.
				serviceExportsConfig := "unsupported"
				if cluster.Config.ServiceExportsEnabled != nil {
					if *cluster.Config.ServiceExportsEnabled {
						serviceExportsConfig = "enabled"
					} else {
						serviceExportsConfig = "disabled"
					}
				}
				// Details below are only meaningful once the remote
				// configuration has actually been retrieved.
				if cluster.Config.Retrieved {
					fmt.Fprintf(w, ", cluster-id=%d, kvstoremesh=%t, sync-canaries=%t, service-exports=%s",
						cluster.Config.ClusterID, cluster.Config.Kvstoremesh, cluster.Config.SyncCanaries, serviceExportsConfig)
				}
			} else {
				fmt.Fprint(w, "expected=unknown, retrieved=unknown")
			}
			fmt.Fprint(w, "\n")
			if cluster.Synced != nil {
				fmt.Fprintf(w, " └ synchronization status: nodes=%v, endpoints=%v, identities=%v, services=%v",
					cluster.Synced.Nodes, cluster.Synced.Endpoints, cluster.Synced.Identities, cluster.Synced.Services)
				// ServiceExports sync state is optional (nil when unsupported).
				if cluster.Synced.ServiceExports != nil {
					fmt.Fprintf(w, ", service-exports=%v", *cluster.Synced.ServiceExports)
				}
				fmt.Fprintln(w)
			}
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"maps"
"github.com/cilium/cilium/api/v1/client/daemon"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/api"
)
// ConfigGet retrieves the current daemon configuration from the agent.
func (c *Client) ConfigGet() (*models.DaemonConfiguration, error) {
	response, err := c.Daemon.GetConfig(nil)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// ConfigPatch modifies the daemon configuration. The full configuration is
// fetched first so that the patch is applied on top of the complete option
// set instead of replacing it wholesale.
func (c *Client) ConfigPatch(cfg models.DaemonConfigurationSpec) error {
	fullCfg, err := c.ConfigGet()
	if err != nil {
		return err
	}
	// Defensive guards: maps.Copy panics when writing into a nil map, and a
	// nil Spec would be dereferenced below.
	if fullCfg.Spec == nil {
		fullCfg.Spec = &models.DaemonConfigurationSpec{}
	}
	if fullCfg.Spec.Options == nil && len(cfg.Options) > 0 {
		fullCfg.Spec.Options = make(models.ConfigurationMap, len(cfg.Options))
	}
	maps.Copy(fullCfg.Spec.Options, cfg.Options)
	if cfg.PolicyEnforcement != "" {
		fullCfg.Spec.PolicyEnforcement = cfg.PolicyEnforcement
	}
	params := daemon.NewPatchConfigParams().WithConfiguration(fullCfg.Spec).WithTimeout(api.ClientTimeout)
	_, err = c.Daemon.PatchConfig(params)
	return Hint(err)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"github.com/cilium/cilium/api/v1/client/endpoint"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/api"
pkgEndpointID "github.com/cilium/cilium/pkg/endpoint/id"
"github.com/cilium/cilium/pkg/labels"
)
// EndpointList returns a list of all endpoints managed by the agent.
func (c *Client) EndpointList() ([]*models.Endpoint, error) {
	response, err := c.Endpoint.GetEndpoint(nil)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// EndpointDeleteMany deletes multiple endpoints in a single batch request.
func (c *Client) EndpointDeleteMany(req *models.EndpointBatchDeleteRequest) error {
	p := endpoint.NewDeleteEndpointParams().
		WithEndpoint(req).
		WithTimeout(api.ClientTimeout)
	_, _, err := c.Endpoint.DeleteEndpoint(p)
	return Hint(err)
}
// EndpointGet returns the endpoint identified by id.
func (c *Client) EndpointGet(id string) (*models.Endpoint, error) {
	p := endpoint.NewGetEndpointIDParams().
		WithID(id).
		WithTimeout(api.ClientTimeout)
	response, err := c.Endpoint.GetEndpointID(p)
	if err != nil {
		// Plugins inspect the concrete error type, so the error is
		// intentionally returned unwrapped (no Hint(...)).
		return nil, err
	}
	return response.Payload, nil
}
// EndpointCreate creates a new endpoint from the given change request.
func (c *Client) EndpointCreate(ep *models.EndpointChangeRequest) (*models.Endpoint, error) {
	ciliumID := pkgEndpointID.NewCiliumID(ep.ID)
	p := endpoint.NewPutEndpointIDParams().
		WithID(ciliumID).
		WithEndpoint(ep).
		WithTimeout(api.ClientTimeout)
	response, err := c.Endpoint.PutEndpointID(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// EndpointPatch modifies the endpoint identified by id.
func (c *Client) EndpointPatch(id string, ep *models.EndpointChangeRequest) error {
	p := endpoint.NewPatchEndpointIDParams().
		WithID(id).
		WithEndpoint(ep).
		WithTimeout(api.ClientTimeout)
	_, err := c.Endpoint.PatchEndpointID(p)
	return Hint(err)
}
// EndpointDelete deletes the endpoint identified by id.
func (c *Client) EndpointDelete(id string) error {
	p := endpoint.NewDeleteEndpointIDParams().
		WithID(id).
		WithTimeout(api.ClientTimeout)
	_, _, err := c.Endpoint.DeleteEndpointID(p)
	return Hint(err)
}
// EndpointLogGet returns the status log of the endpoint identified by id.
func (c *Client) EndpointLogGet(id string) (models.EndpointStatusLog, error) {
	p := endpoint.NewGetEndpointIDLogParams().
		WithID(id).
		WithTimeout(api.ClientTimeout)
	response, err := c.Endpoint.GetEndpointIDLog(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// EndpointHealthGet returns the health status of the endpoint identified by id.
func (c *Client) EndpointHealthGet(id string) (*models.EndpointHealth, error) {
	p := endpoint.NewGetEndpointIDHealthzParams().
		WithID(id).
		WithTimeout(api.ClientTimeout)
	response, err := c.Endpoint.GetEndpointIDHealthz(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// EndpointConfigGet returns the configuration of the endpoint identified by id.
func (c *Client) EndpointConfigGet(id string) (*models.EndpointConfigurationStatus, error) {
	p := endpoint.NewGetEndpointIDConfigParams().
		WithID(id).
		WithTimeout(api.ClientTimeout)
	response, err := c.Endpoint.GetEndpointIDConfig(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// EndpointConfigPatch modifies the configuration of the endpoint identified
// by id. A nil cfg sends a patch with no configuration body.
func (c *Client) EndpointConfigPatch(id string, cfg *models.EndpointConfigurationSpec) error {
	p := endpoint.NewPatchEndpointIDConfigParams().
		WithID(id).
		WithTimeout(api.ClientTimeout)
	if cfg != nil {
		p.SetEndpointConfiguration(cfg)
	}
	_, err := c.Endpoint.PatchEndpointIDConfig(p)
	return Hint(err)
}
// EndpointLabelsGet returns the label configuration of the endpoint
// identified by id.
func (c *Client) EndpointLabelsGet(id string) (*models.LabelConfiguration, error) {
	p := endpoint.NewGetEndpointIDLabelsParams().
		WithID(id).
		WithTimeout(api.ClientTimeout)
	response, err := c.Endpoint.GetEndpointIDLabels(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// EndpointLabelsPatch modifies the endpoint's user label configuration.
//
// toAdd: labels to add and enable. If a label is an orchestration system
// label that has been disabled before, it is removed from the disabled
// list and re-added to the orchestration list. Otherwise it is added to
// the custom label list.
//
// toDelete: labels to delete. If a label is an orchestration system label,
// it is deleted from the orchestration list and added to the disabled
// list. Otherwise it is removed from the custom list.
func (c *Client) EndpointLabelsPatch(id string, toAdd, toDelete models.Labels) error {
	currentCfg, err := c.EndpointLabelsGet(id)
	if err != nil {
		return err
	}
	// Start from the currently realized user labels and apply the delta.
	userLabels := labels.NewLabelsFromModel(currentCfg.Status.Realized.User)
	for _, l := range toAdd {
		parsed := labels.ParseLabel(l)
		if _, exists := userLabels[parsed.Key]; !exists {
			userLabels[parsed.Key] = parsed
		}
	}
	for _, l := range toDelete {
		delete(userLabels, labels.ParseLabel(l).Key)
	}
	currentCfg.Spec.User = userLabels.GetModel()
	p := endpoint.NewPatchEndpointIDLabelsParams().
		WithID(id).
		WithTimeout(api.ClientTimeout).
		WithConfiguration(currentCfg.Spec)
	_, err = c.Endpoint.PatchEndpointIDLabels(p)
	return Hint(err)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"github.com/cilium/cilium/api/v1/client/policy"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/api"
)
// IdentityGet returns the security identity with the given numeric ID.
func (c *Client) IdentityGet(id string) (*models.Identity, error) {
	p := policy.NewGetIdentityIDParams().
		WithID(id).
		WithTimeout(api.ClientTimeout)
	response, err := c.Policy.GetIdentityID(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"github.com/cilium/cilium/api/v1/client/ipam"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/api"
)
const (
	// AddressFamilyIPv6 selects the IPv6 address allocation pool.
	AddressFamilyIPv6 = "ipv6"
	// AddressFamilyIPv4 selects the IPv4 address allocation pool.
	AddressFamilyIPv4 = "ipv4"
)
// IPAMAllocate allocates an IP address out of the address family specific
// pool. Empty family, owner, or pool arguments are omitted from the request.
func (c *Client) IPAMAllocate(family, owner, pool string, expiration bool) (*models.IPAMResponse, error) {
	p := ipam.NewPostIpamParams().WithTimeout(api.ClientTimeout)
	p.SetExpiration(&expiration)
	if family != "" {
		p.SetFamily(&family)
	}
	if owner != "" {
		p.SetOwner(&owner)
	}
	if pool != "" {
		p.SetPool(&pool)
	}
	response, err := c.Ipam.PostIpam(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// IPAMAllocateIP tries to allocate the specific IP address ip on behalf of
// owner, optionally from the named pool.
func (c *Client) IPAMAllocateIP(ip, owner, pool string) error {
	p := ipam.NewPostIpamIPParams().
		WithIP(ip).
		WithOwner(&owner).
		WithTimeout(api.ClientTimeout)
	if pool != "" {
		p.SetPool(&pool)
	}
	_, err := c.Ipam.PostIpamIP(p)
	return Hint(err)
}
// IPAMReleaseIP releases an IP address back to the pool it was allocated from.
func (c *Client) IPAMReleaseIP(ip, pool string) error {
	p := ipam.NewDeleteIpamIPParams().
		WithIP(ip).
		WithTimeout(api.ClientTimeout)
	if pool != "" {
		p.SetPool(&pool)
	}
	_, err := c.Ipam.DeleteIpamIP(p)
	return Hint(err)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"github.com/cilium/cilium/api/v1/models"
)
// GetLRPs returns a list of all local redirect policies.
func (c *Client) GetLRPs() ([]*models.LRPSpec, error) {
	response, err := c.Service.GetLrp(nil)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"github.com/cilium/cilium/api/v1/client/policy"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/api"
)
// PolicyPut inserts the given JSON-encoded policy.
//
// Deprecated: to be removed in v1.19.
func (c *Client) PolicyPut(policyJSON string) (*models.Policy, error) {
	p := policy.NewPutPolicyParams().
		WithPolicy(policyJSON).
		WithTimeout(api.ClientTimeout)
	response, err := c.Policy.PutPolicy(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// PolicyReplace replaces the current policy with the given JSON-encoded one,
// optionally restricting the replacement to rules matching replaceWithLabels.
//
// Deprecated: to be removed in v1.19.
func (c *Client) PolicyReplace(policyJSON string, replace bool, replaceWithLabels []string) (*models.Policy, error) {
	p := policy.NewPutPolicyParams().
		WithPolicy(policyJSON).
		WithReplace(&replace).
		WithReplaceWithLabels(replaceWithLabels).
		WithTimeout(api.ClientTimeout)
	response, err := c.Policy.PutPolicy(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// PolicyGet returns the policy rules matching the given labels.
//
// Deprecated: to be removed in v1.19.
func (c *Client) PolicyGet(labels []string) (*models.Policy, error) {
	p := policy.NewGetPolicyParams().
		WithLabels(labels).
		WithTimeout(api.ClientTimeout)
	response, err := c.Policy.GetPolicy(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// PolicyCacheGet returns the contents of the agent's SelectorCache.
func (c *Client) PolicyCacheGet() (models.SelectorCache, error) {
	p := policy.NewGetPolicySelectorsParams().WithTimeout(api.ClientTimeout)
	response, err := c.Policy.GetPolicySelectors(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// PolicyDelete deletes the policy rules matching the given labels.
//
// Deprecated: to be removed in v1.19.
func (c *Client) PolicyDelete(labels []string) (*models.Policy, error) {
	params := policy.NewDeletePolicyParams().WithLabels(labels).WithTimeout(api.ClientTimeout)
	resp, err := c.Policy.DeletePolicy(params)
	if err != nil {
		return nil, Hint(err)
	}
	// err is guaranteed nil here; return an explicit nil error instead of the
	// redundant Hint(err), matching the other wrappers in this package.
	return resp.Payload, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"github.com/cilium/cilium/api/v1/client/prefilter"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/api"
)
// GetPrefilter returns the list of all prefilter CIDR prefixes.
func (c *Client) GetPrefilter() (*models.Prefilter, error) {
	response, err := c.Prefilter.GetPrefilter(nil)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// PatchPrefilter sets the given list of CIDR prefixes.
func (c *Client) PatchPrefilter(spec *models.PrefilterSpec) (*models.Prefilter, error) {
	p := prefilter.NewPatchPrefilterParams().
		WithPrefilterSpec(spec).
		WithTimeout(api.ClientTimeout)
	response, err := c.Prefilter.PatchPrefilter(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// DeletePrefilter deletes the given list of CIDR prefixes.
func (c *Client) DeletePrefilter(spec *models.PrefilterSpec) (*models.Prefilter, error) {
	p := prefilter.NewDeletePrefilterParams().
		WithPrefilterSpec(spec).
		WithTimeout(api.ClientTimeout)
	response, err := c.Prefilter.DeletePrefilter(p)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"github.com/cilium/cilium/api/v1/models"
)
// GetServices returns a list of all services known to the agent.
func (c *Client) GetServices() ([]*models.Service, error) {
	response, err := c.Service.GetService(nil)
	if err == nil {
		return response.Payload, nil
	}
	return nil, Hint(err)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"bytes"
"cmp"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
"go4.org/netipx"
"github.com/cilium/cilium/pkg/cidr"
)
//
// In this file, we define types and utilities for cluster-aware
// addressing which identifies network endpoints using IP address
// and an optional ClusterID. With this special addressing scheme,
// we can distinguish network endpoints (e.g. Pods) that have the
// same IP address, but belong to the different cluster.
//
// A "bare" IP address is still a valid identifier because there
// are cases that endpoints can be identified without ClusterID
// (e.g. network endpoint has a unique IP address). We can consider
// this as a special case that ClusterID "doesn't matter". ClusterID
// 0 is reserved for indicating that.
//
// AddrCluster is a type that holds a pair of IP and ClusterID.
// We should use this type as much as possible when we implement
// IP + Cluster addressing. We should avoid managing IP and ClusterID
// separately. Otherwise, it is very hard for code readers to see
// where we are using cluster-aware addressing.
type AddrCluster struct {
	addr      netip.Addr // IP address part of the pair
	clusterID uint32     // cluster identifier; 0 means "ClusterID doesn't matter"
}
// AddrClusterLen is the length of the byte representation produced by As20:
// a 16-byte IP address followed by a 4-byte ClusterID.
const AddrClusterLen = 20

var (
	errUnmarshalBadAddress   = errors.New("AddrCluster.UnmarshalJSON: bad address")
	errMarshalInvalidAddress = errors.New("AddrCluster.MarshalJSON: invalid address")
	// jsonZeroAddress is the JSON encoding of the zero AddrCluster.
	jsonZeroAddress = []byte("\"\"")
)
// MarshalJSON marshals the address as a string in the form
// <addr>@<clusterID>, e.g. "1.2.3.4@1". The zero AddrCluster is encoded as
// the empty JSON string "".
func (a *AddrCluster) MarshalJSON() ([]byte, error) {
	if !a.addr.IsValid() {
		if a.clusterID != 0 {
			// A ClusterID without a valid address is not representable.
			return nil, errMarshalInvalidAddress
		}
		// AddrCluster{} is the zero value; round-trip it as "".
		return jsonZeroAddress, nil
	}
	return []byte(`"` + a.String() + `"`), nil
}
// UnmarshalJSON decodes a JSON string of the form "<addr>@<clusterID>" (or a
// bare address, or "" for the zero value) into a.
func (a *AddrCluster) UnmarshalJSON(data []byte) error {
	if bytes.Equal(data, jsonZeroAddress) {
		// "" decodes to the zero AddrCluster.
		return nil
	}
	n := len(data)
	if n <= 2 || data[0] != '"' || data[n-1] != '"' {
		return errUnmarshalBadAddress
	}
	// Strip the surrounding double quotes before parsing.
	parsed, err := ParseAddrCluster(string(data[1 : n-1]))
	if err != nil {
		return err
	}
	*a = parsed
	return nil
}
// ParseAddrCluster parses s as an IP + ClusterID and returns AddrCluster.
// The string s can be a bare IP string (any IP address format allowed in
// netip.ParseAddr()) or IP string + @ + ClusterID with decimal. A bare IP
// string is treated as ClusterID = 0.
func ParseAddrCluster(s string) (AddrCluster, error) {
	addrStr, clusterIDStr := s, ""
	if at := strings.LastIndex(s, "@"); at >= 0 {
		// Split "addr@id" at the last '@'.
		addrStr, clusterIDStr = s[:at], s[at+1:]
	}
	addr, err := netip.ParseAddr(addrStr)
	if err != nil {
		return AddrCluster{}, err
	}
	if clusterIDStr == "" {
		if strings.HasSuffix(s, "@") {
			// Reject the malformed form "10.0.0.0@".
			return AddrCluster{}, fmt.Errorf("empty cluster ID")
		}
		// Bare address: ClusterID defaults to 0.
		return AddrCluster{addr: addr, clusterID: 0}, nil
	}
	id, err := strconv.ParseUint(clusterIDStr, 10, 32)
	if err != nil {
		return AddrCluster{}, err
	}
	return AddrCluster{addr: addr, clusterID: uint32(id)}, nil
}
// MustParseAddrCluster calls ParseAddrCluster(s) and panics on error. It is
// intended for use in tests with hard-coded strings.
func MustParseAddrCluster(s string) AddrCluster {
	ac, err := ParseAddrCluster(s)
	if err != nil {
		panic(err)
	}
	return ac
}
// AddrClusterFromIP parses the given net.IP using netipx.FromStdIP and returns
// AddrCluster with ClusterID = 0. The boolean result reports whether the
// conversion succeeded.
func AddrClusterFromIP(ip net.IP) (AddrCluster, bool) {
	if addr, ok := netipx.FromStdIP(ip); ok {
		return AddrCluster{addr: addr, clusterID: 0}, true
	}
	return AddrCluster{}, false
}
// MustAddrClusterFromIP converts ip via AddrClusterFromIP and panics when the
// conversion fails. Intended for inputs known to be convertible.
func MustAddrClusterFromIP(ip net.IP) AddrCluster {
	ac, ok := AddrClusterFromIP(ip)
	if !ok {
		panic("cannot convert net.IP to AddrCluster")
	}
	return ac
}
// AddrClusterFrom creates AddrCluster from netip.Addr and ClusterID
func AddrClusterFrom(addr netip.Addr, clusterID uint32) AddrCluster {
	return AddrCluster{addr: addr, clusterID: clusterID}
}

// Addr returns IP address part of AddrCluster as netip.Addr. This function
// exists for keeping backward compatibility between the existing components
// which are not aware of the cluster-aware addressing. Calling this function
// against the AddrCluster which has non-zero clusterID will lose the ClusterID
// information. It should be used with an extra care.
func (ac AddrCluster) Addr() netip.Addr {
	return ac.addr
}

// ClusterID returns ClusterID part of AddrCluster as uint32. We should avoid
// using this function as much as possible and treat IP address and ClusterID
// together.
func (ac AddrCluster) ClusterID() uint32 {
	return ac.clusterID
}

// Equal returns true when given AddrCluster has a same IP address and ClusterID.
// AddrCluster is a comparable struct, so plain == suffices.
func (ac0 AddrCluster) Equal(ac1 AddrCluster) bool {
	return ac0 == ac1
}
// Compare returns an integer comparing two [AddrCluster] objects.
// The result will be 0 if ac0 == ac1, -1 if ac0 < ac1, and +1 if ac0 > ac1.
// The IP address is the primary sort key; ClusterID breaks ties.
func (ac0 AddrCluster) Compare(ac1 AddrCluster) int {
	ret := ac0.addr.Compare(ac1.addr)
	if ret == 0 {
		ret = cmp.Compare(ac0.clusterID, ac1.clusterID)
	}
	return ret
}
// Less compares ac0 and ac1 and returns true if ac0 is lesser than ac1
func (ac0 AddrCluster) Less(ac1 AddrCluster) bool {
	return ac0.Compare(ac1) == -1
}

// DeepEqual is an alias of Equal which only exists for satisfying deepequal-gen.
// Both pointers must be non-nil.
func (ac0 *AddrCluster) DeepEqual(ac1 *AddrCluster) bool {
	return ac0.Equal(*ac1)
}
// DeepCopyInto copies in to out. A nil out is a no-op.
func (in *AddrCluster) DeepCopyInto(out *AddrCluster) {
	if out == nil {
		return
	}
	// AddrCluster holds no reference types, so a struct copy is a deep copy.
	*out = *in
}
// DeepCopy returns a new copy of AddrCluster
func (in *AddrCluster) DeepCopy() *AddrCluster {
	out := new(AddrCluster)
	in.DeepCopyInto(out)
	return out
}
// String returns the string representation of the AddrCluster. If
// AddrCluster.clusterID = 0, it returns the bare IP address string.
// Otherwise, it returns IP string + "@" + ClusterID (e.g. 10.0.0.1@1).
func (ac AddrCluster) String() string {
	out := ac.addr.String()
	if ac.clusterID != 0 {
		out += "@" + strconv.FormatUint(uint64(ac.clusterID), 10)
	}
	return out
}
// Is4 reports whether IP address part of AddrCluster is an IPv4 address.
func (ac AddrCluster) Is4() bool {
	return ac.addr.Is4()
}

// Is6 reports whether IP address part of AddrCluster is an IPv6 address.
func (ac AddrCluster) Is6() bool {
	return ac.addr.Is6()
}

// IsUnspecified reports whether IP address part of the AddrCluster is an
// unspecified address, either the IPv4 address "0.0.0.0" or the IPv6
// address "::".
func (ac AddrCluster) IsUnspecified() bool {
	return ac.addr.IsUnspecified()
}
// As20 returns the AddrCluster in its 20-byte representation which consists
// of the 16-byte IP address part from netip.Addr.As16 followed by the 4-byte
// ClusterID part in big-endian order.
func (ac AddrCluster) As20() (ac20 [20]byte) {
	addr16 := ac.addr.As16()
	copy(ac20[:16], addr16[:])
	// Serialize the ClusterID big-endian into the trailing four bytes.
	id := ac.clusterID
	for i := 19; i >= 16; i-- {
		ac20[i] = byte(id)
		id >>= 8
	}
	return ac20
}
// AsNetIP returns the IP address part of AddCluster as a net.IP type. This
// function exists for keeping backward compatibility between the existing
// components which are not aware of the cluster-aware addressing. Calling
// this function against the AddrCluster which has non-zero clusterID will
// lose the ClusterID information. It should be used with an extra care.
func (ac AddrCluster) AsNetIP() net.IP {
	return ac.addr.AsSlice()
}

// AsPrefixCluster converts the AddrCluster into a single-address
// PrefixCluster (full-length prefix) carrying the same ClusterID.
func (ac AddrCluster) AsPrefixCluster() PrefixCluster {
	return PrefixClusterFrom(netip.PrefixFrom(ac.addr, ac.addr.BitLen()), WithClusterID(ac.clusterID))
}
// PrefixCluster is a type that holds a pair of prefix and ClusterID.
// We should use this type as much as possible when we implement
// prefix + Cluster addressing. We should avoid managing prefix and
// ClusterID separately. Otherwise, it is very hard for code readers
// to see where we are using cluster-aware addressing.
type PrefixCluster struct {
	prefix    netip.Prefix // IP prefix part of the pair
	clusterID uint32       // cluster identifier; 0 means "ClusterID doesn't matter"
}

// NewPrefixCluster builds an instance of a PrefixCluster with the input
// prefix and clusterID.
func NewPrefixCluster(prefix netip.Prefix, clusterID uint32) PrefixCluster {
	return PrefixCluster{prefix, clusterID}
}

// NewLocalPrefixCluster builds an instance of a PrefixCluster with the input
// prefix and clusterID set to 0.
func NewLocalPrefixCluster(prefix netip.Prefix) PrefixCluster {
	return NewPrefixCluster(prefix, 0)
}
// ParsePrefixCluster parses s as a Prefix + ClusterID and returns PrefixCluster.
// The string s can be a bare IP prefix string (any prefix format allowed in
// netip.ParsePrefix()) or prefix string + @ + ClusterID with decimal. A bare
// prefix string is treated as ClusterID = 0.
func ParsePrefixCluster(s string) (PrefixCluster, error) {
	prefixStr, clusterIDStr := s, ""
	if at := strings.LastIndex(s, "@"); at >= 0 {
		// Split "prefix@id" at the last '@'.
		prefixStr, clusterIDStr = s[:at], s[at+1:]
	}
	prefix, err := netip.ParsePrefix(prefixStr)
	if err != nil {
		return PrefixCluster{}, err
	}
	if clusterIDStr == "" {
		if strings.HasSuffix(s, "@") {
			// Reject the malformed form "10.0.0.0/24@".
			return PrefixCluster{}, fmt.Errorf("empty cluster ID")
		}
		// Bare prefix: ClusterID defaults to 0.
		return PrefixCluster{prefix: prefix, clusterID: 0}, nil
	}
	id, err := strconv.ParseUint(clusterIDStr, 10, 32)
	if err != nil {
		return PrefixCluster{}, err
	}
	return PrefixCluster{prefix: prefix, clusterID: uint32(id)}, nil
}
// MustParsePrefixCluster calls ParsePrefixCluster(s) and panics on error.
// It is intended for use in tests with hard-coded strings.
func MustParsePrefixCluster(s string) PrefixCluster {
	pc, err := ParsePrefixCluster(s)
	if err != nil {
		panic(err)
	}
	return pc
}
// IsSingleIP reports whether the prefix part covers exactly one address.
func (pc PrefixCluster) IsSingleIP() bool {
	return pc.prefix.IsSingleIP()
}

// PrefixClusterOpts mutates a PrefixCluster under construction; used with
// PrefixClusterFrom.
type PrefixClusterOpts func(*PrefixCluster)

// WithClusterID returns an option setting the ClusterID to id.
func WithClusterID(id uint32) PrefixClusterOpts {
	return func(pc *PrefixCluster) { pc.clusterID = id }
}

// PrefixClusterFrom creates a PrefixCluster from prefix, applying opts in
// order (ClusterID defaults to 0 when no option sets it).
func PrefixClusterFrom(prefix netip.Prefix, opts ...PrefixClusterOpts) PrefixCluster {
	pc := PrefixCluster{prefix: prefix}
	for _, opt := range opts {
		opt(&pc)
	}
	return pc
}
// PrefixClusterFromCIDR converts a *cidr.CIDR into a PrefixCluster, applying
// opts. It returns the zero PrefixCluster when c is nil or its IP cannot be
// represented as a netip.Addr.
func PrefixClusterFromCIDR(c *cidr.CIDR, opts ...PrefixClusterOpts) PrefixCluster {
	if c == nil {
		return PrefixCluster{}
	}
	if addr, ok := netipx.FromStdIP(c.IP); ok {
		ones, _ := c.Mask.Size()
		return PrefixClusterFrom(netip.PrefixFrom(addr, ones), opts...)
	}
	return PrefixCluster{}
}
// Equal reports whether pc0 and pc1 have the same prefix and ClusterID.
func (pc0 PrefixCluster) Equal(pc1 PrefixCluster) bool {
	return pc0.prefix == pc1.prefix && pc0.clusterID == pc1.clusterID
}

// IsValid reports whether the prefix part is a valid netip.Prefix.
func (pc PrefixCluster) IsValid() bool {
	return pc.prefix.IsValid()
}

// AddrCluster returns the first address of the prefix paired with the
// ClusterID.
func (pc PrefixCluster) AddrCluster() AddrCluster {
	return AddrClusterFrom(pc.prefix.Addr(), pc.clusterID)
}

// ClusterID returns the ClusterID part of the PrefixCluster.
func (pc PrefixCluster) ClusterID() uint32 {
	return pc.clusterID
}
// String returns the string representation of the PrefixCluster: the bare
// prefix string when clusterID is 0, otherwise prefix + "@" + ClusterID
// (e.g. 10.0.0.0/24@1).
func (pc PrefixCluster) String() string {
	out := pc.prefix.String()
	if pc.clusterID != 0 {
		out += "@" + strconv.FormatUint(uint64(pc.clusterID), 10)
	}
	return out
}
// AsPrefix returns the IP prefix part of PrefixCluster as a netip.Prefix type.
// This function exists for keeping backward compatibility between the existing
// components which are not aware of the cluster-aware addressing. Calling
// this function against the PrefixCluster which has non-zero clusterID will
// lose the ClusterID information. It should be used with an extra care.
func (pc PrefixCluster) AsPrefix() netip.Prefix {
	return netip.PrefixFrom(pc.prefix.Addr(), pc.prefix.Bits())
}

// AsIPNet returns the IP prefix part of PrefixCluster as a net.IPNet type. This
// function exists for keeping backward compatibility between the existing
// components which are not aware of the cluster-aware addressing. Calling
// this function against the PrefixCluster which has non-zero clusterID will
// lose the ClusterID information. It should be used with an extra care.
func (pc PrefixCluster) AsIPNet() net.IPNet {
	return *netipx.PrefixIPNet(pc.AsPrefix())
}
// AnnotateIPCacheKeyWithClusterID exists solely for annotating IPCache's key
// string with a ClusterID. IPCache's key string is an IP address or Prefix
// string (10.0.0.1 and 10.0.0.0/32 are different entries). This function
// assumes the given string is one of those formats and just appends the
// @<ClusterID> suffix; there is no format check for performance reasons.
// The caller must make sure the input is a valid IP or Prefix string.
//
// We should eventually remove this function once we finish refactoring IPCache
// and stop using string as a key. At that point, we should consider using the
// PrefixCluster type for IPCache's key.
func AnnotateIPCacheKeyWithClusterID(key string, clusterID uint32) string {
	var b strings.Builder
	b.WriteString(key)
	b.WriteByte('@')
	b.WriteString(strconv.FormatUint(uint64(clusterID), 10))
	return b.String()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"errors"
"fmt"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/defaults"
ipamOption "github.com/cilium/cilium/pkg/ipam/option"
)
const (
	// OptClusterName is the name of the "cluster-name" command line option.
	OptClusterName = "cluster-name"
	// OptClusterID is the name of the "cluster-id" command line option.
	OptClusterID = "cluster-id"
	// OptMaxConnectedClusters is the name of the "max-connected-clusters"
	// command line option.
	OptMaxConnectedClusters = "max-connected-clusters"
)
// ClusterInfo groups together the ClusterID and the ClusterName
type ClusterInfo struct {
	// ID is the unique numeric identifier of the cluster (0 means unset,
	// see ClusterIDUnset).
	ID uint32 `mapstructure:"cluster-id"`
	// Name is the cluster name; see ValidateClusterName for its syntax rules.
	Name string `mapstructure:"cluster-name"`
	// MaxConnectedClusters caps how many clusters may be connected in a
	// clustermesh; see OptMaxConnectedClusters.
	MaxConnectedClusters uint32 `mapstructure:"max-connected-clusters"`
}
// DefaultClusterInfo represents the default ClusterInfo values.
var DefaultClusterInfo = ClusterInfo{
	ID:                   0, // unset cluster ID
	Name:                 defaults.ClusterName,
	MaxConnectedClusters: defaults.MaxConnectedClusters,
}
// Flags implements the cell.Flagger interface, to register the given flags.
// The receiver's field values are used as the flag defaults.
func (ci ClusterInfo) Flags(flags *pflag.FlagSet) {
	flags.Uint32(OptClusterID, ci.ID, "Unique identifier of the cluster")
	flags.String(OptClusterName, ci.Name, "Name of the cluster. It must consist of at most 32 lower case alphanumeric characters and '-', start and end with an alphanumeric character.")
	flags.Uint32(OptMaxConnectedClusters, ci.MaxConnectedClusters, "Maximum number of clusters to be connected in a clustermesh. Increasing this value will reduce the maximum number of identities available. Valid configurations are [255, 511].")
}
// Validate validates that the ClusterID is in the valid range (including ClusterID == 0),
// and that the ClusterName is different from the default value if the ClusterID != 0.
func (c ClusterInfo) Validate() error {
	if c.ID >= ClusterIDMin && c.ID <= ClusterIDMax {
		return c.validateName()
	}
	return fmt.Errorf("invalid cluster id %d: must be in range %d..%d",
		c.ID, ClusterIDMin, ClusterIDMax)
}
// ValidateStrict validates that the ClusterID is in the valid range, but not 0,
// and that the ClusterName is different from the default value.
func (c ClusterInfo) ValidateStrict() error {
	err := ValidateClusterID(c.ID)
	if err != nil {
		return err
	}
	return c.validateName()
}
// ValidateBuggyClusterID returns an error if a buggy cluster ID (i.e., with the
// 7th bit set) is used in combination with ENI IPAM mode or AWS CNI chaining.
func (c ClusterInfo) ValidateBuggyClusterID(ipamMode, chainingMode string) error {
	// The affected IDs are exactly those with bit 7 set (128-255, 384-511).
	const buggyBit = 0x80
	if c.ID&buggyBit == 0 {
		return nil
	}
	affected := ipamMode == ipamOption.IPAMENI ||
		ipamMode == ipamOption.IPAMAlibabaCloud ||
		chainingMode == "aws-cni"
	if !affected {
		return nil
	}
	return errors.New("Cilium is currently affected by a bug that causes traffic matched " +
		"by network policies to be incorrectly dropped when running in either ENI mode (both " +
		"AWS and AlibabaCloud) or AWS VPC CNI chaining mode, if the cluster ID is 128-255 (and " +
		"384-511 when max-connected-clusters=511). " +
		"Please refer to https://github.com/cilium/cilium/issues/21330 for additional details.")
}
// validateName checks that the cluster name is syntactically valid and that
// the default name is not used together with a non-zero cluster ID.
func (c ClusterInfo) validateName() error {
	err := ValidateClusterName(c.Name)
	if err != nil {
		return fmt.Errorf("invalid cluster name: %w", err)
	}
	if c.Name == defaults.ClusterName && c.ID != 0 {
		return fmt.Errorf("cannot use default cluster name (%s) with option %s",
			defaults.ClusterName, OptClusterID)
	}
	return nil
}
// ExtendedClusterMeshEnabled returns true if MaxConnectedClusters value has
// been set to a value larger than the default 255.
func (c ClusterInfo) ExtendedClusterMeshEnabled() bool {
	extended := c.MaxConnectedClusters != defaults.MaxConnectedClusters
	return extended
}
// ValidateRemoteConfig validates the remote CiliumClusterConfig to ensure
// compatibility with this cluster's configuration.
func (c ClusterInfo) ValidateRemoteConfig(config CiliumClusterConfig) error {
	if err := ValidateClusterID(config.ID); err != nil {
		return err
	}
	// MaxConnectedClusters must only agree when extended clustermesh is on.
	if !c.ExtendedClusterMeshEnabled() {
		return nil
	}
	if c.MaxConnectedClusters == config.Capabilities.MaxConnectedClusters {
		return nil
	}
	return fmt.Errorf("mismatched MaxConnectedClusters; local=%d, remote=%d", c.MaxConnectedClusters, config.Capabilities.MaxConnectedClusters)
}
// QuirksConfig allows the user to configure how Cilium behaves when a set
// of incompatible options are configured together into the agent.
type QuirksConfig struct {
	// AllowUnsafePolicySKBUsage determines whether to hard-fail startup
	// due to detection of a configuration combination that may trigger
	// connection impact in the dataplane due to clustermesh IDs
	// conflicting with other usage of skb->mark field. See GH-21330.
	// When true, the daemon continues to operate despite the conflict.
	AllowUnsafePolicySKBUsage bool
}
// DefaultQuirks holds the default quirk settings: unsafe skb->mark usage is
// not allowed unless explicitly opted into.
var DefaultQuirks = QuirksConfig{
	AllowUnsafePolicySKBUsage: false,
}
// Flags registers the (hidden) quirk-related flags.
func (q QuirksConfig) Flags(flags *pflag.FlagSet) {
	// Use the receiver's value as the flag default, for consistency with the
	// other Flags implementations in this package (previously the default was
	// hard-coded to false, silently ignoring the provided QuirksConfig).
	flags.Bool("allow-unsafe-policy-skb-usage", q.AllowUnsafePolicySKBUsage,
		"Allow the daemon to continue to operate even if conflicting "+
			"clustermesh ID configuration is detected which may "+
			"impact the ability for Cilium to enforce network "+
			"policy both within and across clusters")
	// MarkHidden only errors when the flag is undefined; it was registered
	// just above, so the error can be safely ignored.
	flags.MarkHidden("allow-unsafe-policy-skb-usage")
}
// PolicyAnyCluster is the cluster name value (empty string) that selects any
// cluster in network policies.
const PolicyAnyCluster = ""

// PolicyConfig allows the user to configure config related to ClusterMesh and policies
type PolicyConfig struct {
	// PolicyDefaultLocalCluster controls whether policy rules assume
	// by default the local cluster if not explicitly selected
	PolicyDefaultLocalCluster bool
}

// DefaultPolicyConfig is the default policy configuration: rules apply to the
// local cluster unless stated otherwise.
var DefaultPolicyConfig = PolicyConfig{
	PolicyDefaultLocalCluster: true,
}
// Flags registers the policy-related configuration flags, using the
// receiver's field values as defaults.
func (p PolicyConfig) Flags(flags *pflag.FlagSet) {
	flags.Bool("policy-default-local-cluster", p.PolicyDefaultLocalCluster, "Control whether policy rules assume by default the local cluster if not explicitly selected")
}
// LocalClusterNameForPolicies returns what should be considered the local cluster
// name in network policies: the actual local cluster name when rules default to
// the local cluster, or PolicyAnyCluster (match any cluster) otherwise.
func LocalClusterNameForPolicies(cfg PolicyConfig, localClusterName string) string {
	// Early return instead of an else branch after a terminating if (Go idiom).
	if cfg.PolicyDefaultLocalCluster {
		return localClusterName
	}
	return PolicyAnyCluster
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"errors"
"fmt"
"regexp"
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/defaults"
)
const (
	// ClusterIDMin is the minimum value of the cluster ID
	ClusterIDMin = 0
	// ClusterIDExt511 is the extended maximum cluster ID, available when
	// max-connected-clusters is set to 511.
	ClusterIDExt511 = 511
	// ClusterIDUnset denotes a cluster ID that has not been set (0).
	ClusterIDUnset = ClusterIDMin
)

// ClusterIDMax is the maximum value of the cluster ID
var ClusterIDMax uint32 = defaults.MaxConnectedClusters
// A cluster name must respect the following constraints:
// * It must contain at most 32 characters;
// * It must begin and end with a lower case alphanumeric character;
// * It may contain lower case alphanumeric characters and dashes between.
const (
	// clusterNameMaxLength is the maximum allowed length of a cluster name.
	clusterNameMaxLength = 32
	// clusterNameRegexStr is the regex to validate a cluster name.
	// It only enforces the allowed characters and the first/last-character
	// rules; the length limit is checked separately.
	clusterNameRegexStr = `^([a-z0-9][-a-z0-9]*)?[a-z0-9]$`
)

// clusterNameRegex is the compiled form of clusterNameRegexStr, built once at
// package initialization.
var clusterNameRegex = regexp.MustCompile(clusterNameRegexStr)
// InitClusterIDMax validates and sets the ClusterIDMax package level variable.
func (c ClusterInfo) InitClusterIDMax() error {
	// Only the default (255) and the extended (511) limits are supported.
	if c.MaxConnectedClusters != defaults.MaxConnectedClusters && c.MaxConnectedClusters != ClusterIDExt511 {
		return fmt.Errorf("--%s=%d is invalid; supported values are [%d, %d]", OptMaxConnectedClusters, c.MaxConnectedClusters, defaults.MaxConnectedClusters, ClusterIDExt511)
	}
	ClusterIDMax = c.MaxConnectedClusters
	return nil
}
// ValidateClusterID ensures that the given clusterID is within the configured
// range of the ClusterMesh.
func ValidateClusterID(clusterID uint32) error {
	switch {
	case clusterID == ClusterIDMin:
		return fmt.Errorf("ClusterID %d is reserved", ClusterIDMin)
	case clusterID > ClusterIDMax:
		return fmt.Errorf("ClusterID > %d is not supported", ClusterIDMax)
	}
	return nil
}
// ValidateClusterName validates that the given name matches the cluster name specifications.
func ValidateClusterName(name string) error {
	switch {
	case name == "":
		return errors.New("must not be empty")
	case len(name) > clusterNameMaxLength:
		return fmt.Errorf("must not be more than %d characters", clusterNameMaxLength)
	case !clusterNameRegex.MatchString(name):
		return errors.New("must consist of lower case alphanumeric characters and '-', and must start and end with an alphanumeric character")
	}
	return nil
}
// RegisterClusterInfoValidator registers a startup hook that initializes
// ClusterIDMax from the configuration and strictly validates the cluster info.
func RegisterClusterInfoValidator(lc cell.Lifecycle, cinfo ClusterInfo) {
	lc.Append(cell.Hook{
		OnStart: func(cell.HookContext) error {
			if err := cinfo.InitClusterIDMax(); err != nil {
				return err
			}
			return cinfo.ValidateStrict()
		},
	})
}
// CiliumClusterConfig is the per-cluster configuration, including the
// cluster ID and the capabilities advertised to remote clusters.
type CiliumClusterConfig struct {
	// ID is the cluster's numeric identifier.
	ID uint32 `json:"id,omitempty"`

	// Capabilities advertises optional features supported by the cluster.
	Capabilities CiliumClusterConfigCapabilities `json:"capabilities,omitempty"`
}

// CiliumClusterConfigCapabilities describes the optional features a cluster
// supports.
type CiliumClusterConfigCapabilities struct {
	// Supports per-prefix "synced" canaries
	SyncedCanaries bool `json:"syncedCanaries,omitempty"`
	// The information concerning the given cluster is cached from an external
	// kvstore (for instance, by kvstoremesh). This implies that keys are stored
	// under the dedicated "cilium/cache" prefix, and all are cluster-scoped.
	Cached bool `json:"cached,omitempty"`
	// The maximum number of clusters the given cluster can support in a ClusterMesh.
	MaxConnectedClusters uint32 `json:"maxConnectedClusters,omitempty"`
	// Whether or not MCS-API ServiceExports is enabled by the cluster.
	// Additionally a nil value means that it's not supported.
	ServiceExportsEnabled *bool `json:"serviceExportsEnabled,omitempty"`
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package command
import (
"encoding/json"
"errors"
"fmt"
"regexp"
"strings"
"unicode"
"github.com/spf13/cast"
"github.com/spf13/viper"
)
const (
	// comma separates key=value pairs.
	comma = ','
	// equal separates a key from its value.
	equal = '='
)

// keyValueRegex validates the "k1=v1,k2=v2,..." syntax accepted as a fallback
// when the input is not valid JSON.
var keyValueRegex = regexp.MustCompile(`([\w-:;./@]+=([\w-:;,./@][\w-:;,./@ ]*[\w-:;,./@])?[\w-:;,./@]*,)*([\w-:;./@]+=([\w-:;,./@][\w-:;,./@ ]*)?[\w-:;./@]+)$`)
// GetStringMapString contains one enhancement to support k1=v2,k2=v2 compared to original
// implementation of GetStringMapString function
// Related upstream issue https://github.com/spf13/viper/issues/911
func GetStringMapString(vp *viper.Viper, key string) map[string]string {
	// Any parse error is deliberately discarded; use GetStringMapStringE
	// when the error matters.
	result, _ := GetStringMapStringE(vp, key)
	return result
}
// GetStringMapStringE is same as GetStringMapString, but with error
func GetStringMapStringE(vp *viper.Viper, key string) (map[string]string, error) {
	value := vp.Get(key)
	return ToStringMapStringE(value)
}
// ToStringMapStringE casts an interface to a map[string]string type. The underlying
// interface type might be a map or string. In the latter case, it is attempted to be
// json decoded, falling back to the k1=v2,k2=v2 format in case it doesn't look like json.
func ToStringMapStringE(data any) (map[string]string, error) {
	if data == nil {
		return map[string]string{}, nil
	}
	// First try the generic cast, which handles maps and JSON-encoded strings.
	v, err := cast.ToStringMapStringE(data)
	if err != nil {
		// Only fall back to key=value parsing when the failure was a JSON
		// syntax error; any other error is returned as-is.
		var syntaxErr *json.SyntaxError
		if !errors.As(err, &syntaxErr) {
			return v, err
		}
		switch s := data.(type) {
		case string:
			if len(s) == 0 {
				return map[string]string{}, nil
			}
			// if the input is starting with either '{' or '[', just preserve original json parsing error.
			firstIndex := strings.IndexFunc(s, func(r rune) bool {
				return !unicode.IsSpace(r)
			})
			if firstIndex != -1 && (s[firstIndex] == '{' || s[firstIndex] == '[') {
				return v, err
			}
			if !isValidKeyValuePair(s) {
				return map[string]string{}, fmt.Errorf("'%s' is not formatted as key=value,key1=value1", s)
			}
			// Deliberately shadows the outer 'v': build a fresh map from the
			// parsed key=value pairs.
			var v = map[string]string{}
			kvs := splitKeyValue(s, comma, equal)
			for _, kv := range kvs {
				temp := strings.Split(kv, string(equal))
				if len(temp) != 2 {
					return map[string]string{}, fmt.Errorf("'%s' in '%s' is not formatted as key=value,key1=value1", kv, s)
				}
				v[temp[0]] = temp[1]
			}
			return v, nil
		}
		// NOTE(review): non-string data that produced a JSON syntax error falls
		// through to the final return and yields (v, nil), silently dropping
		// the error — confirm this is intentional.
	}
	return v, nil
}
// isValidKeyValuePair returns true if the input is following key1=value1,key2=value2,...,keyN=valueN format.
func isValidKeyValuePair(str string) bool {
	if str == "" {
		return true
	}
	// If the regex consumes the entire input, nothing remains after
	// replacement and the string is well-formed.
	remainder := keyValueRegex.ReplaceAllString(str, "")
	return remainder == ""
}
// splitKeyValue is similar to strings.Split, but looks ahead to make sure
// that sep character is allowed in value component of key-value pair.
//
// Example: with the input "c6a.2xlarge=4,15,15,m4.xlarge=2,4,8",
// - strings.Split function will return []string{"c6a.2xlarge=4", "15", "15", "m4.xlarge=2", "4", "8"}.
// - splitKeyValue function will return []string{"c6a.2xlarge=4,15,15", "m4.xlarge=2,4,8"} instead.
func splitKeyValue(str string, sep rune, keyValueSep rune) []string {
	// Record byte positions of every separator and key-value separator
	// (both are compared byte-wise, so they are expected to be ASCII).
	var sepPos, kvSepPos []int
	for i := 0; i < len(str); i++ {
		switch rune(str[i]) {
		case sep:
			sepPos = append(sepPos, i)
		case keyValueSep:
			kvSepPos = append(kvSepPos, i)
		}
	}
	// A single key-value pair if there are no separators ("key=value")
	// or a single key-value separator ("key=option1:value1,option2:value2").
	if len(sepPos) == 0 || len(kvSepPos) == 1 {
		return []string{str}
	}
	// Exactly one separator: a plain two-way split.
	if len(sepPos) == 1 {
		cut := sepPos[0]
		return []string{str[:cut], str[cut+1:]}
	}
	// General case: only cut at a separator when the segment that follows it
	// (up to the next separator) contains a key-value separator, i.e. the
	// separator actually starts a new pair rather than separating values.
	var parts []string
	start := 0
	for i, pos := range sepPos {
		end := len(str)
		if i < len(sepPos)-1 {
			end = sepPos[i+1]
		}
		if strings.ContainsRune(str[pos:end], keyValueSep) {
			parts = append(parts, str[start:pos])
			start = pos + 1
		}
	}
	// Whatever remains after the last cut belongs to the final pair.
	parts = append(parts, str[start:])
	return parts
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package command
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
"github.com/spf13/cobra"
"go.yaml.in/yaml/v3"
"k8s.io/client-go/util/jsonpath"
)
var (
	// outputOpt holds the value of the -o/--output flag.
	outputOpt string
	// re matches "jsonpath=<expr>" and captures the jsonpath expression.
	re = regexp.MustCompile(`^jsonpath\=(.*)`)
)
// OutputOption returns true if an output option was specified.
func OutputOption() bool {
	return outputOpt != ""
}
// OutputOptionString returns the output option as a string
func OutputOptionString() string {
	switch {
	case outputOpt == "yaml":
		return "YAML"
	case outputOpt == "json", re.MatchString(outputOpt):
		return "JSON"
	default:
		return "unknown"
	}
}
// AddOutputOption adds the -o|--output option to any cmd to export to json or yaml.
func AddOutputOption(cmd *cobra.Command) {
	// Accepted values: "json", "yaml", or "jsonpath='{...}'".
	cmd.Flags().StringVarP(&outputOpt, "output", "o", "", "json| yaml| jsonpath='{}'")
}
// ForceJSON sets output mode to JSON (for unit tests)
func ForceJSON() {
	// Overrides whatever value was parsed from the -o/--output flag.
	outputOpt = "json"
}
// PrintOutput receives an interface and dumps the data using the --output
// flag. Supports json, yaml, and jsonpath (see PrintOutputWithType).
func PrintOutput(data any) error {
	return PrintOutputWithType(data, outputOpt)
}
// PrintOutputWithPatch merges data with patch and dump the data using the --output flag.
func PrintOutputWithPatch(data any, patch any) error {
	merged, err := mergeInterfaces(data, patch)
	if err != nil {
		return fmt.Errorf("Unable to merge Interfaces: %w", err)
	}
	return PrintOutputWithType(merged, outputOpt)
}
// mergeInterfaces deep-merges patch into data by round-tripping both values
// through JSON and recursively combining the decoded representations.
func mergeInterfaces(data, patch any) (any, error) {
	decoded := make([]any, 2)
	for i, in := range []any{data, patch} {
		raw, err := json.Marshal(in)
		if err != nil {
			return nil, err
		}
		if err := json.Unmarshal(raw, &decoded[i]); err != nil {
			return nil, err
		}
	}
	return recursiveMerge(decoded[0], decoded[1]), nil
}
// recursiveMerge overlays i2 onto i1. When both are JSON-style maps, keys are
// merged recursively (i2 wins for scalar conflicts, mutating i1 in place).
// When i1 is nil and i2 is a map, i2 is returned. Otherwise i1 is kept.
func recursiveMerge(i1, i2 any) any {
	switch base := i1.(type) {
	case map[string]any:
		overlay, isMap := i2.(map[string]any)
		if !isMap {
			return base
		}
		for key, patchVal := range overlay {
			existing, present := base[key]
			if present {
				base[key] = recursiveMerge(existing, patchVal)
				continue
			}
			base[key] = patchVal
		}
		return base
	case nil:
		if overlay, isMap := i2.(map[string]any); isMap {
			return overlay
		}
	}
	return i1
}
// PrintOutputWithType receives an interface and dump the data using the --output flag.
// ATM only json, yaml, or jsonpath.
func PrintOutputWithType(data any, outputType string) error {
	switch {
	case outputType == "json":
		return dumpJSON(data, "")
	case outputType == "yaml":
		return dumpYAML(data)
	case re.MatchString(outputType):
		// Strip the "jsonpath=" prefix and pass the expression through.
		return dumpJSON(data, re.ReplaceAllString(outputType, "$1"))
	}
	return fmt.Errorf("couldn't find output printer")
}
// DumpJSONToString dumps the contents of data into a string. If jsonpath is
// non-empty, will attempt to do jsonpath filtering using said string. Returns a
// string containing the JSON in data, or an error if any JSON marshaling,
// parsing operations fail.
func DumpJSONToString(data any, jsonPath string) (string, error) {
	if len(jsonPath) == 0 {
		result, err := json.MarshalIndent(data, "", " ")
		if err != nil {
			fmt.Fprintf(os.Stderr, "Couldn't marshal to json: '%s'\n", err)
			return "", err
		}
		// BUG FIX: this branch used to print the JSON to stdout and return an
		// empty string, contradicting the documented contract and making
		// dumpJSON emit a spurious blank line. Return the JSON to the caller
		// instead, leaving printing to the caller.
		return string(result), nil
	}
	parser := jsonpath.New("").AllowMissingKeys(true)
	if err := parser.Parse(jsonPath); err != nil {
		fmt.Fprintf(os.Stderr, "Couldn't parse jsonpath expression: '%s'\n", err)
		return "", err
	}
	var sb strings.Builder
	if err := parser.Execute(&sb, data); err != nil {
		fmt.Fprintf(os.Stderr, "Couldn't parse jsonpath expression: '%s'\n", err)
		return "", err
	}
	return sb.String(), nil
}
// dumpJSON dumps the data variable to the stdout as json.
// If something fails, it returns an error
// If jsonPath is passed, it runs the json query over data var.
func dumpJSON(data any, jsonPath string) error {
	out, err := DumpJSONToString(data, jsonPath)
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}
// dumpYAML dumps the data variable to the stdout as yaml.
// If something fails, it returns an error
func dumpYAML(data any) error {
	out, err := yaml.Marshal(data)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Couldn't marshal to yaml: '%s'\n", err)
		return err
	}
	fmt.Println(string(out))
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package common
import (
"fmt"
"os"
"reflect"
"strconv"
"strings"
)
// C2GoArray transforms an hexadecimal string representation into a byte slice.
// Example: C2GoArray("0x12, 0xff, 0x0, 0x1") returns []byte{0x12, 0xFF, 0x00, 0x01}.
// An empty input yields an empty (non-nil) slice; any unparsable element
// yields nil.
func C2GoArray(str string) []byte {
	out := []byte{}
	if str == "" {
		return out
	}
	for _, tok := range strings.Split(str, ", ") {
		val, err := strconv.ParseUint(strings.TrimPrefix(tok, "0x"), 16, 8)
		if err != nil {
			return nil
		}
		out = append(out, byte(val))
	}
	return out
}
// GoArray2C transforms a byte slice into its hexadecimal string representation,
// with elements separated by ", ".
// Example:
// array := []byte{0x12, 0xFF, 0x0, 0x01}
// fmt.Print(GoArray2C(array)) // "0x12, 0xff, 0x0, 0x1"
func GoArray2C(array []byte) string {
	return goArray2C(array, true)
}
// GoArray2CNoSpaces does the same as GoArray2C, but no spaces are used in
// the final output.
// Example:
// array := []byte{0x12, 0xFF, 0x0, 0x01}
// fmt.Print(GoArray2CNoSpaces(array)) // "0x12,0xff,0x0,0x1"
func GoArray2CNoSpaces(array []byte) string {
	return goArray2C(array, false)
}
// goArray2C renders each byte as "%#x", joined by ", " (space=true) or ","
// (space=false). An empty slice yields an empty string.
func goArray2C(array []byte, space bool) string {
	sep := ",%#x"
	if space {
		sep = ", %#x"
	}
	var sb strings.Builder
	for i, b := range array {
		if i == 0 {
			fmt.Fprintf(&sb, "%#x", b)
			continue
		}
		fmt.Fprintf(&sb, sep, b)
	}
	return sb.String()
}
// RequireRootPrivilege checks if the user running cmd is root. If not, it exits the program
func RequireRootPrivilege(cmd string) {
	if os.Getuid() == 0 {
		return
	}
	fmt.Fprintf(os.Stderr, "Please run %q command(s) with root privileges.\n", cmd)
	os.Exit(1)
}
// MergeChannels fans values from all input channels into a single output
// channel. The output channel is closed as soon as any input channel is
// closed; until then, values from all inputs are forwarded in select order.
func MergeChannels[T any](chans ...<-chan T) <-chan T {
	out := make(chan T)
	cases := make([]reflect.SelectCase, len(chans))
	for i, ch := range chans {
		cases[i] = reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(ch),
		}
	}
	go func() {
		defer close(out)
		// BUG FIX: the previous implementation performed a single Select and
		// then returned, forwarding at most one value before closing out.
		// Loop until one of the inputs is closed.
		for {
			_, value, ok := reflect.Select(cases)
			if !ok {
				return
			}
			out <- value.Interface().(T)
		}
	}()
	return out
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package comparator
import "slices"
// MapStringEqualsIgnoreKeys returns true if both maps have the same values for
// the keys that are not present in the 'ignoreKeys'. A nil map only equals
// another nil map.
func MapStringEqualsIgnoreKeys(m1, m2 map[string]string, ignoreKeys []string) bool {
	switch {
	case m1 == nil && m2 == nil:
		return true
	case m1 == nil && m2 != nil,
		m1 != nil && m2 == nil:
		return false
	}
	// Compare all non-ignored entries of m1 against m2, counting how many
	// m1 entries were skipped. (Idiom: the previous ignore-flag variable is
	// folded into a single Contains check with continue.)
	ignoredM1 := 0
	for k1, v1 := range m1 {
		if slices.Contains(ignoreKeys, k1) {
			ignoredM1++
			continue
		}
		if v2, ok := m2[k1]; !ok || v2 != v1 {
			return false
		}
	}
	// Count how many ignored keys exist in m2, so the effective sizes of both
	// maps (minus ignored entries) can be compared.
	ignoredM2 := 0
	for _, ig := range ignoreKeys {
		if _, ok := m2[ig]; ok {
			ignoredM2++
		}
	}
	return len(m1)-ignoredM1 == len(m2)-ignoredM2
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package components
import (
"os"
"strings"
)
const (
	// CiliumAgentName is the process name of cilium-agent (the daemon).
	CiliumAgentName = "cilium-agent"
	// CiliumDaemonTestName is the name of the test binary for the daemon package.
	CiliumDaemonTestName = "cmd.test"
)
// IsCiliumAgent checks whether the current process is cilium-agent (daemon),
// based on the suffix of the executable name in os.Args[0].
func IsCiliumAgent() bool {
	proc := os.Args[0]
	if strings.HasSuffix(proc, CiliumAgentName) {
		return true
	}
	return strings.HasSuffix(proc, CiliumDaemonTestName)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bitlpm
import (
"math/bits"
"net/netip"
"unsafe"
)
// CIDRTrie can hold both IPv4 and IPv6 prefixes
// at the same time.
type CIDRTrie[T any] struct {
	// v4 indexes IPv4 prefixes (key depth up to 32 bits).
	v4 Trie[cidrKey, T]
	// v6 indexes IPv6 prefixes (key depth up to 128 bits).
	v6 Trie[cidrKey, T]
}
// NewCIDRTrie creates a new CIDRTrie[T any].
func NewCIDRTrie[T any]() *CIDRTrie[T] {
	t := &CIDRTrie[T]{}
	t.v4 = NewTrie[cidrKey, T](32)
	t.v6 = NewTrie[cidrKey, T](128)
	return t
}
// ExactLookup returns the value for a given CIDR, but only
// if there is an exact match for the CIDR in the Trie.
func (c *CIDRTrie[T]) ExactLookup(cidr netip.Prefix) (T, bool) {
	// Delegate to the per-family trie, using the prefix length as key depth.
	return c.treeForFamily(cidr).ExactLookup(uint(cidr.Bits()), cidrKey(cidr))
}
// LongestPrefixMatch returns the longest matched value for a given address.
// An invalid address yields the zero prefix, zero value, and false.
func (c *CIDRTrie[T]) LongestPrefixMatch(addr netip.Addr) (netip.Prefix, T, bool) {
	var zeroPrefix netip.Prefix
	var zeroVal T
	if !addr.IsValid() {
		return zeroPrefix, zeroVal, false
	}
	// Look the address up as a full-length host prefix.
	prefix := netip.PrefixFrom(addr, addr.BitLen())
	k, v, ok := c.treeForFamily(prefix).LongestPrefixMatch(cidrKey(prefix))
	if !ok {
		return zeroPrefix, v, false
	}
	return netip.Prefix(k), v, true
}
// Ancestors iterates over every CIDR pair that contains the CIDR argument.
func (c *CIDRTrie[T]) Ancestors(cidr netip.Prefix, fn func(k netip.Prefix, v T) bool) {
	// Adapt the underlying (prefix, cidrKey) callback to the netip.Prefix
	// callback exposed by CIDRTrie.
	c.treeForFamily(cidr).Ancestors(uint(cidr.Bits()), cidrKey(cidr), func(prefix uint, k cidrKey, v T) bool {
		return fn(netip.Prefix(k), v)
	})
}
// AncestorIterator returns an iterator over all CIDRs containing the given CIDR.
func (c *CIDRTrie[T]) AncestorIterator(cidr netip.Prefix) ancestorIterator[cidrKey, T] {
	return c.treeForFamily(cidr).AncestorIterator(uint(cidr.Bits()), cidrKey(cidr))
}
// AncestorsLongestPrefixFirst iterates over every CIDR pair that contains the CIDR argument,
// longest matching prefix first, then iterating towards the root of the trie.
func (c *CIDRTrie[T]) AncestorsLongestPrefixFirst(cidr netip.Prefix, fn func(k netip.Prefix, v T) bool) {
	// Adapter converts the trie's cidrKey back to netip.Prefix for callers.
	c.treeForFamily(cidr).AncestorsLongestPrefixFirst(uint(cidr.Bits()), cidrKey(cidr), func(prefix uint, k cidrKey, v T) bool {
		return fn(netip.Prefix(k), v)
	})
}
// AncestorLongestPrefixFirstIterator returns an iterator over all CIDRs that
// contain the given CIDR, starting from the longest matching prefix.
func (c *CIDRTrie[T]) AncestorLongestPrefixFirstIterator(cidr netip.Prefix) ancestorLPFIterator[cidrKey, T] {
	return c.treeForFamily(cidr).AncestorLongestPrefixFirstIterator(uint(cidr.Bits()), cidrKey(cidr))
}
// Descendants iterates over every CIDR that is contained by the CIDR argument.
func (c *CIDRTrie[T]) Descendants(cidr netip.Prefix, fn func(k netip.Prefix, v T) bool) {
	// Adapter converts the trie's cidrKey back to netip.Prefix for callers.
	c.treeForFamily(cidr).Descendants(uint(cidr.Bits()), cidrKey(cidr), func(prefix uint, k cidrKey, v T) bool {
		return fn(netip.Prefix(k), v)
	})
}
// DescendantIterator returns an iterator over all CIDRs contained by the given CIDR.
func (c *CIDRTrie[T]) DescendantIterator(cidr netip.Prefix) descendantIterator[cidrKey, T] {
	return c.treeForFamily(cidr).DescendantIterator(uint(cidr.Bits()), cidrKey(cidr))
}
// DescendantsShortestPrefixFirst iterates over every CIDR that is contained by the CIDR argument.
func (c *CIDRTrie[T]) DescendantsShortestPrefixFirst(cidr netip.Prefix, fn func(k netip.Prefix, v T) bool) {
	// Adapter converts the trie's cidrKey back to netip.Prefix for callers.
	c.treeForFamily(cidr).DescendantsShortestPrefixFirst(uint(cidr.Bits()), cidrKey(cidr), func(prefix uint, k cidrKey, v T) bool {
		return fn(netip.Prefix(k), v)
	})
}
// DescendantShortestPrefixFirstIterator returns an iterator over all CIDRs
// contained by the given CIDR, starting from the shortest prefix.
func (c *CIDRTrie[T]) DescendantShortestPrefixFirstIterator(cidr netip.Prefix) descendantSPFIterator[cidrKey, T] {
	return c.treeForFamily(cidr).DescendantShortestPrefixFirstIterator(uint(cidr.Bits()), cidrKey(cidr))
}
// Upsert adds or updates the value for a given prefix.
// It returns true if the prefix is new, false if it already existed.
func (c *CIDRTrie[T]) Upsert(cidr netip.Prefix, v T) bool {
	return c.treeForFamily(cidr).Upsert(uint(cidr.Bits()), cidrKey(cidr), v)
}
// Delete removes a given prefix from the tree.
// It returns false if the prefix was not present.
func (c *CIDRTrie[T]) Delete(cidr netip.Prefix) bool {
	return c.treeForFamily(cidr).Delete(uint(cidr.Bits()), cidrKey(cidr))
}
// Len returns the total number of ipv4 and ipv6 prefixes in the trie.
func (c *CIDRTrie[T]) Len() uint {
	return c.v4.Len() + c.v6.Len()
}
// ForEach iterates over every element of the Trie, IPv4 keys first. If fn
// returns false, iteration stops and the IPv6 keys are not visited.
func (c *CIDRTrie[T]) ForEach(fn func(k netip.Prefix, v T) bool) {
	stopped := false
	c.v4.ForEach(func(_ uint, k cidrKey, v T) bool {
		if fn(netip.Prefix(k), v) {
			return true
		}
		// Remember that the caller aborted so v6 iteration is skipped too.
		stopped = true
		return false
	})
	if stopped {
		return
	}
	c.v6.ForEach(func(_ uint, k cidrKey, v T) bool {
		return fn(netip.Prefix(k), v)
	})
}
// treeForFamily selects the v6 trie for IPv6 prefixes and the v4 trie otherwise.
func (c *CIDRTrie[T]) treeForFamily(cidr netip.Prefix) Trie[cidrKey, T] {
	if !cidr.Addr().Is6() {
		return c.v4
	}
	return c.v6
}
// cidrKey adapts netip.Prefix to the bitlpm Key interface.
type cidrKey netip.Prefix
// BitValueAt returns the value (0 or 1) of the bit at index idx of the key's
// address, counting from the MSB.
//
// NOTE(review): this reads the address words directly via unsafe and relies on
// the internal memory layout of netip.Addr (two uint64 words, with the IPv4
// address in the low 32 bits of the second word) — confirm against the netip
// implementation when upgrading Go.
func (k cidrKey) BitValueAt(idx uint) uint8 {
	addr := netip.Prefix(k).Addr()
	if addr.Is4() {
		// IPv4: the 32 address bits live in the second word.
		word := (*(*[2]uint64)(unsafe.Pointer(&addr)))[1]
		return uint8((word >> (31 - idx)) & 1)
	}
	if idx < 64 {
		// IPv6: first 64 bits.
		word := (*(*[2]uint64)(unsafe.Pointer(&addr)))[0]
		return uint8((word >> (63 - idx)) & 1)
	} else {
		// IPv6: last 64 bits.
		word := (*(*[2]uint64)(unsafe.Pointer(&addr)))[1]
		return uint8((word >> (127 - idx)) & 1)
	}
}
// CommonPrefix returns the number of leading bits shared by the two keys'
// addresses, starting from the MSB.
//
// NOTE(review): like BitValueAt, this depends on netip.Addr's internal
// representation (two uint64 words, IPv4 in the low 32 bits of the second
// word) — confirm when upgrading Go.
func (k cidrKey) CommonPrefix(k2 cidrKey) uint {
	addr1 := netip.Prefix(k).Addr()
	addr2 := netip.Prefix(k2).Addr()
	words1 := (*[2]uint64)(unsafe.Pointer(&addr1))
	words2 := (*[2]uint64)(unsafe.Pointer(&addr2))
	if addr1.Is4() {
		// IPv4: compare only the low 32 bits of the second word.
		word1 := uint32((*words1)[1])
		word2 := uint32((*words2)[1])
		return uint(bits.LeadingZeros32(word1 ^ word2))
	}
	// IPv6: count matching bits in the first word; continue into the second
	// word only when the first 64 bits are identical.
	v := bits.LeadingZeros64((*words1)[0] ^ (*words2)[0])
	if v == 64 {
		v += bits.LeadingZeros64((*words1)[1] ^ (*words2)[1])
	}
	return uint(v)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bitlpm
import "net/netip"
// CIDRTrieMap holds a map of CIDRTries, keyed by a generic comparable type,
// where each trie is capable of storing both IPv4 and IPv6 prefixes at the same time.
type CIDRTrieMap[K comparable, T any] struct {
	// m lazily maps each key to its own CIDRTrie: entries are created on
	// first Upsert and removed when their trie becomes empty (see Delete).
	m map[K]*CIDRTrie[T]
}
// NewCIDRTrieMap creates a new CIDRTrieMap[K comparable, T any].
func NewCIDRTrieMap[K comparable, T any]() *CIDRTrieMap[K, T] {
	tries := make(map[K]*CIDRTrie[T])
	return &CIDRTrieMap[K, T]{m: tries}
}
// Descendants iterates over every CIDR that is contained by the CIDR argument in the trie identified by key.
// If no trie is associated with the key, the function is a no-op.
func (cm *CIDRTrieMap[K, T]) Descendants(key K, cidr netip.Prefix, fn func(k netip.Prefix, v T) bool) {
	// Single map lookup instead of the previous double lookup.
	if t := cm.m[key]; t != nil {
		t.Descendants(cidr, fn)
	}
}
// Upsert adds or updates the value for a given prefix in the trie identified by key.
// If the key has no trie associated, a new empty one is created.
// It returns true if the prefix was newly inserted.
func (cm *CIDRTrieMap[K, T]) Upsert(key K, cidr netip.Prefix, v T) bool {
	// Single map lookup instead of the previous triple lookup.
	t := cm.m[key]
	if t == nil {
		t = NewCIDRTrie[T]()
		cm.m[key] = t
	}
	return t.Upsert(cidr, v)
}
// Delete removes a given prefix from the trie identified by key, returning
// whether it was present. A trie that becomes empty is removed from the map.
func (cm *CIDRTrieMap[K, T]) Delete(key K, cidr netip.Prefix) bool {
	// Single map lookup instead of the previous repeated lookups.
	t := cm.m[key]
	if t == nil {
		return false
	}
	found := t.Delete(cidr)
	if t.Len() == 0 {
		delete(cm.m, key)
	}
	return found
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bitlpm
import (
"container/heap"
)
// Trie is a [non-preemptive] [binary] [trie] that indexes arbitrarily long
// bit-based keys with associated prefix lengths indexed from [most significant bit]
// ("MSB") to [least significant bit] ("LSB") using the
// [longest prefix match algorithm].
//
// A prefix-length (hereafter "prefix"), in a prefix-key pair, represents the
// minimum number of bits (from MSB to LSB) that another comparable key
// must match.
//
// 'K' must implement the Key[K] interface, and should be passed by value for optimum
// performance. If 'K' is a pointer type, its Key[K] methods must accept nil receivers.
//
// Each method's comments describe the mechanism of how the method
// works.
//
// [non-preemptive]: https://en.wikipedia.org/wiki/Preemption_(computing)
// [binary]: https://en.wikipedia.org/wiki/Binary_number
// [trie]: https://en.wikipedia.org/wiki/Trie
// [most significant bit]: https://en.wikipedia.org/wiki/Bit_numbering#Most_significant_bit
// [least significant bit]: https://en.wikipedia.org/wiki/Bit_numbering#Least_significant_bit
// [longest prefix match algorithm]: https://en.wikipedia.org/wiki/Longest_prefix_match
type Trie[K Key[K], T any] interface {
	// ExactLookup returns a value only if the prefix and key
	// match an entry in the Trie exactly.
	//
	// Note: If the prefix argument exceeds the Trie's maximum
	// prefix, it will be set to the Trie's maximum prefix.
	ExactLookup(prefix uint, key K) (v T, ok bool)
	// LongestPrefixMatch returns the longest prefix match for a specific
	// key.
	LongestPrefixMatch(key K) (k K, v T, ok bool)
	// Ancestors iterates over every prefix-key pair that contains
	// the prefix-key argument pair. If the function argument
	// returns false the iteration will stop. Ancestors iterates
	// keys from shortest to longest prefix match (that is, the
	// longest match will be returned last).
	//
	// Note: If the prefix argument exceeds the Trie's maximum
	// prefix, it will be set to the Trie's maximum prefix.
	Ancestors(prefix uint, key K, fn func(uint, K, T) bool)
	// AncestorIterator returns an iterator for ancestors that
	// can be used to produce the 'Next' key/value pair in sequence.
	AncestorIterator(prefix uint, key K) ancestorIterator[K, T]
	// AncestorsLongestPrefixFirst iterates over every prefix-key pair that
	// contains the prefix-key argument pair. If the function argument
	// returns false the iteration will stop. AncestorsLongestPrefixFirst
	// iterates keys from longest to shortest prefix match (that is, the
	// longest matching prefix will be returned first).
	AncestorsLongestPrefixFirst(prefix uint, key K, fn func(uint, K, T) bool)
	// AncestorLongestPrefixFirstIterator returns an iterator for ancestors
	// that can be used to produce the 'Next' key/value pair in sequence,
	// starting from the key with the longest common prefix with 'key'.
	AncestorLongestPrefixFirstIterator(prefix uint, key K) ancestorLPFIterator[K, T]
	// Descendants iterates over every prefix-key pair that is contained
	// by the prefix-key argument pair. If the function argument
	// returns false the iteration will stop. Descendants does **not** iterate
	// over matches in any guaranteed order.
	//
	// Note: If the prefix argument exceeds the Trie's maximum
	// prefix, it will be set to the Trie's maximum prefix.
	Descendants(prefix uint, key K, fn func(uint, K, T) bool)
	// DescendantIterator returns an iterator for descendants
	// that can be used to produce the 'Next' key/value pair in sequence.
	DescendantIterator(prefix uint, key K) descendantIterator[K, T]
	// DescendantsShortestPrefixFirst iterates over every prefix-key pair that is contained by
	// the prefix-key argument pair. If the function argument returns false the iteration will
	// stop. DescendantsShortestPrefixFirst iterates keys starting from shortest prefix, and
	// progressing towards keys with longer prefixes. Keys with equal prefix lengths are not
	// iterated in any particular order.
	DescendantsShortestPrefixFirst(prefix uint, key K, fn func(uint, K, T) bool)
	// DescendantShortestPrefixFirstIterator returns an iterator for descendants
	// that can be used to produce the 'Next' key/value pair in sequence,
	// starting from the key with the shortest common prefix with 'key'.
	DescendantShortestPrefixFirstIterator(prefix uint, key K) descendantSPFIterator[K, T]
	// Upsert updates or inserts the trie with a prefix, key,
	// and value. The method returns true if the key is new, and
	// false if the key already existed.
	//
	// Note: If the prefix argument exceeds the Trie's maximum
	// prefix, it will be set to the Trie's maximum prefix.
	Upsert(prefix uint, key K, value T) bool
	// Delete removes a key with the exact given prefix and returns
	// false if the key was not found.
	//
	// Note: If the prefix argument exceeds the Trie's maximum
	// prefix, it will be set to the Trie's maximum prefix.
	Delete(prefix uint, key K) bool
	// Len returns the number of entries in the Trie
	Len() uint
	// ForEach iterates over every element of the Trie in no particular
	// order. If the function argument returns false the iteration stops.
	ForEach(fn func(uint, K, T) bool)
}
// Key is an interface that implements all the necessary
// methods to index and retrieve keys from the trie.
type Key[K any] interface {
	// CommonPrefix returns the number of bits that
	// are the same between this key and the argument
	// value, starting from MSB.
	CommonPrefix(K) uint
	// BitValueAt returns the value of the bit at an argument
	// index. MSB is 0 and LSB is n-1.
	BitValueAt(uint) uint8
}
// trie is the generic implementation of a bit-trie that can
// accept arbitrary keys conforming to the Key interface.
type trie[K Key[K], T any] struct {
	root      *node[K, T] // top of the trie; nil when empty
	maxPrefix uint        // maximum prefix length (bit width of the key)
	entries   uint        // number of non-intermediate entries
}
// NewTrie returns a Trie that accepts the Key[K any] interface
// as its key argument. This enables the user of this Trie to
// define their own bit-key. maxPrefix is the bit width of the
// keys that will be stored.
func NewTrie[K Key[K], T any](maxPrefix uint) Trie[K, T] {
	return &trie[K, T]{
		maxPrefix: maxPrefix,
	}
}
// node represents a specific key and prefix in the trie.
type node[K Key[K], T any] struct {
	children     [2]*node[K, T] // subtries selected by the bit following prefixLen
	prefixLen    uint           // number of significant leading bits of key
	key          K
	intermediate bool // true for internal branch nodes that hold no user value
	value        T
}
// ExactLookup returns a value only if the prefix and key
// match an entry in the Trie exactly.
//
// Note: If the prefix argument exceeds the Trie's maximum
// prefix, it will be set to the Trie's maximum prefix.
func (t *trie[K, T]) ExactLookup(prefixLen uint, k K) (ret T, found bool) {
	prefixLen = min(prefixLen, t.maxPrefix)
	t.traverse(prefixLen, k, func(n *node[K, T]) bool {
		if n.prefixLen != prefixLen {
			// Not an exact prefix-length match; keep descending.
			return true
		}
		ret, found = n.value, true
		return false // exact match found, stop traversal
	})
	return ret, found
}
// LongestPrefixMatch returns the key and value of the entry with the
// longest prefix match of the argument key, if any.
func (t *trie[K, T]) LongestPrefixMatch(k K) (key K, value T, ok bool) {
	// traverse visits matches from shortest to longest, so the last
	// node seen is the longest prefix match.
	var best *node[K, T]
	t.traverse(t.maxPrefix, k, func(n *node[K, T]) bool {
		best = n
		return true
	})
	if best == nil {
		return
	}
	return best.key, best.value, true
}
// Ancestors calls the function argument for every prefix/key/value in the trie
// that contains the prefix-key argument pair, in order from shortest to longest
// prefix match. If the function argument returns false the iteration stops.
//
// Note: Ancestors clamps any prefixLen argument that exceeds the maximum
// prefix allowed by the trie to that maximum.
func (t *trie[K, T]) Ancestors(prefixLen uint, k K, fn func(prefix uint, key K, value T) bool) {
	t.traverse(min(prefixLen, t.maxPrefix), k, func(n *node[K, T]) bool {
		return fn(n.prefixLen, n.key, n.value)
	})
}
// ancestorIterator implements an iterator over the ancestors of a
// prefix/key pair, yielding matches from shortest to longest prefix.
type ancestorIterator[K Key[K], T any] struct {
	key         K           // key being looked up
	prefixLen   uint        // lookup prefix length, already clamped to maxPrefix
	maxPrefix   uint        // the trie's maximum prefix length
	currentNode *node[K, T] // next node to examine; nil when the sequence is exhausted
}

// AncestorIterator returns an iterator for ancestors.
//
// Note: any prefixLen exceeding the trie's maximum prefix is clamped.
func (t *trie[K, T]) AncestorIterator(prefixLen uint, k K) ancestorIterator[K, T] {
	return ancestorIterator[K, T]{
		prefixLen:   min(prefixLen, t.maxPrefix),
		key:         k,
		maxPrefix:   t.maxPrefix,
		currentNode: t.root,
	}
}
// Next produces the 'Next' key/value pair in the iteration sequence maintained by 'iter'. 'ok' is
// 'false' when the sequence ends; 'key' and 'value' are returned with empty values in that case.
func (i *ancestorIterator[K, T]) Next() (ok bool, key K, value T) {
	for i.currentNode != nil {
		k := i.key
		prefixLen := i.prefixLen
		currentNode := i.currentNode
		matchLen := currentNode.prefixMatch(prefixLen, k)
		// The current node does not match the searched key up to its
		// own prefix; there are no further ancestors.
		if matchLen < currentNode.prefixLen {
			break
		}
		// Skip over intermediate nodes, which carry no user value;
		// descend towards the child selected by the next key bit.
		if currentNode.intermediate {
			i.currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)]
			continue
		}
		// Advance the cursor for the following call: either the match
		// is already maximal (sequence ends) or we descend one level.
		if matchLen == i.maxPrefix {
			i.currentNode = nil
		} else {
			i.currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)]
		}
		return true, currentNode.key, currentNode.value
	}
	return false, key, value
}
// ancestorLPFIterator implements an iterator over ancestors in
// longest-prefix-first iteration order.
type ancestorLPFIterator[K Key[K], T any] struct {
	// stack holds matching nodes in top-down (shortest-first) order;
	// popping from the end yields the longest prefix first.
	stack nodes[K, T]
}
// AncestorLongestPrefixFirstIterator returns an iterator for ancestors
// that can be used to produce the 'Next' key/value pair in sequence,
// starting from the key with the longest common prefix with 'key'.
func (t *trie[K, T]) AncestorLongestPrefixFirstIterator(prefixLen uint, k K) ancestorLPFIterator[K, T] {
	iter := ancestorLPFIterator[K, T]{}
	currentNode := t.root
	for currentNode != nil {
		matchLen := currentNode.prefixMatch(prefixLen, k)
		if matchLen < currentNode.prefixLen {
			// No match at this node; nothing deeper can match either.
			break
		}
		// Intermediate nodes carry no value and are not stacked.
		if !currentNode.intermediate {
			iter.stack.push(currentNode)
			if matchLen == t.maxPrefix {
				// Full-width match: descending further would read
				// past the key's bit storage.
				break
			}
		}
		currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)]
	}
	return iter
}
// Next produces the 'Next' key/value pair in the iteration sequence maintained by 'iter'. 'ok' is
// 'false' when the sequence ends; 'key' and 'value' are returned with empty values in that case.
func (i *ancestorLPFIterator[K, T]) Next() (ok bool, key K, value T) {
	if len(i.stack) == 0 {
		return false, key, value
	}
	n := i.stack.pop()
	return true, n.key, n.value
}
// descendantIterator implements an iterator over descendants
// (no particular order).
type descendantIterator[K Key[K], T any] struct {
	// nodes is a stack of subtree roots still to be visited.
	nodes nodes[K, T]
}
// DescendantIterator returns an iterator for descendants
// that can be used to produce the 'Next' key/value pair in sequence.
//
// Note: any prefixLen exceeding the trie's maximum prefix is clamped.
func (t *trie[K, T]) DescendantIterator(prefixLen uint, k K) descendantIterator[K, T] {
	iter := descendantIterator[K, T]{}
	prefixLen = min(prefixLen, t.maxPrefix)
	for n := t.root; n != nil; {
		matchLen := n.prefixMatch(prefixLen, k)
		if matchLen >= prefixLen {
			// n roots the subtree containing every descendant.
			iter.nodes.push(n)
			break
		}
		if n.prefixLen >= t.maxPrefix {
			// n is a leaf: calling k.BitValueAt at its prefix could
			// overrun the key storage.
			break
		}
		n = n.children[k.BitValueAt(n.prefixLen)]
	}
	return iter
}
// Next produces the 'Next' key/value pair in the iteration sequence maintained by 'iter'. 'ok' is
// 'false' when the sequence ends; 'key' and 'value' are returned with empty values in that case.
func (i *descendantIterator[K, T]) Next() (ok bool, key K, value T) {
	for len(i.nodes) > 0 {
		// Take the most recently pushed node and queue its subtrees.
		n := i.nodes.pop()
		for _, child := range n.children {
			if child != nil {
				i.nodes.push(child)
			}
		}
		// Intermediate nodes carry no user value; keep going.
		if !n.intermediate {
			return true, n.key, n.value
		}
	}
	return false, key, value
}
// descendantSPFIterator implements an iterator over descendants in
// shortest-prefix-first iteration order.
type descendantSPFIterator[K Key[K], T any] struct {
	// heap is a min-heap of unvisited subtree roots, keyed on prefix
	// length, so the shortest prefix is always popped first.
	heap nodes[K, T]
}
// DescendantShortestPrefixFirstIterator returns an iterator over the
// descendants of the prefix-key argument pair that produces the 'Next'
// key/value pair in sequence, starting from the shortest prefixes and
// progressing towards longer ones. Keys with equal prefix lengths are
// not iterated in any particular order.
//
// Note: any prefixLen exceeding the trie's maximum prefix is clamped.
func (t *trie[K, T]) DescendantShortestPrefixFirstIterator(prefixLen uint, k K) descendantSPFIterator[K, T] {
	iter := descendantSPFIterator[K, T]{}
	prefixLen = min(prefixLen, t.maxPrefix)
	currentNode := t.root
	for currentNode != nil {
		matchLen := currentNode.prefixMatch(prefixLen, k)
		// CurrentNode matches the prefix-key argument; it roots the
		// whole descendant subtree. A plain push suffices: a
		// single-element heap trivially satisfies the heap invariant.
		if matchLen >= prefixLen {
			iter.heap.push(currentNode)
			break
		}
		// currentNode is a leaf and has no children. Calling k.BitValueAt may
		// overrun the key storage.
		if currentNode.prefixLen >= t.maxPrefix {
			break
		}
		currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)]
	}
	return iter
}
// Next produces the 'Next' key/value pair in the iteration sequence maintained by 'iter'. 'ok' is
// 'false' when the sequence ends; 'key' and 'value' are returned with empty values in that case.
func (i *descendantSPFIterator[K, T]) Next() (ok bool, key K, value T) {
	for i.heap.Len() > 0 {
		// The heap yields the unvisited node with the shortest prefix.
		n := i.heap.popHeap()
		// Queue its subtrees for later visits.
		for _, child := range n.children {
			if child != nil {
				i.heap.pushHeap(child)
			}
		}
		// Intermediate nodes carry no user value; keep going.
		if !n.intermediate {
			return true, n.key, n.value
		}
	}
	return false, key, value
}
// AncestorsLongestPrefixFirst iterates over every prefix-key pair that
// contains the prefix-key argument pair, calling fn from the longest
// (most specific) match to the shortest. If fn returns false the
// iteration stops.
//
// Note: any prefixLen exceeding the trie's maximum prefix is clamped.
func (t *trie[K, T]) AncestorsLongestPrefixFirst(prefixLen uint, k K, fn func(prefix uint, key K, value T) bool) {
	prefixLen = min(prefixLen, t.maxPrefix)
	t.treverse(prefixLen, k, func(currentNode *node[K, T]) bool {
		return fn(currentNode.prefixLen, currentNode.key, currentNode.value)
	})
}
// Descendants calls the function argument for every prefix/key/value in the
// trie that is contained by the prefix-key argument pair. If the function
// argument returns false the iteration stops. Descendants does **not** iterate
// over matches in any guaranteed order.
//
// Note: Descendants clamps any prefixLen argument that exceeds the maximum
// prefix allowed by the trie to that maximum.
func (t *trie[K, T]) Descendants(prefixLen uint, k K, fn func(prefix uint, key K, value T) bool) {
	prefixLen = min(prefixLen, t.maxPrefix)
	for n := t.root; n != nil; n = n.children[k.BitValueAt(n.prefixLen)] {
		matchLen := n.prefixMatch(prefixLen, k)
		if matchLen >= prefixLen {
			// n roots the subtree holding every descendant; walk it.
			n.forEach(fn)
			return
		}
		if n.prefixLen >= t.maxPrefix {
			// n is a leaf: calling k.BitValueAt at its prefix could
			// overrun the key storage.
			return
		}
	}
}
// DescendantsShortestPrefixFirst iterates over every prefix-key pair that is
// contained by the prefix-key argument pair. If the function argument returns
// false the iteration will stop. Iteration starts from the shortest matching
// prefix and progresses towards keys with longer prefixes; keys with equal
// prefix lengths are not visited in any particular order.
//
// Note: any prefixLen argument that exceeds the maximum prefix allowed by the
// trie is clamped to that maximum.
func (t *trie[K, T]) DescendantsShortestPrefixFirst(prefixLen uint, k K, fn func(prefix uint, key K, value T) bool) {
	prefixLen = min(prefixLen, t.maxPrefix)
	for n := t.root; n != nil; n = n.children[k.BitValueAt(n.prefixLen)] {
		matchLen := n.prefixMatch(prefixLen, k)
		if matchLen >= prefixLen {
			// n roots the subtree of all descendants; walk it in
			// order of increasing prefix length.
			n.forEachShortestPrefixFirst(fn)
			return
		}
		if n.prefixLen >= t.maxPrefix {
			// n is a leaf: calling k.BitValueAt at its prefix could
			// overrun the key storage.
			return
		}
	}
}
// traverse iterates over every prefix-key pair that contains the
// prefix-key argument pair in order from shortest to longest prefix
// match. If the function argument returns false the iteration will stop.
//
// traverse starts at the root node in the trie.
//
// The key and prefix being searched (the "search" key and prefix) are
// compared to the a trie node's key and prefix (the "node" key and
// prefix) to determine the extent to which the keys match (from MSB to
// LSB) up to the **least** specific (or shortest) prefix of the two keys
// (for example, if one of the keys has a prefix length of 2 and the other has
// a prefix length of 3 then the two keys will be compared up to the 2nd bit).
// If the key's match less than the node prefix (that is, the search
// key did not fully match the node key) then the traversal ends.
// If the key's match was greater than or equal to the node prefix
// then the node key is iterated over as a potential match,
// but traversal continues to ensure that there is not a more specific
// (that is, longer) match. The next bit, after the match length (between
// the search key and node key), on the search key is looked up to
// determine which children of the current node to traverse (to
// check if there is a more specific match). If there is no child then
// traversal ends. Otherwise traversal continues.
func (t *trie[K, T]) traverse(prefixLen uint, k K, fn func(currentNode *node[K, T]) bool) {
	currentNode := t.root
	for currentNode != nil {
		matchLen := currentNode.prefixMatch(prefixLen, k)
		if matchLen < currentNode.prefixLen {
			// The current node does not match; no deeper node can.
			return
		}
		// Intermediate nodes are skipped (no callback). For real
		// entries, stop when the caller says so or when the match is
		// already at the full key width (descending further would
		// overrun the key's bit storage).
		if !currentNode.intermediate && (!fn(currentNode) || matchLen == t.maxPrefix) {
			return
		}
		currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)]
	}
}
// nPointersOnCacheline is the number of 64-bit pointers on a typical 64-byte cacheline.
// While allocations may cross cache line boundaries, it is still a good size to use for
// a minimum allocation when a small number of pointers is likely needed. It is used
// below to pre-size node stacks and heaps.
const nPointersOnCacheline = 8
// treverse is like traverse, but it calls 'fn' in reverse order, starting from
// the most specific match.
func (t *trie[K, T]) treverse(prefixLen uint, k K, fn func(currentNode *node[K, T]) bool) {
	// Collect matching nodes top-down, then visit them back-to-front so
	// the longest prefix match is reported first. Preallocate space for
	// a few pointers to reduce allocations and copies.
	stack := make([]*node[K, T], 0, nPointersOnCacheline)
	currentNode := t.root
	for currentNode != nil {
		matchLen := currentNode.prefixMatch(prefixLen, k)
		if matchLen < currentNode.prefixLen {
			// The current node does not match; stop descending.
			break
		}
		// Intermediate nodes carry no value and are not stacked.
		if !currentNode.intermediate {
			stack = append(stack, currentNode)
			if matchLen == t.maxPrefix {
				break
			}
		}
		currentNode = currentNode.children[k.BitValueAt(currentNode.prefixLen)]
	}
	// Longest-prefix-match first: walk the stack in reverse.
	for idx := len(stack) - 1; idx >= 0; idx-- {
		if !fn(stack[idx]) {
			return
		}
	}
}
// Upsert inserts or replaces a key and prefix (an "upsert" key and
// prefix) below keys that match it with a smaller (that is, less
// specific) prefix and above keys that match it with a
// more specific (that is "higher") prefix.
//
// Upsert starts with the root key (or "node"). The upsert key and node
// key are compared for the match length between them (see the
// `traverse` comments for details on how this works). If the match
// length is exactly equal to the node prefix then traversal
// continues as the next bit after the match length in the upsert key
// corresponds to one of the two child slots that belong to the node
// key. If the match length is not exactly equal, or there is no child
// to traverse to, or the node prefix is exactly equal to the
// upsert prefix (these conditions are not mutually exclusive) then traversal
// is finished. There are four possible insertion/replacement conditions
// to consider:
// 1. The node key is nil (that is, an empty children "slot"), in which
// case the previous key iterated over should be the upsert-key's
// parent. If there is no parent then the node key is now the
// root node.
// 2. The node key matches the upsert-node to the exact
// prefix. Then the upsert key should replace the node key.
// 3. The node key matches the upsert key to the upsert prefix,
// but node prefix is greater than the upsert prefix. In this
// case the node key will become a child of the upsert key.
// 4. The node key does not match with the upsert key to either
// the node prefix or the upsert prefix. In this case an
// intermediate node needs to be inserted that replaces the
// current position of the node key, but give it a prefix
// of the match between the upsert key and node key. The
// node key and upsert key become siblings.
//
// Intermediate keys/nodes:
// Sometimes when a new key is inserted it does not match any key up to
// its own prefix or its closest matching key's prefix. When this
// happens an intermediate node with the common prefix of the upsert
// key and closest match key. The new intermediate key replaces the closest
// match key's position in the trie and takes the closest match key and
// upsert key as children.
//
// For example, assuming a key size of 8 bytes, adding the prefix-keys of
// "0b001/8"(1-1), "0b010/7"(2-3), and "0b100/6"(4-7) would follow this logic:
//
// 1. "0b001/8" gets added first. It becomes the root node.
// 2. "0b010/7" is added. It will match "0b001/8" (the root node) up to
// 6 bits, because "0b010/7"'s 7th bit is 1 and "0b001/8" has 7th bit of 0.
// In this case, an intermediate node "0b001/6" will be created (the extent
// to which "0b010/7" and "0b001/8" match). The new intermediate node will
// have children "0b001/8" (in the 0 slot) and "0b010/7" (in the 1 slot).
// This new intermediate node become the new root node.
// 3. When "0b100/6" is added it will match the new root (which happens to
// be an intermediate node) "0b001/6" up to 5 bits. Therefore another
// intermediate node of "0b001/5" will be created, becoming the new root
// node. "0b001/6" will become the new intermediate node's child in the
// 0 slot and "0b100/6" will become the other child in the 1 slot.
// "0b001/5" becomes the new root node.
//
// Note: Upsert sets any "prefixLen" argument that exceeds the maximum
// prefix allowed by the trie to the maximum prefix allowed by the
// trie.
func (t *trie[K, T]) Upsert(prefixLen uint, k K, value T) bool {
	prefixLen = min(prefixLen, t.maxPrefix)
	upsertNode := &node[K, T]{
		prefixLen: prefixLen,
		key:       k,
		value:     value,
	}
	var (
		matchLen uint
		parent   *node[K, T]
		bitVal   uint8
	)
	currentNode := t.root
	for currentNode != nil {
		matchLen = currentNode.prefixMatch(prefixLen, k)
		// The current node does not match the upsert-{prefix,key}
		// or the current node matches to the maximum extent
		// allowable by either the trie or the upsert-prefix.
		if currentNode.prefixLen != matchLen ||
			currentNode.prefixLen == t.maxPrefix ||
			currentNode.prefixLen == prefixLen {
			break
		}
		bitVal = k.BitValueAt(currentNode.prefixLen)
		parent = currentNode
		currentNode = currentNode.children[bitVal]
	}
	// Optimistically count the new entry; decremented below if this
	// turns out to replace an existing non-intermediate key.
	t.entries++
	// Empty slot: insert directly under the last matching parent
	// (or as the root when the trie was empty).
	if currentNode == nil {
		if parent == nil {
			t.root = upsertNode
		} else {
			parent.children[bitVal] = upsertNode
		}
		return true
	}
	// There are three cases:
	// 1. The current-node matches the upsert-node to the exact
	//    prefix. Then the upsert-node should replace the current-node.
	// 2. The current-node matches the upsert-node, but the
	//    current-node has a more specific prefix than the
	//    upsert-node. Then the current-node should become a child
	//    of the upsert-node.
	// 3. The current-node does not match with the upsert-node,
	//    but they overlap. Then a new intermediate-node should replace
	//    the current-node with a prefix equal to the overlap.
	//    The current-node and the upsert-node become children
	//    of the new intermediate node.
	//
	// For example, given two keys, "current" and "upsert":
	//
	//	current: 0b1010/4
	//	upsert:  0b1000/3
	//
	// A new key of "0b1010/2" would then be added as an intermediate key
	// (note: the 3rd bit does not matter, but unsetting is an extra
	// operation that we avoid). "current" would be a child of
	// intermediate at index "1" and "upsert" would be at index "0".

	// Case 1: the upsert-node matches the current-node up to the
	// current-node's prefix, replace the current-node (inheriting its
	// children).
	if matchLen == currentNode.prefixLen {
		if parent == nil {
			t.root = upsertNode
		} else {
			parent.children[bitVal] = upsertNode
		}
		upsertNode.children[0] = currentNode.children[0]
		upsertNode.children[1] = currentNode.children[1]
		// If we're not replacing an intermediate node
		// then decrement this function's previous
		// increment of `entries`.
		if !currentNode.intermediate {
			t.entries--
			return false // count remains the same as before
		}
		return true
	}
	// Case 2: the upsert-node matches the current-node up to
	// the upsert-node's prefix, make the current-node
	// a child of the upsert-node.
	if matchLen == prefixLen {
		if parent == nil {
			t.root = upsertNode
		} else {
			parent.children[bitVal] = upsertNode
		}
		bitVal = currentNode.key.BitValueAt(matchLen)
		upsertNode.children[bitVal] = currentNode
		return true
	}
	// Case 3: the upsert-node does not match the current-node
	// up to the upsert-node's prefix and the current-node
	// does not match the upsert-node up to the
	// current-node's prefix, make the nodes siblings with
	// an intermediate node.
	intermediateNode := &node[K, T]{
		prefixLen:    matchLen,
		key:          currentNode.key,
		intermediate: true,
	}
	if parent == nil {
		t.root = intermediateNode
	} else {
		parent.children[bitVal] = intermediateNode
	}
	// The bit after the common prefix decides which sibling goes in
	// which slot.
	if k.BitValueAt(matchLen) == 0 {
		intermediateNode.children[0] = upsertNode
		intermediateNode.children[1] = currentNode
	} else {
		intermediateNode.children[0] = currentNode
		intermediateNode.children[1] = upsertNode
	}
	return true
}
// Delete deletes only keys that match the exact values of the
// prefix length and key arguments.
//
// Delete traverses the trie until it either finds a node key
// that does not match the delete key to the node key's prefix
// (a definitive non-match) or the node key's prefix is equal
// to the delete prefix (a potential deletion). If the delete prefix,
// node prefix, and match length between the keys are equal to
// the same value then the key is deleted from the trie.
//
// Note: Delete sets any prefixLen argument that exceeds the maximum
// prefix allowed by the trie to the maximum prefix allowed by the
// trie.
func (t *trie[K, T]) Delete(prefixLen uint, k K) bool {
	prefixLen = min(prefixLen, t.maxPrefix)
	var (
		grandParent, parent *node[K, T]
		matchLen            uint
		bitVal, prevBitVal  uint8
	)
	currentNode := t.root
	for currentNode != nil {
		// Find to what extent the current node matches with the
		// delete-{prefix,key}.
		matchLen = currentNode.prefixMatch(prefixLen, k)
		// The current-node does not match or it has the same
		// prefix length (the only potential deletion in the
		// trie).
		if currentNode.prefixLen != matchLen ||
			currentNode.prefixLen == prefixLen {
			break
		}
		prevBitVal = bitVal
		bitVal = k.BitValueAt(currentNode.prefixLen)
		// We preserve the grandParent in order
		// to prune intermediate nodes when they
		// are no longer necessary.
		grandParent = parent
		parent = currentNode
		currentNode = currentNode.children[bitVal]
	}
	// Not found, or the current-node does not match
	// the delete-prefix exactly, or the current-node
	// does not match the delete-{prefix,key} lookup,
	// or the current-node is intermediate.
	if currentNode == nil ||
		currentNode.prefixLen != prefixLen ||
		currentNode.prefixLen != matchLen ||
		currentNode.intermediate {
		return false
	}
	t.entries--
	// If this node has two children, we need to keep it as an intermediate
	// node because we cannot migrate both children up the trie.
	if currentNode.children[0] != nil && currentNode.children[1] != nil {
		var emptyT T
		currentNode.intermediate = true
		// Make sure that the value associated with this intermediate
		// node can be GC'd.
		currentNode.value = emptyT
		return true
	}
	// If the parent of the current-node to be deleted is an
	// intermediate-node and the current-node has no children
	// then the parent (intermediate) node can be deleted and
	// its other child promoted up the trie.
	if parent != nil && parent.intermediate &&
		currentNode.children[0] == nil && currentNode.children[1] == nil {
		// saveNode is the sibling of the deleted node; it replaces the
		// now-unnecessary intermediate parent in the grandparent slot.
		var saveNode *node[K, T]
		if k.BitValueAt(parent.prefixLen) == 0 {
			saveNode = parent.children[1]
		} else {
			saveNode = parent.children[0]
		}
		parent.children[0] = nil
		parent.children[1] = nil
		if grandParent == nil {
			t.root = saveNode
		} else {
			grandParent.children[prevBitVal] = saveNode
		}
		return true
	}
	// migrate the last child (if any) up the trie.
	if currentNode.children[0] != nil {
		currentNode = currentNode.children[0]
	} else if currentNode.children[1] != nil {
		currentNode = currentNode.children[1]
	} else {
		currentNode = nil
	}
	if parent == nil {
		t.root = currentNode
	} else {
		parent.children[bitVal] = currentNode
	}
	return true
}
// Len returns the number of entries in the Trie.
func (t *trie[K, T]) Len() uint {
	return t.entries
}
// ForEach iterates over every element of the Trie in no particular
// order. If the function argument returns false the iteration stops.
func (t *trie[K, T]) ForEach(fn func(prefix uint, key K, value T) bool) {
	if t.root != nil {
		t.root.forEach(fn)
	}
}
// prefixMatch returns the number of leading bits that the node key and
// the argument key share, capped at the lesser of the node-key prefix
// and the argument-key prefix.
func (n *node[K, T]) prefixMatch(prefix uint, k K) uint {
	return min(n.prefixLen, prefix, n.key.CommonPrefix(k))
}
// forEach calls fn for each non-intermediate key/value in the subtree
// rooted at n, iterating recursively in depth-first manner.
//
// It returns false as soon as fn returns false, so that the stop
// request propagates through the recursion instead of terminating only
// the current subtree: previously a false return from fn stopped the
// descent below one node while sibling subtrees kept calling fn,
// violating the documented "iteration stops" contract of ForEach and
// Descendants. Existing call sites invoke forEach as a statement and
// may keep discarding the result.
func (n *node[K, T]) forEach(fn func(prefix uint, key K, value T) bool) bool {
	// Intermediate nodes carry no user value and are skipped.
	if !n.intermediate && !fn(n.prefixLen, n.key, n.value) {
		return false
	}
	if n.children[0] != nil && !n.children[0].forEach(fn) {
		return false
	}
	if n.children[1] != nil && !n.children[1].forEach(fn) {
		return false
	}
	return true
}
// nodes is a slice of trie nodes that doubles as a plain stack
// (push/pop) and as a container/heap min-heap keyed on prefix length
// (pushHeap/popHeap).
type nodes[K Key[K], T any] []*node[K, T]

// Len, Less and Swap implement the sort ordering required by
// container/heap: nodes are ordered by ascending prefix length.
func (ns nodes[K, T]) Len() int           { return len(ns) }
func (ns nodes[K, T]) Less(i, j int) bool { return ns[i].prefixLen < ns[j].prefixLen }
func (ns nodes[K, T]) Swap(i, j int)      { ns[i], ns[j] = ns[j], ns[i] }

// Push implements heap.Interface; use pushHeap instead of calling it
// directly.
func (ns *nodes[K, T]) Push(x any) {
	*ns = append(*ns, x.(*node[K, T]))
}

// Pop implements heap.Interface; use popHeap instead of calling it
// directly.
func (ns *nodes[K, T]) Pop() any {
	old := *ns
	last := len(old) - 1
	n := old[last]
	old[last] = nil // don't stop the GC from reclaiming the item eventually
	*ns = old[:last]
	return n
}

// pop removes and returns the top of the plain stack (no heap ordering).
func (ns *nodes[K, T]) pop() *node[K, T] {
	last := len(*ns) - 1
	n := (*ns)[last]
	*ns = (*ns)[:last]
	return n
}

// push appends a node to the plain stack (no heap ordering).
func (ns *nodes[K, T]) push(n *node[K, T]) {
	*ns = append(*ns, n)
}

// pushHeap inserts a node while maintaining the min-heap invariant.
func (ns *nodes[K, T]) pushHeap(n *node[K, T]) {
	heap.Push(ns, n)
}

// popHeap removes and returns the node with the smallest prefix length.
func (ns *nodes[K, T]) popHeap() *node[K, T] {
	return heap.Pop(ns).(*node[K, T])
}
// forEachShortestPrefixFirst calls the argument function for each key and
// value in the subtree rooted at n, iterating from shortest to longest
// prefix. A min-heap tracks the head of every unvisited subtree: on each
// round the node with the shortest prefix is popped, visited, and its
// children are pushed, which yields prefixes in non-decreasing order.
func (n *node[K, T]) forEachShortestPrefixFirst(fn func(prefix uint, key K, value T) bool) {
	// Each entry in the heap has the shortest prefix length of any node
	// in the subtree it represents. Preallocate space for a few
	// pointers to reduce allocations and copies.
	unvisited := make(nodes[K, T], 0, nPointersOnCacheline)
	unvisited.pushHeap(n)
	for unvisited.Len() > 0 {
		next := unvisited.popHeap()
		// Intermediate nodes carry no user value and are skipped.
		if !next.intermediate && !fn(next.prefixLen, next.key, next.value) {
			return
		}
		for _, child := range next.children {
			if child != nil {
				unvisited.pushHeap(child)
			}
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package bitlpm
import (
"math/bits"
"unsafe"
)
// Unsigned represents all types that have an underlying
// unsigned integer type, excluding uintptr and uint
// (whose widths are platform dependent).
type Unsigned interface {
	~uint8 | ~uint16 | ~uint32 | ~uint64
}
// UintTrie is a Trie keyed by any unsigned integer type
// except uintptr and uint.
type UintTrie[K Unsigned, V any] struct {
	trie    Trie[unsignedKey[K], V] // underlying bit-trie
	keySize uint                    // size of K in bytes
}
// NewUintTrie returns a Trie keyed by the unsigned integer type K.
// The trie's maximum prefix length is the bit width of K.
func NewUintTrie[K Unsigned, T any]() *UintTrie[K, T] {
	var k K
	// Key width in bytes; the trie's maximum prefix is its bit count.
	size := uint(unsafe.Sizeof(k))
	return &UintTrie[K, T]{
		trie:    NewTrie[unsignedKey[K], T](size * 8),
		keySize: size,
	}
}
// Upsert updates or inserts the trie with the given prefix, key, and
// value. It returns true if the key is new, and false if it replaced
// an existing entry.
func (ut *UintTrie[K, T]) Upsert(prefix uint, k K, value T) bool {
	return ut.trie.Upsert(prefix, unsignedKey[K]{value: k}, value)
}

// Delete removes the entry with the exact given prefix and key,
// returning false if no such entry was found.
func (ut *UintTrie[K, T]) Delete(prefix uint, k K) bool {
	return ut.trie.Delete(prefix, unsignedKey[K]{value: k})
}

// ExactLookup returns the value for the exact prefix/key pair, if
// present.
func (ut *UintTrie[K, T]) ExactLookup(prefix uint, k K) (T, bool) {
	return ut.trie.ExactLookup(prefix, unsignedKey[K]{value: k})
}

// LongestPrefixMatch returns the key and value of the entry with the
// longest prefix match of k, if any.
func (ut *UintTrie[K, T]) LongestPrefixMatch(k K) (K, T, bool) {
	k2, v, ok := ut.trie.LongestPrefixMatch(unsignedKey[K]{value: k})
	if ok {
		return k2.value, v, ok
	}
	var empty K
	return empty, v, ok
}

// Ancestors calls fn for every entry that contains the prefix-key
// argument pair, from shortest to longest prefix match. If fn returns
// false the iteration stops.
func (ut *UintTrie[K, T]) Ancestors(prefix uint, k K, fn func(prefix uint, key K, value T) bool) {
	ut.trie.Ancestors(prefix, unsignedKey[K]{value: k}, func(prefix uint, k unsignedKey[K], v T) bool {
		return fn(prefix, k.value, v)
	})
}

// Descendants calls fn for every entry contained by the prefix-key
// argument pair, in no guaranteed order. If fn returns false the
// iteration stops.
func (ut *UintTrie[K, T]) Descendants(prefix uint, k K, fn func(prefix uint, key K, value T) bool) {
	ut.trie.Descendants(prefix, unsignedKey[K]{value: k}, func(prefix uint, k unsignedKey[K], v T) bool {
		return fn(prefix, k.value, v)
	})
}

// Len returns the number of entries in the trie.
func (ut *UintTrie[K, T]) Len() uint {
	return ut.trie.Len()
}

// ForEach iterates over every entry in no particular order. If fn
// returns false the iteration stops.
func (ut *UintTrie[K, T]) ForEach(fn func(prefix uint, key K, value T) bool) {
	ut.trie.ForEach(func(prefix uint, k unsignedKey[K], v T) bool {
		return fn(prefix, k.value, v)
	})
}
// unsignedKey adapts an unsigned integer to the Key interface
// required by Trie.
type unsignedKey[U Unsigned] struct {
	value U
}
// CommonPrefix returns the number of leading bits this key shares with
// v, counting from the most significant bit.
//
// The width of the underlying integer is determined via unsafe.Sizeof
// rather than a type switch on the boxed value: Unsigned permits
// defined types (its terms use ~), and `switch any(u.value).(type)`
// matches the dynamic (defined) type, so e.g. `type Port uint16` would
// match no case and silently return a common prefix of 0, corrupting
// all trie lookups for such key types.
func (u unsignedKey[U]) CommonPrefix(v unsignedKey[U]) uint {
	switch unsafe.Sizeof(u.value) {
	case 1:
		return uint(bits.LeadingZeros8(uint8(u.value ^ v.value)))
	case 2:
		return uint(bits.LeadingZeros16(uint16(u.value ^ v.value)))
	case 4:
		return uint(bits.LeadingZeros32(uint32(u.value ^ v.value)))
	case 8:
		return uint(bits.LeadingZeros64(uint64(u.value ^ v.value)))
	}
	return 0
}
// BitValueAt returns the value (0 or 1) of the bit at index i, where
// index 0 is the most significant bit of the key.
//
// The integer width is taken from unsafe.Sizeof instead of a type
// switch on the boxed value: Unsigned permits defined types (~uintN
// terms), and `switch any(u.value).(type)` would fail to match them
// (e.g. `type Port uint16`), falling through to always return 1.
func (u unsignedKey[U]) BitValueAt(i uint) uint8 {
	switch unsafe.Sizeof(u.value) {
	case 1:
		if u.value&(1<<(7-i)) == 0 {
			return 0
		}
	case 2:
		if u.value&(1<<(15-i)) == 0 {
			return 0
		}
	case 4:
		if u.value&(1<<(31-i)) == 0 {
			return 0
		}
	case 8:
		if u.value&(1<<(63-i)) == 0 {
			return 0
		}
	}
	return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cache
import (
"weak"
"github.com/cilium/cilium/pkg/lock"
)
const (
	// cacheSize is the fixed number of slots in a Cache. It must be a
	// power of two so that cacheMask can map a hash onto a slot index
	// with a single AND.
	cacheSize = 1024
	cacheMask = cacheSize - 1
)
// New returns an empty Cache. hashfn maps a value to a slot, eqfn
// compares a candidate against a cached value, and skipfn (which may
// be nil) reports values that should bypass the cache entirely.
func New[T any](hashfn func(T) uint64, skipfn func(x T) bool, eqfn func(a, b T) bool) *Cache[T] {
	return &Cache[T]{
		hashfn: hashfn,
		eqfn:   eqfn,
		skipfn: skipfn,
	}
}
// Cache is a simple fixed size cache for efficient deduplication of objects.
// The underlying array is held onto with a weak pointer to allow GC to collect
// it when under memory pressure.
type Cache[T any] struct {
	mu     lock.Mutex                 // guards arr and the slots it points to
	arr    weak.Pointer[[cacheSize]T] // weakly-held slot array, re-materialized on demand
	skipfn func(T) bool               // optional; reports values that must not be cached
	hashfn func(T) uint64             // maps a value to a slot
	eqfn   func(a, b T) bool          // equality used to detect cache hits
}
// Get returns the canonical cached instance equal to x if one was seen
// before; otherwise x itself is stored and returned. Values matched by
// skipfn are returned unchanged and never cached.
func (c *Cache[T]) Get(x T) T {
	if c.skipfn != nil && c.skipfn(x) {
		return x
	}
	v, _ := c.getWithHash(x)
	return v
}
// getArray returns the backing slot array, reviving it through the weak
// pointer when still alive and allocating a fresh empty one when the GC
// has reclaimed it. Caller must hold c.mu.
func (c *Cache[T]) getArray() *[cacheSize]T {
	arr := c.arr.Value()
	if arr == nil {
		arr = new([cacheSize]T)
		c.arr = weak.Make(arr)
	}
	return arr
}
// getWithHash looks x up in its hash slot, inserting it on a miss, and
// returns the canonical value together with x's hash.
func (c *Cache[T]) getWithHash(x T) (T, uint64) {
	hash := c.hashfn(x)
	slot := hash & cacheMask
	c.mu.Lock()
	defer c.mu.Unlock()
	arr := c.getArray()
	if cached := arr[slot]; c.eqfn(x, cached) {
		return cached, hash
	}
	// Miss (or unrelated occupant): overwrite the slot with x.
	arr[slot] = x
	return x, hash
}
// GetOrPutWith tries to find the object from the cache with the given hash and equality
// function. If not found, [get] is called to construct the object, which is then stored
// in the slot for [hash] and returned.
func GetOrPutWith[T any](c *Cache[T], hash uint64, eq func(T) bool, get func() T) T {
	// Slot index derived from the caller-supplied hash.
	idx := hash & cacheMask
	c.mu.Lock()
	defer c.mu.Unlock()
	arr := c.getArray()
	v := arr[idx]
	if !eq(v) {
		// Miss: construct the object and overwrite the slot.
		v = get()
		arr[idx] = v
	}
	return v
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cache
import (
"maps"
"github.com/cespare/xxhash/v2"
)
var (
	// Strings deduplicates string values. Strings longer than 256 bytes
	// are never cached.
	Strings = New(
		xxhash.Sum64String,
		func(s string) bool {
			// Skip caching of long strings
			return len(s) > 256
		},
		func(a, b string) bool { return a == b },
	)
	// StringMaps deduplicates small string-to-string maps. The hash is the
	// XOR of the hashes of all keys and values; note that XOR makes the
	// hash insensitive to which key pairs with which value, which can only
	// cause extra cache collisions (maps.Equal decides actual equality).
	StringMaps = New(
		func(m map[string]string) (hash uint64) {
			for k, v := range m {
				// Dedup the strings
				_, hashk := Strings.getWithHash(k)
				_, hashv := Strings.getWithHash(v)
				hash = hash ^ hashk ^ hashv
			}
			return
		},
		func(m map[string]string) bool {
			// Skip caching of large maps
			return len(m) > 32
		},
		maps.Equal,
	)
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package container
import (
"cmp"
"encoding/json"
"slices"
)
// ImmSet is an immutable set optimized for a smallish (1-1000) set of items.
// Implemented as a slice sorted by the comparison function, holding no
// duplicates. All "mutating" operations return a new set; the receiver is
// never modified.
type ImmSet[T any] struct {
	xs  []T // sorted, de-duplicated members
	cmp func(T, T) int
	eq  func(T, T) bool
}

// NewImmSet returns a new immutable set containing 'items', using the
// natural ordering of T.
func NewImmSet[T cmp.Ordered](items ...T) ImmSet[T] {
	return NewImmSetFunc[T](cmp.Compare, items...)
}

// NewImmSetFunc returns a new immutable set containing 'items', sorted and
// de-duplicated according to 'compare'.
func NewImmSetFunc[T any](compare func(T, T) int, items ...T) ImmSet[T] {
	s := ImmSet[T]{slices.Clone(items), compare, cmpToEqual(compare)}
	slices.SortFunc(s.xs, s.cmp)
	s.xs = slices.CompactFunc(s.xs, s.eq)
	return s
}

// AsSlice returns the underlying sorted slice stored in the immutable set.
// The caller is NOT allowed to modify the slice.
func (s ImmSet[T]) AsSlice() []T {
	return s.xs
}

// Len returns the number of members in the set.
func (s ImmSet[T]) Len() int {
	return len(s.xs)
}

// Has reports whether 'x' is a member of the set. O(log n).
func (s ImmSet[T]) Has(x T) bool {
	_, found := slices.BinarySearchFunc(s.xs, x, s.cmp)
	return found
}

// MarshalJSON encodes the set as a JSON array of its members.
func (s *ImmSet[T]) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.xs)
}

// UnmarshalJSON decodes the set from a JSON array. The sorted and
// de-duplicated invariant is re-established after decoding (when the
// receiver carries a comparison function, as sets built by the
// constructors do), so the JSON input need not be sorted.
func (s *ImmSet[T]) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &s.xs); err != nil {
		return err
	}
	if s.cmp != nil && s.eq != nil {
		slices.SortFunc(s.xs, s.cmp)
		s.xs = slices.CompactFunc(s.xs, s.eq)
	}
	return nil
}

// Insert returns a new set that additionally contains 'xs'.
func (s ImmSet[T]) Insert(xs ...T) ImmSet[T] {
	if len(xs) > 1 {
		// Many items: sort them once and merge the two sorted sets.
		xsAsImmSet := NewImmSetFunc(s.cmp, xs...)
		return s.Union(xsAsImmSet)
	}
	xs2 := make([]T, 0, len(s.xs)+len(xs))
	xs2 = append(xs2, s.xs...)
	for _, x := range xs {
		// Searching s.xs is safe here: on this path at most one element is
		// inserted, so xs2 has the same contents as s.xs.
		idx, found := slices.BinarySearchFunc(s.xs, x, s.cmp)
		if !found {
			xs2 = slices.Insert(xs2, idx, x)
		}
	}
	return ImmSet[T]{xs: xs2, cmp: s.cmp, eq: s.eq}
}

// Delete returns a new set without 'xs'.
func (s ImmSet[T]) Delete(xs ...T) ImmSet[T] {
	if len(xs) > 1 {
		xsAsImmSet := NewImmSetFunc(s.cmp, xs...)
		return s.Difference(xsAsImmSet)
	}
	// Clone first so the original backing slice is never mutated.
	s.xs = slices.Clone(s.xs)
	for _, x := range xs {
		idx, found := slices.BinarySearchFunc(s.xs, x, s.cmp)
		if found {
			s.xs = slices.Delete(s.xs, idx, idx+1)
		}
	}
	return s
}

// Union returns the set of elements present in either set, as a linear
// merge of the two sorted slices. O(n+m).
func (s ImmSet[T]) Union(s2 ImmSet[T]) ImmSet[T] {
	result := make([]T, 0, len(s.xs)+len(s2.xs))
	xs1, xs2 := s.xs, s2.xs
	for len(xs1) > 0 && len(xs2) > 0 {
		switch diff := s.cmp(xs1[0], xs2[0]); {
		case diff < 0:
			result = append(result, xs1[0])
			xs1 = xs1[1:]
		case diff > 0:
			result = append(result, xs2[0])
			xs2 = xs2[1:]
		default:
			// Present in both sets; keep a single copy.
			result = append(result, xs1[0])
			xs1 = xs1[1:]
			xs2 = xs2[1:]
		}
	}
	// At most one of the remainders is non-empty.
	result = append(result, xs1...)
	result = append(result, xs2...)
	return ImmSet[T]{result, s.cmp, s.eq}
}

// Difference returns the set of elements of 's' that are not in 's2'. O(n+m).
func (s ImmSet[T]) Difference(s2 ImmSet[T]) ImmSet[T] {
	result := make([]T, 0, len(s.xs))
	xs1, xs2 := s.xs, s2.xs
	for len(xs1) > 0 && len(xs2) > 0 {
		switch diff := s.cmp(xs1[0], xs2[0]); {
		case diff < 0:
			result = append(result, xs1[0])
			xs1 = xs1[1:]
		case diff > 0:
			xs2 = xs2[1:]
		default:
			// Present in both sets; drop it.
			xs1 = xs1[1:]
			xs2 = xs2[1:]
		}
	}
	result = append(result, xs1...)
	return ImmSet[T]{result, s.cmp, s.eq}
}

// Equal reports whether both sets contain exactly the same members.
func (s ImmSet[T]) Equal(s2 ImmSet[T]) bool {
	return slices.EqualFunc(s.xs, s2.xs, s.eq)
}

// cmpToEqual derives an equality predicate from a three-way comparison.
func cmpToEqual[T any](cmp func(T, T) int) func(T, T) bool {
	return func(a, b T) bool {
		return cmp(a, b) == 0
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package container
import (
"iter"
)
// InsertOrderedMap is a map that allows iterating over the keys in the order
// they were inserted.
type InsertOrderedMap[K comparable, V any] struct {
indexes map[K]int
kvs []keyValuePair[K, V]
}
type keyValuePair[K, V any] struct {
key K
value V
}
// NewInsertOrderedMap creates a new insert-ordered map.
func NewInsertOrderedMap[K comparable, V any]() *InsertOrderedMap[K, V] {
return &InsertOrderedMap[K, V]{
indexes: map[K]int{},
kvs: []keyValuePair[K, V]{},
}
}
// Clear the map.
func (m *InsertOrderedMap[K, V]) Clear() {
clear(m.indexes)
clear(m.kvs)
m.kvs = m.kvs[:0]
}
// Len returns the number of items in the map.
func (m *InsertOrderedMap[K, V]) Len() int {
return len(m.kvs)
}
// All returns an iterator for keys and values in the map in insertion order.
func (m *InsertOrderedMap[K, V]) All() iter.Seq2[K, V] {
return func(yield func(K, V) bool) {
for _, kv := range m.kvs {
if !yield(kv.key, kv.value) {
break
}
}
}
}
// Keys returns an iterator for the keys in the map in insertion order.
func (m *InsertOrderedMap[K, V]) Keys() iter.Seq[K] {
return func(yield func(K) bool) {
for _, kv := range m.kvs {
if !yield(kv.key) {
break
}
}
}
}
// Values returns an iterator for the values in the map in insertion order.
func (m *InsertOrderedMap[K, V]) Values() iter.Seq[V] {
return func(yield func(V) bool) {
for _, kv := range m.kvs {
if !yield(kv.value) {
break
}
}
}
}
// Get a value from the map. O(1).
func (m *InsertOrderedMap[K, V]) Get(k K) (v V, found bool) {
var idx int
idx, found = m.indexes[k]
if !found {
return
}
return m.kvs[idx].value, true
}
// Delete a key from the map. O(n).
func (m *InsertOrderedMap[K, V]) Delete(k K) (found bool) {
var idx int
idx, found = m.indexes[k]
if !found {
return
}
delete(m.indexes, k)
// Shift over the deleted element and update indexes
for i := idx; i < len(m.kvs)-1; i++ {
m.kvs[i] = m.kvs[i+1]
m.indexes[m.kvs[i].key] = i
}
m.kvs = m.kvs[:len(m.kvs)-1]
return true
}
// Insert or update a key in the map. O(1).
// An update will not affect the ordering.
func (m *InsertOrderedMap[K, V]) Insert(k K, v V) {
idx, found := m.indexes[k]
if found {
m.kvs[idx].value = v
return
}
idx = len(m.kvs)
m.indexes[k] = idx
m.kvs = append(m.kvs, struct {
key K
value V
}{k, v})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package container
import (
"sort"
)
// RingBuffer is a generic ring buffer implementation that contains
// sequential data (i.e. such as time ordered data).
// RingBuffer is implemented using slices. From testing, this should
// be fast than linked-list implementations, and also allows for efficient
// indexing of ordered data.
type RingBuffer struct {
buffer []any
next int // index of ring buffer head.
maxSize int
}
// NewRingBuffer constructs a new ring buffer for a given buffer size.
func NewRingBuffer(bufferSize int) *RingBuffer {
return &RingBuffer{
buffer: make([]any, 0, bufferSize),
maxSize: bufferSize,
}
}
func (eb *RingBuffer) isFull() bool {
return len(eb.buffer) >= eb.maxSize
}
func (eb *RingBuffer) incr() {
eb.next = (eb.next + 1) % eb.maxSize
}
// Add adds an element to the buffer.
func (eb *RingBuffer) Add(e any) {
if eb.maxSize == 0 {
return
}
if eb.isFull() {
eb.buffer[eb.next] = e
eb.incr()
return
}
eb.incr()
eb.buffer = append(eb.buffer, e)
}
func (eb *RingBuffer) dumpWithCallback(callback func(v any)) {
for i := range eb.buffer {
callback(eb.at(i))
}
}
func (eb *RingBuffer) at(i int) any {
return eb.buffer[eb.mapIndex(i)]
}
// firstValidIndex returns the first **absolute** index in the buffer that satisfies
// isValid.
// note: this value needs to be mapped before indexing the buffer.
func (eb *RingBuffer) firstValidIndex(isValid func(any) bool) int {
return sort.Search(len(eb.buffer), func(i int) bool {
return isValid(eb.at(i))
})
}
// IterateValid calls the callback on each element of the buffer, starting with
// the first element in the buffer that satisfies "isValid".
func (eb *RingBuffer) IterateValid(isValid func(any) bool, callback func(any)) {
startIndex := eb.firstValidIndex(isValid)
l := len(eb.buffer) - startIndex
for i := range l {
index := eb.mapIndex(startIndex + i)
callback(eb.buffer[index])
}
}
// maps index in [0:len(buffer)) to the actual index in buffer.
func (eb *RingBuffer) mapIndex(indexOffset int) int {
ret := (eb.next + indexOffset) % len(eb.buffer)
return ret
}
// Compact clears out invalidated elements in the buffer.
// This may require copying the entire buffer.
// It is assumed that if buffer[i] is invalid then every entry [0...i-1] is also not valid.
func (eb *RingBuffer) Compact(isValid func(any) bool) {
if len(eb.buffer) == 0 {
return
}
startIndex := eb.firstValidIndex(isValid)
// In this case, we compact the entire buffer.
if startIndex >= len(eb.buffer) {
eb.buffer = []any{}
eb.next = 0
return
}
mappedStart := eb.mapIndex(startIndex) // mapped start is the new index 0 of our buffer.
// new length will be how long the current buffer is, minus the absolute starting index.
newBufferLength := len(eb.buffer) - startIndex
// case where the head index is to the left of the tail index.
// e.x. [... head, tail, ...]
// mappedStart + newBufferLength is the upper bound of the new buffer list
// if we don't have to worry about mapping.
//
// e.x. [mappedStart:mappedStart+newBufferLength] <- this is our new buffer.
//
// If this value is less than or equal to the length then we don't need
// to worry about any part of the list wrapping around.
if mappedStart+newBufferLength > len(eb.buffer) {
// now we can find the actual end index, by offsetting the startIndex
// by the length and mapping it.
// [... startIndex+newBufferLen ... startIndex ...]
end := eb.mapIndex(startIndex + newBufferLength)
tmp := make([]any, len(eb.buffer[:end]))
copy(tmp, eb.buffer[:end])
eb.buffer = eb.buffer[mappedStart:]
eb.buffer = append(eb.buffer, tmp...)
// at this point the buffer is such that the 0th element
// maps to the 0th index in the buffer array.
eb.next = len(eb.buffer)
if eb.isFull() {
eb.next = eb.next % eb.maxSize
}
return
}
// otherwise, the head is to the right of the tail.
begin := mappedStart
end := mappedStart + newBufferLength
eb.buffer = eb.buffer[begin:end]
eb.next = len(eb.buffer)
if eb.isFull() {
eb.next = eb.next % eb.maxSize
}
}
// Iterate is a convenience function over IterateValid that iterates
// all elements in the buffer.
func (eb *RingBuffer) Iterate(callback func(any)) {
eb.IterateValid(func(e any) bool { return true }, callback)
}
// Size returns the size of the buffer.
func (eb *RingBuffer) Size() int {
return len(eb.buffer)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package set
import (
"fmt"
"iter"
"maps"
"slices"
)
type empty struct{}
// Set contains zero, one, or more members. Zero or one members do not consume any additional
// storage, more than one members are held in an non-exported membersMap.
type Set[T comparable] struct {
single *T
members map[T]empty
}
// Empty returns 'true' if the set is empty.
func (s Set[T]) Empty() bool {
return s.single == nil && s.members == nil
}
// Len returns the number of members in the set.
func (s Set[T]) Len() int {
if s.single != nil {
return 1
}
return len(s.members)
}
func (s Set[T]) String() string {
if s.single != nil {
return fmt.Sprintf("%v", s.single)
}
res := ""
for m := range s.members {
if res != "" {
res += ","
}
res += fmt.Sprintf("%v", m)
}
return res
}
// NewSet returns a Set initialized to contain the members in 'members'.
func NewSet[T comparable](members ...T) Set[T] {
s := Set[T]{}
for _, member := range members {
s.Insert(member)
}
return s
}
// Has returns 'true' if 'member' is in the set.
func (s Set[T]) Has(member T) bool {
if s.single != nil {
return *s.single == member
}
_, ok := s.members[member]
return ok
}
// Insert inserts a member to the set.
// Returns 'true' when '*s' value has changed,
// so that if it is stored by value the caller must knows to update the stored value.
func (s *Set[T]) Insert(member T) (changed bool) {
switch s.Len() {
case 0:
s.single = &member
return true
case 1:
if member == *s.single {
return false
}
s.members = make(map[T]empty, 2)
s.members[*s.single] = empty{}
s.single = nil
s.members[member] = empty{}
return true
default:
s.members[member] = empty{}
return false
}
}
// Merge inserts members in 'o' into to the set 's'.
// Returns 'true' when '*s' value has changed,
// so that if it is stored by value the caller must knows to update the stored value.
func (s *Set[T]) Merge(sets ...Set[T]) (changed bool) {
for _, other := range sets {
for m := range other.Members() {
changed = s.Insert(m) || changed
}
}
return changed
}
// Remove removes a member from the set.
// Returns 'true' when '*s' value was changed, so that if it is stored by value the caller knows to
// update the stored value.
func (s *Set[T]) Remove(member T) (changed bool) {
length := s.Len()
switch length {
case 0:
case 1:
if *s.single == member {
s.single = nil
return true
}
case 2:
delete(s.members, member)
if len(s.members) == 1 {
for m := range s.members {
s.single = &m
}
s.members = nil
return true
}
default:
delete(s.members, member)
}
return false
}
// RemoveSets removes one or more Sets from the receiver set.
// Returns 'true' when '*s' value was changed, so that if it is stored by value the caller knows to
// update the stored value.
func (s *Set[T]) RemoveSets(sets ...Set[T]) (changed bool) {
for _, other := range sets {
for m := range other.Members() {
changed = s.Remove(m) || changed
}
}
return changed
}
// Clear makes the set '*s' empty.
func (s *Set[T]) Clear() {
s.single = nil
s.members = nil
}
// Equal returns 'true' if the receiver and argument sets are the same.
func (s Set[T]) Equal(o Set[T]) bool {
sLen := s.Len()
oLen := o.Len()
if sLen != oLen {
return false
}
switch sLen {
case 0:
return true
case 1:
return *s.single == *o.single
}
// compare the elements of the maps
for member := range s.members {
if _, ok := o.members[member]; !ok {
return false
}
}
return true
}
// Members returns an iterator for the members in the set.
func (s Set[T]) Members() iter.Seq[T] {
return func(yield func(m T) bool) {
if s.single != nil {
yield(*s.single)
} else {
for member := range s.members {
if !yield(member) {
return
}
}
}
}
}
// MembersOfType return an iterator for each member of type M in the set.
func MembersOfType[M any, T comparable](s Set[T]) iter.Seq[M] {
return func(yield func(m M) bool) {
if s.single != nil {
if v, ok := any(*s.single).(M); ok {
yield(v)
}
} else {
for m := range s.members {
if v, ok := any(m).(M); ok {
if !yield(v) {
return
}
}
}
}
}
}
// Get returns any one member from the set.
// Useful when it is known that the set has only one element.
func (s Set[T]) Get() (m T, found bool) {
length := s.Len()
switch length {
case 0:
case 1:
m = *s.single
default:
for m = range s.members {
break
}
}
return m, length > 0
}
// AsSlice converts the set to a slice.
func (s Set[T]) AsSlice() []T {
return slices.Collect(s.Members())
}
// Clone returns a copy of the set.
func (s Set[T]) Clone() Set[T] {
if s.members != nil {
return Set[T]{members: maps.Clone(s.members)}
}
return s // singular value or empty Set
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package versioned
import (
"errors"
"fmt"
"iter"
"log/slog"
"math"
"runtime"
"slices"
"strconv"
"sync/atomic"
"github.com/hashicorp/go-hclog"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
)
// version is the internal version number type: a monotonically increasing
// change counter shared by all Values under one Coordinator.
type version uint64
const (
	// invalidVersion is never found from the value range.
	// Also used as the upper bound for new values, so that the value is
	// found when looking with 'maxVersion'
	invalidVersion = version(math.MaxUint64)
	// maxVersion in a VersionHandle finds the latest version of all non-removed values
	maxVersion = version(math.MaxUint64 - 1)
)
// KeepVersion is an exported version type used when releasing memory held for old versions.
type KeepVersion version
// Sentinel errors returned by the versioned primitives; compare with errors.Is.
var (
	ErrInvalidVersion = errors.New("invalid version")
	ErrStaleTransaction = errors.New("stale transaction")
	ErrStaleVersion = errors.New("stale version")
	ErrVersionNotFound = errors.New("version not found")
)
// VersionHandle is used to keep values valid for a specific version from being released, so that
// they are available for use for as long as the VersionHandle is not closed.
//
// A special form with a nil coordinator, which is returned by Latest(), always finds the latest
// versions and does not keep any versions from being released.
type VersionHandle struct {
	// 'version' is the version this versionHandle keeps from being released
	version version
	// Coordinator of this versionHandle, if any. Used for closing if non-nil.
	// Atomic due to nilling and for copy prevention. A nil pointer here means
	// the handle is either the special maxVersion handle or already closed.
	coordinator atomic.Pointer[Coordinator]
	// Optional stack trace of the caller for debugging purposes
	// (captured only when debug mode is enabled).
	stacktrace hclog.CapturedStacktrace
}
// IsValid reports whether the handle still pins a version: either it is
// the never-expiring maxVersion handle, or it has not been closed yet
// (its coordinator pointer is still set).
func (h *VersionHandle) IsValid() bool {
	if h == nil {
		return false
	}
	return h.version == maxVersion || h.coordinator.Load() != nil
}
// Version returns the version this handle keeps alive, as a KeepVersion.
func (h *VersionHandle) Version() KeepVersion {
	return KeepVersion(h.version)
}
// String renders the handle's version and validity state for debugging.
func (h *VersionHandle) String() string {
	if h == nil {
		return "version: <nil>"
	}
	state := "invalid"
	if h.IsValid() {
		state = "valid"
	}
	return fmt.Sprintf("%d (%s)", h.version, state)
}
// Close releases the held version for removal once no handles for it are held any more.
// This may not be called while holding any locks that the 'closer' function passed to the
// coordinator may take!
func (h *VersionHandle) Close() error {
	if h == nil || h.version == invalidVersion {
		return ErrInvalidVersion
	}
	// handle with 'maxVersion' is never closed
	if h.version != maxVersion {
		// Using CompareAndSwap makes sure each handle is closed at most once
		coordinator := h.coordinator.Load()
		if coordinator != nil && h.coordinator.CompareAndSwap(coordinator, nil) {
			// The leak-warning finalizer is no longer needed once the
			// handle has been properly closed.
			runtime.SetFinalizer(h, nil)
			return coordinator.releaseVersion(h.version)
		}
		// coordinator pointer was already nil (or was nilled by a racing
		// Close): the handle was closed before.
		return ErrStaleVersion
	}
	return nil
}
// versionHandleFinalizer is used to warn about missing Close() calls: it is
// attached via runtime.SetFinalizer and logs an error (with the captured
// stacktrace, if any) before closing the leaked handle.
func versionHandleFinalizer(h *VersionHandle) {
	coord := h.coordinator.Load()
	if coord != nil && coord.Logger != nil {
		log := coord.Logger
		if h.stacktrace != "" {
			log = log.With(logfields.Stacktrace, h.stacktrace)
		}
		log.Error("Handle for version not closed.", logfields.Version, h.version)
	}
	h.Close()
}
// newVersionHandle builds a handle pinning 'version' under 'coordinator'.
// Handles on maxVersion never expire and are given no coordinator; all
// other handles get a finalizer that warns when Close() was forgotten.
func newVersionHandle(version version, coordinator *Coordinator) *VersionHandle {
	if version == maxVersion {
		// handle on maxVersion never expires
		coordinator = nil
	}
	h := &VersionHandle{version: version}
	h.coordinator.Store(coordinator)
	if coordinator == nil {
		return h
	}
	// Complain loudly via the finalizer if the handle is never closed; the
	// finalizer is a safety net for leak detection, not the closing
	// mechanism itself.
	runtime.SetFinalizer(h, versionHandleFinalizer)
	if option.Config.Debug {
		// capture a stacktrace to identify the leaking caller
		h.stacktrace = hclog.Stacktrace()
	}
	return h
}
// Latest returns a VersionHandle for the latest version of current/non-removed values.
// Only to be used in cases where the latest values are needed and no transactionality is required.
// The returned handle has no coordinator and never needs closing.
func Latest() *VersionHandle {
	return newVersionHandle(maxVersion, nil)
}
// atomicVersion wraps a version with atomic load/store semantics.
type atomicVersion struct {
	version atomic.Uint64
}
// load atomically reads the stored version.
func (a *atomicVersion) load() version {
	return version(a.version.Load())
}
// store atomically writes the given version.
func (a *atomicVersion) store(version version) {
	a.version.Store(uint64(version))
}
// versionCount pairs a version with the number of outstanding
// VersionHandles that currently pin it.
type versionCount struct {
	version version
	count   int
}
// Coordinator defines a common version number space for multiple versioned.Values,
// and provides facilities for cleaning out stale versions.
// The Value's are not directly managed by the Coordinator, but all the values under coordination
// should be cleaned by the 'cleaner' function given to the Coordinator.
// A Coordinator must not be copied after first use (it contains a mutex).
type Coordinator struct {
	// Logger supplied to NewCoordinator. Should be set if logging is desired.
	Logger *slog.Logger
	// Cleaner is called with the earliest version that must be kept.
	// Must be set to clean up resources held for old versions.
	// Cleaner function may be called concurrently, the function must synchronize
	// use of any shared resources.
	Cleaner func(KeepVersion)
	// mutex protects the rest of the fields
	mutex lock.RWMutex
	// version is the version number of the last applied change
	version version
	// oldest version not cleaned off
	oldestVersion version
	// versions is an ordered (ascending) list of outstanding VersionHandles
	// with a reference count per version.
	// Outdated values can be cleaned when there are no outstanding VersionHandles for them.
	versions []versionCount
}
// PrepareNextVersion returns a transaction targeting the next version, to
// be used when adding or removing values.
//
// Callers need to coordinate so that a single goroutine is performing
// modifications at any one time, consisting of the following operations:
//
//   - tx := coordinator.PrepareNextVersion()
//   - value.SetAt(... , tx)
//   - value.RemoveAt(..., tx)
//   - ...
//   - tx.Commit()
func (v *Coordinator) PrepareNextVersion() *Tx {
	v.mutex.Lock()
	defer v.mutex.Unlock()
	tx := &Tx{
		coordinator: v,
		nextVersion: v.version + 1,
	}
	return tx
}
// Tx is a transaction against a Coordinator's version space.
// All values in Tx are constants after creation.
type Tx struct {
	nextVersion version
	coordinator *Coordinator
}
// LatestTx refers to maxVersion without having a coordinator, should only be used for testing.
// Note: having no coordinator, LatestTx does not support Commit().
var LatestTx = &Tx{nextVersion: maxVersion}
// String returns the transaction's target version as a decimal string.
func (tx *Tx) String() string {
	return strconv.FormatUint(uint64(tx.nextVersion), 10)
}
// After reports whether this transaction's version is strictly later than 'v'.
func (tx *Tx) After(v KeepVersion) bool {
	return tx.nextVersion > version(v)
}
// Commit makes a new version of values available to readers
// Commit call may be omitted if no changes were actually made.
// Note: will panic on LatestTx, whose coordinator is nil.
func (tx *Tx) Commit() error {
	return tx.coordinator.commit(tx.nextVersion)
}
// GetVersionHandle returns a VersionHandle pinning this transaction's
// target version.
func (tx *Tx) GetVersionHandle() *VersionHandle {
	// Special case so that LatestTx is usable in tests without a coordinator.
	if tx.coordinator == nil && tx.nextVersion == maxVersion {
		return Latest()
	}
	return tx.coordinator.getVersionHandle(tx.nextVersion)
}
// versionHandleCmp orders a versionCount entry against a bare version,
// for use with slices.BinarySearchFunc over Coordinator.versions.
func versionHandleCmp(a versionCount, b version) int {
	switch {
	case a.version < b:
		return -1
	case a.version > b:
		return 1
	default:
		return 0
	}
}
// commit makes a new version of values available to readers and cleans up
// any possible stale versions. Fails with ErrStaleVersion unless 'version'
// is exactly one past the currently committed version.
func (v *Coordinator) commit(version version) error {
	v.mutex.Lock()
	defer v.mutex.Unlock()
	if version != v.version+1 {
		return ErrStaleVersion
	}
	v.version = version
	// clean up stale versions if any
	v.clean()
	return nil
}
// releaseVersion drops one reference to 'version', removes its bookkeeping
// entry when the reference count reaches zero, and then triggers cleanup of
// values no longer reachable by any outstanding handle.
func (v *Coordinator) releaseVersion(version version) error {
	if version == invalidVersion {
		return ErrInvalidVersion
	}
	v.mutex.Lock()
	defer v.mutex.Unlock()
	idx, found := slices.BinarySearchFunc(v.versions, version, versionHandleCmp)
	if !found {
		if v.Logger != nil {
			v.Logger.Error(
				"Version not found.",
				logfields.Version, version,
				logfields.Stacktrace, hclog.Stacktrace(),
			)
		}
		return ErrVersionNotFound
	}
	v.versions[idx].count--
	if v.versions[idx].count <= 0 {
		v.versions = slices.Delete(v.versions, idx, idx+1)
	}
	// clean if needed
	v.clean()
	return nil
}
// clean must be called with lock held.
// It determines the earliest version that must be kept and, when that
// boundary has advanced, invokes the Cleaner callback asynchronously.
func (v *Coordinator) clean() {
	// 'keepVersion' is the current version if there are no outstanding VersionHandles
	keepVersion := v.version
	if len(v.versions) > 0 {
		// otherwise it is the oldest version for which there is an outstanding handle, if
		// older than the current version, as if there was an implicit outstanding handle
		// for the current version.
		keepVersion = min(v.version, v.versions[0].version)
	}
	// Call the cleaner for 'keepVersion' only if not already called for this 'keepVersion'.
	if keepVersion > v.oldestVersion {
		// The cleaner is called from a goroutine without holding any locks
		if v.Cleaner != nil {
			if v.Logger != nil {
				v.Logger.Debug(
					"releaseVersion: calling cleaner",
					logfields.OldVersion, v.oldestVersion,
					logfields.NewVersion, keepVersion,
				)
			}
			go v.Cleaner(KeepVersion(keepVersion))
			v.oldestVersion = keepVersion
		} else if v.Logger != nil {
			v.Logger.Warn("VersionHandle.Close: Cleaner function not set")
		}
	}
}
// getVersionHandle returns a VersionHandle for the given version,
// taking the coordinator lock before delegating to the locked variant.
func (v *Coordinator) getVersionHandle(version version) *VersionHandle {
	v.mutex.Lock()
	defer v.mutex.Unlock()
	return v.getVersionHandleLocked(version)
}
// getVersionHandleLocked returns a handle pinning 'version', clamped to the
// oldest version that has not yet been cleaned off.
// Caller must hold v.mutex.
func (v *Coordinator) getVersionHandleLocked(version version) *VersionHandle {
	// never get a handle for the invalid version
	if version == invalidVersion {
		version = maxVersion
	}
	if version < v.oldestVersion {
		// The requested version has already been released: fall back to the
		// oldest version still retained, warning about the stale request.
		oldVersion := version
		version = v.oldestVersion
		if v.Logger != nil {
			v.Logger.Warn(
				"GetVersionHandle: Handle to a stale version requested, returning oldest valid version instead",
				logfields.Stacktrace, hclog.Stacktrace(),
				logfields.Version, version,
				logfields.OldVersion, oldVersion,
			)
		}
	}
	// Record (or bump) the reference count for this version so that clean()
	// keeps its values alive until the handle is closed.
	n, found := slices.BinarySearchFunc(v.versions, version, versionHandleCmp)
	if !found {
		v.versions = slices.Insert(v.versions, n, versionCount{version, 1})
	} else {
		v.versions[n].count++
	}
	return newVersionHandle(version, v)
}
// GetVersionHandle returns a VersionHandle for the current version, so that it can not be
// cleaned off before the returned VersionHandle is closed.
func (v *Coordinator) GetVersionHandle() *VersionHandle {
	v.mutex.Lock()
	defer v.mutex.Unlock()
	// Pin the latest committed version.
	return v.getVersionHandleLocked(v.version)
}
// versionRange is a range from the first to one-past-the-last version, "[first, past)".
// 'past' is atomically modified to a smaller value when removing or a new version is added.
type versionRange struct {
	first version       // first version this value is valid for
	past  atomicVersion // first version this value is invalid for
}
// contains reports whether 'version' falls within the half-open range [first, past).
func (r *versionRange) contains(version version) bool {
	return r.first <= version && version < r.past.load()
}
// valueNode is the node used in the linked list rooted at Value[T]
type valueNode[T any] struct {
	versions versionRange                 // versions for which 'value' is visible
	next     atomic.Pointer[valueNode[T]] // next (newer) node, if any
	value    T
}
// Value is a container for versioned values, implemented as a lock-free linked list.
type Value[T any] struct {
	// valueNodes are non-overlapping and sorted by version in ascending order.
	head atomic.Pointer[valueNode[T]]
}
// SetAt adds the value with validity starting from 'version'. All values are added with "infinite"
// validity, which is then truncated when an overlapping entry is added, or the value is removed.
// 'next' version must be later than any the current version visible to the readers.
// Returns an error if this is not the case.
// Callers must coordinate for mutual exclusion.
func (v *Value[T]) SetAt(value T, tx *Tx) error {
	version := tx.nextVersion
	if version == invalidVersion {
		return ErrInvalidVersion
	}
	newNode := &valueNode[T]{
		versions: versionRange{
			first: version,
		},
		value: value,
	}
	// All new values are initially set to never expire
	// ('invalidVersion' is one past 'maxVersion')
	newNode.versions.past.store(invalidVersion)
	// find if there is a current value that is valid for this new version
	node := v.head.Load()
	prev := &v.head
	for node != nil {
		if version < node.versions.first {
			return fmt.Errorf("SetAt may not add values at versions lower than those already added (%d<%d): %w", version, node.versions.first, ErrStaleVersion)
		}
		if node.versions.contains(version) {
			// link the new node after the current one
			newNode.next.Store(node.next.Load())
			node.next.Store(newNode)
			// truncate the validity of this node to end at 'version' *after* the new
			// node with validity starting from 'version' has been linked after it
			// (above), so that either this or the new value is reachable at all times
			// for readers with 'version'
			node.versions.past.store(version)
			break
		}
		// advance; 'prev' tracks the link to update if the end is reached
		node = node.next.Load()
		if node != nil {
			prev = &node.next
		}
	}
	if node == nil {
		// Add the new value at the end
		prev.Store(newNode)
	}
	return nil
}
// RemoveAt changes the validity of the stored value previously valid at version 'next' to have
// ended at version 'next'.
// 'next' must be later than any the current version visible to the readers.
// Returns an error if this is not the case.
// Callers must coordinate for mutual exclusion.
func (v *Value[T]) RemoveAt(tx *Tx) error {
	version := tx.nextVersion
	if version == invalidVersion {
		return ErrInvalidVersion
	}
	// Walk the list looking for the node whose validity covers 'version'.
	for node := v.head.Load(); node != nil; node = node.next.Load() {
		if version < node.versions.first {
			return fmt.Errorf("RemoveAt may not be called with version lower than existing already (%d<%d): %w", version, node.versions.first, ErrStaleVersion)
		}
		if node.versions.contains(version) {
			// Truncate the validity of this node to end at 'version'.
			// After this readers with 'version' and above no longer see this value,
			// while readers with versions before 'version' still see this.
			node.versions.past.store(version)
			break
		}
	}
	return nil
}
// RemoveBefore removes all values whose validity ends before 'keepVersion'.
// Caller must coordinate for mutual exclusion with other writers.
func (v *Value[T]) RemoveBefore(keepVersion KeepVersion) {
	keep := version(keepVersion)
	// Skip over the leading nodes whose validity ended at or before 'keep';
	// they are no longer visible to readers at 'keep' or newer and can be
	// safely dropped from the head of the list.
	node := v.head.Load()
	for node != nil && node.versions.past.load() <= keep {
		node = node.next.Load()
	}
	v.head.Store(node)
}
// At returns the value valid for the version pinned by 'handle', or the
// zero value of T when the handle is nil or no stored value covers that
// version.
func (v *Value[T]) At(handle *VersionHandle) (zero T) {
	if handle == nil {
		return zero
	}
	for node := v.head.Load(); node != nil; node = node.next.Load() {
		if node.versions.contains(handle.version) {
			return node.value
		}
	}
	return zero
}
// Versioned is a pair of a version and any type T
type Versioned[T any] struct {
	version version
	value   T
}
// VersionedSlice is a slice of versioned values, sorted by version in
// ascending order when built via Append with increasing versions.
type VersionedSlice[T any] []Versioned[T]
// Append appends a pair of the transaction's version and 'value' to the
// slice, returning the updated slice. Needed to keep members private.
// Should only be called with monotonically increasing versions, so that
// the slice remains sorted by version in ascending order.
func (s VersionedSlice[T]) Append(value T, tx *Tx) VersionedSlice[T] {
	entry := Versioned[T]{
		version: tx.nextVersion,
		value:   value,
	}
	return append(s, entry)
}
// Before returns an iterator over the elements of 's' whose version is
// strictly earlier than 'keepVersion'. The slice is assumed to be sorted
// by version in ascending order, so iteration stops at the first element
// at or past the limit.
func (s VersionedSlice[T]) Before(keepVersion KeepVersion) iter.Seq[T] {
	limit := version(keepVersion)
	return func(yield func(T) bool) {
		for _, entry := range s {
			if entry.version >= limit {
				break
			}
			if !yield(entry.value) {
				return
			}
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package controller
import (
"github.com/cilium/hive/cell"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/metrics/metric"
)
const (
	// controllerGroupMetrics is the name of the flag selecting which
	// controller groups report run metrics.
	controllerGroupMetrics = "controller-group-metrics"
	// labelControllerGroupName is the label used
	// to identify controller-specific metrics
	labelControllerGroupName = "group_name"
)
var (
	// groupMetricEnabled is populated with the set of ControllerGroup names for which
	// metrics are enabled (filled in by Init from the configuration).
	groupMetricEnabled = map[string]bool{}
	// GroupRuns is a Prometheus-compatible metric for Controller
	// runs, labeled by completion status and Group name.
	// It is a no-op until Init installs the real counter vector.
	GroupRuns = metrics.NoOpCounterVec
)
// Cell provides the controller metrics configuration, registers the metrics
// and runs the package initialization (Init) within the hive.
var Cell = cell.Module(
	"controller",
	"Controllers and Controller Lifecycle management",
	cell.Config(defaultConfig),
	metrics.Metric(NewMetrics),
	cell.Invoke(Init),
)
// Metrics holds the controller metrics registered with the hive.
type Metrics struct {
	// ControllerGroupRuns counts controller runs, labeled by controller group
	// name and completion status.
	ControllerGroupRuns metric.Vec[metric.Counter]
}

// NewMetrics constructs the controller metrics.
func NewMetrics() Metrics {
	return Metrics{
		ControllerGroupRuns: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: metrics.Namespace + "_controllers_group_runs_total",
			Namespace:  metrics.Namespace,
			Name:       "controllers_group_runs_total",
			Help:       "Number of times that a controller group was run, labeled by completion status and controller group name",
		}, []string{labelControllerGroupName, metrics.LabelStatus}),
	}
}
// Config is the configuration of the controller cell.
type Config struct {
	// ControllerGroupMetrics is an option which specifies the set of ControllerGroups names
	// for which metrics will be enabled. The special values 'all' and 'none' are supported.
	ControllerGroupMetrics []string
}
// Flags registers the controller cell's configuration flags.
func (cfg Config) Flags(flags *pflag.FlagSet) {
	// Fixed duplicated word ("to to") in the user-visible help text.
	flags.StringSlice(controllerGroupMetrics, cfg.ControllerGroupMetrics,
		"List of controller group names for which to enable metrics. "+
			"Accepts 'all' and 'none'. "+
			"The set of controller group names available is not guaranteed to be stable between Cilium versions.")
}
// defaultConfig enables metrics for no controller groups by default.
var defaultConfig = Config{
	ControllerGroupMetrics: []string{},
}
// Init populates the package-scoped references (group metric configuration
// and the group-runs metric) from the cell's Config and Metrics.
func Init(cfg Config, m Metrics) {
	for _, groupName := range cfg.ControllerGroupMetrics {
		groupMetricEnabled[groupName] = true
	}
	GroupRuns = m.ControllerGroupRuns
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package controller
import (
"context"
"errors"
"fmt"
"log/slog"
"math"
"math/rand/v2"
stdtime "time"
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/time"
)
const (
	// success and failure are the values of the completion-status metric label.
	success = "success"
	failure = "failure"

	// special Group "names" for metrics config
	allControllerMetricsEnabled = "all"
	noControllerMetricsEnabled  = "none"
)
// ControllerFunc is a function that the controller runs. This type is used for
// DoFunc and StopFunc. The passed context is canceled when the controller is
// stopped or (when CancelDoFuncOnUpdate is set) updated.
type ControllerFunc func(ctx context.Context) error
// ExitReason is a returnable type from DoFunc that causes the
// controller to exit. This reason is recorded in the controller's status. The
// controller is not removed from any manager.
// Construct one with NewExitReason("a reason")
type ExitReason struct {
	// This is constructed in this odd way because the type assertion in
	// runController didn't work otherwise.
	error
}

// NewExitReason returns a new ExitReason wrapping the given reason string.
func NewExitReason(reason string) ExitReason {
	return ExitReason{errors.New(reason)}
}
// Group contains metadata about a group of controllers
type Group struct {
	// Name of the controller group.
	//
	// This name MUST NOT be dynamically generated based on
	// resource identifier in order to limit metrics cardinality.
	Name string
}

// NewGroup returns a Group carrying the given name.
func NewGroup(name string) Group {
	var g Group
	g.Name = name
	return g
}
// ControllerParams contains all parameters of a controller
type ControllerParams struct {
	// Group is used for aggregate metrics collection.
	// The Group.Name must NOT be dynamically generated from a
	// resource identifier in order to limit metrics cardinality.
	Group Group

	// Health is the (optional) health reporter; run outcomes are reported to
	// it via Degraded/OK.
	Health cell.Health

	// DoFunc is the function that will be run until it succeeds and/or
	// using the interval RunInterval if not 0.
	// An unset DoFunc is an error and will be logged as one.
	DoFunc ControllerFunc

	// CancelDoFuncOnUpdate when set to true cancels the controller context
	// (the DoFunc) to allow quick termination of controller
	CancelDoFuncOnUpdate bool

	// StopFunc is called when the controller stops. It is intended to run any
	// clean-up tasks for the controller (e.g. deallocate/release resources)
	// It is guaranteed that DoFunc is called at least once before StopFunc is
	// called.
	// An unset StopFunc is not an error (and will be a no-op)
	// Note: Since this occurs on controller exit, error counts and tracking may
	// not be checked after StopFunc is run.
	StopFunc ControllerFunc

	// If set to any other value than 0, will cause DoFunc to be run in the
	// specified interval. The interval starts from when the DoFunc has
	// returned last
	RunInterval time.Duration

	// If set to any other value than 0, will cap the error retry interval
	// to the specified interval.
	MaxRetryInterval time.Duration

	// ErrorRetryBaseDuration is the initial time to wait to run DoFunc
	// again on return of an error. On each consecutive error, this value
	// is multiplied by the number of consecutive errors to provide a
	// constant back off. The default is 1s.
	ErrorRetryBaseDuration time.Duration

	// NoErrorRetry when set to true, disables retries on errors
	NoErrorRetry bool

	// Context is the (optional) base context for DoFunc runs; when nil,
	// context.Background() is used (see SetParams).
	Context context.Context

	// Jitter represents the maximum duration to delay the execution of DoFunc.
	Jitter time.Duration
}
// undefinedDoFunc builds the error reported when a controller was incorrectly
// initialised without a DoFunc; SetParams installs a DoFunc returning it.
func undefinedDoFunc(name string) error {
	msg := fmt.Sprintf("controller %s DoFunc is nil", name)
	return errors.New(msg)
}
// NoopFunc is a no-op placeholder for DoFunc & StopFunc.
// It is automatically used when StopFunc is undefined, and can be used as a
// DoFunc stub when the controller should only run StopFunc.
func NoopFunc(ctx context.Context) error {
return nil
}
// isGroupMetricEnabled returns true if metrics are enabled for the Group
//
// The controller metrics config option is used to determine
// if "all", "none" (takes precedence over "all"), or the
// given set of Group names should be enabled.
//
// If no controller metrics config option was provided,
// only then is the DefaultMetricsEnabled field used.
func isGroupMetricEnabled(g Group) bool {
	cfg := groupMetricEnabled
	if cfg == nil {
		// There is currently no guarantee that a caller of this function
		// has initialized the configuration map using the hive cell.
		return false
	}
	switch {
	case cfg[noControllerMetricsEnabled]:
		// "none" takes precedence over "all"
		return false
	case cfg[allControllerMetricsEnabled]:
		return true
	default:
		return cfg[g.Name]
	}
}
// Controller is a simple pattern that allows to perform the following
// tasks:
//   - Run an operation in the background and retry until it succeeds
//   - Perform a regular sync operation in the background
//
// A controller has configurable retry intervals and will collect statistics
// on number of successful runs, number of failures, last error message,
// and last error timestamp.
//
// Controllers have a name and are tied to a Manager. The manager is typically
// bound to higher level objects such as endpoint. These higher level objects
// can then run multiple controllers to perform async tasks such as:
//   - Annotating k8s resources with values
//   - Synchronizing an object with the kvstore
//   - Any other async operation that may fail and require retries
//
// Embedding the Manager into higher level resources allows to bind controllers
// to the lifetime of that object. Controllers also have a UUID to allow
// correlating all log messages of a controller instance.
//
// Guidelines to writing controllers:
//   - Make sure that the task the controller performs is done in an atomic
//     fashion, e.g. if a controller modifies a resource in multiple steps, an
//     intermediate manipulation operation failing should not leave behind
//     an inconsistent state. This can typically be achieved by locking the
//     resource and rolling back or by using transactions.
//   - Controllers typically act on behalf of a higher level object such as an
//     endpoint. The controller must ensure that the higher level object is
//     properly locked when accessing any fields.
//   - Controllers run asynchronously in the background, it is the responsibility
//     of the controller to be aware of the lifecycle of the owning higher level
//     object. This is typically achieved by removing all controllers when the
//     owner dies. It is the responsibility of the owner to either lock the owner
//     in a way that will delay destruction throughout the controller run or to
//     check for the destruction throughout the run.
type controller struct {
	// Constant after creation, safe to access without locking
	group  Group
	name   string
	uuid   string
	logger *slog.Logger

	// Channels written to and/or closed by the manager
	stop    chan struct{}
	update  chan struct{}
	trigger chan struct{}

	// terminated is closed by the controller goroutine when it terminates
	terminated chan struct{}

	// Manipulated by the controller, read by the Manager, requires locking
	mutex             lock.RWMutex
	successCount      int
	lastSuccessStamp  time.Time
	failureCount      int
	consecutiveErrors int
	lastError         error
	lastErrorStamp    time.Time
	lastDuration      time.Duration

	// Manipulated by the Manager, read by the controller.
	paramMutex   lock.Mutex
	params       ControllerParams
	cancelDoFunc context.CancelFunc
}
// Params returns a snapshot of the controller's current parameters.
func (c *controller) Params() ControllerParams {
	c.paramMutex.Lock()
	snapshot := c.params
	c.paramMutex.Unlock()
	return snapshot
}
// SetParams sanitizes and sets the controller's parameters.
//
// If the RunInterval exceeds ControllerMaxInterval, it will be capped.
//
// Manager's mutex must be held; controller.mutex must not be held
func (c *controller) SetParams(params ControllerParams) {
	c.paramMutex.Lock()
	defer c.paramMutex.Unlock()

	// ensure the callbacks are valid
	if params.DoFunc == nil {
		// Substitute a DoFunc that reports the misconfiguration as an error.
		params.DoFunc = func(ctx context.Context) error {
			return undefinedDoFunc(c.name)
		}
	}
	if params.StopFunc == nil {
		params.StopFunc = NoopFunc
	}

	// Enforce max controller interval
	maxInterval := time.Duration(option.Config.MaxControllerInterval) * time.Second
	if maxInterval > 0 && params.RunInterval > maxInterval {
		c.logger.Info("Limiting interval",
			logfields.Interval, maxInterval,
		)
		params.RunInterval = maxInterval
	}

	// Save current context on update if not canceling
	ctx := c.params.Context
	// Check if the current context needs to be cancelled
	if c.params.CancelDoFuncOnUpdate && c.cancelDoFunc != nil {
		c.cancelDoFunc()
		c.params.Context = nil
	}
	// (re)set the context as the previous might have been cancelled
	if c.params.Context == nil {
		if params.Context == nil {
			ctx, c.cancelDoFunc = context.WithCancel(context.Background())
		} else {
			ctx, c.cancelDoFunc = context.WithCancel(params.Context)
		}
	}
	// Install the sanitized params, keeping the context computed above.
	c.params = params
	c.params.Context = ctx
}
// GetSuccessCount returns the number of successful controller runs.
func (c *controller) GetSuccessCount() int {
	c.mutex.RLock()
	count := c.successCount
	c.mutex.RUnlock()
	return count
}

// GetFailureCount returns the number of failed controller runs.
func (c *controller) GetFailureCount() int {
	c.mutex.RLock()
	count := c.failureCount
	c.mutex.RUnlock()
	return count
}

// GetLastError returns the error of the most recent failed run, or nil.
func (c *controller) GetLastError() error {
	c.mutex.RLock()
	err := c.lastError
	c.mutex.RUnlock()
	return err
}

// GetLastErrorTimestamp returns the timestamp of the most recent error.
func (c *controller) GetLastErrorTimestamp() time.Time {
	c.mutex.RLock()
	stamp := c.lastErrorStamp
	c.mutex.RUnlock()
	return stamp
}
// runController is the controller's main loop. It repeatedly runs DoFunc,
// records success/failure statistics, and waits for the next run interval,
// an update, a trigger, or a stop signal. It runs in its own goroutine and
// exits only via the shutdown label, after which StopFunc is invoked and the
// 'terminated' channel is closed.
func (c *controller) runController() {
	params := c.Params()
	errorRetries := 1

	for {
		var err error

		// Re-read the parameters: they may have been replaced via SetParams.
		params = c.Params()
		interval := params.RunInterval

		start := time.Now()
		jitter := time.Duration(0)
		if params.Jitter > 0 {
			// Delay the run by a random duration in [0, Jitter).
			jitter = time.Duration(rand.Int64N(int64(params.Jitter)))
			select {
			case <-time.After(jitter):
				// jitter sleep finished
			case <-params.Context.Done():
				// context cancelled, exit early but ensure shutdown logic runs
				goto shutdown
			case <-c.stop:
				// controller stopped during jitter sleep
				goto shutdown
			}
		}
		err = params.DoFunc(params.Context)
		duration := time.Since(start)

		c.mutex.Lock()
		c.lastDuration = duration
		c.logger.Debug("Controller func executed", logfields.Duration, c.lastDuration)

		if err != nil {
			if params.Context.Err() != nil {
				// The controller's context was canceled. Let's wait for the
				// next controller update (or stop).
				err = NewExitReason("controller context canceled")
			}

			var exitReason ExitReason
			if errors.As(err, &exitReason) {
				// This is actually not an error case, but it causes an exit
				c.recordSuccess(params.Health)
				c.lastError = exitReason // This will be shown in the controller status

				// Don't exit the goroutine, since that only happens when the
				// controller is explicitly stopped. Instead, just wait for
				// the next update.
				c.logger.Debug("Controller run succeeded; waiting for next controller update or stop")
				interval = time.Duration(math.MaxInt64)
			} else {
				c.logger.Debug(
					"Controller run failed",
					fieldConsecutiveErrors, errorRetries,
					logfields.Error, err,
				)
				c.recordError(err, params.Health)

				if !params.NoErrorRetry {
					// Linear back-off: the wait grows with the number of
					// consecutive failures.
					if params.ErrorRetryBaseDuration != time.Duration(0) {
						interval = time.Duration(errorRetries) * params.ErrorRetryBaseDuration
					} else {
						interval = time.Duration(errorRetries) * time.Second
					}

					if params.MaxRetryInterval > 0 && interval > params.MaxRetryInterval {
						c.logger.Debug(
							"Cap retry interval to max allowed value",
							logfields.CalculatedInterval, interval,
							logfields.MaxAllowedInterval, params.MaxRetryInterval,
						)
						interval = params.MaxRetryInterval
					}

					errorRetries++
				}
			}
		} else {
			c.recordSuccess(params.Health)

			// reset error retries after successful attempt
			errorRetries = 1

			// If no run interval is specified, no further updates
			// are required.
			if interval == time.Duration(0) {
				// Don't exit the goroutine, since that only happens when the
				// controller is explicitly stopped. Instead, just wait for
				// the next update.
				c.logger.Debug("Controller run succeeded; waiting for next controller update or stop")
				interval = time.Duration(math.MaxInt64)
			}
		}

		c.mutex.Unlock()

		select {
		case <-c.stop:
			goto shutdown

		case <-c.update:
			// update channel is never closed

		case <-stdtime.After(interval):
			// timer channel is not yet closed

		case <-c.trigger:
			// trigger channel is never closed
		}

		// If we receive a signal on multiple channels golang will pick one randomly.
		// This select will make sure we don't execute the controller
		// while we are shutting down.
		select {
		case <-c.stop:
			goto shutdown
		default:
		}
	}

shutdown:
	c.logger.Debug("Shutting down controller")

	if err := params.StopFunc(context.TODO()); err != nil {
		c.mutex.Lock()
		c.recordError(err, params.Health)
		c.mutex.Unlock()
		c.logger.Warn(
			"Error on Controller stop",
			fieldConsecutiveErrors, errorRetries,
			logfields.Error, err,
		)
	}

	close(c.terminated)
}
// recordError updates all statistic collection variables on error
// c.mutex must be held.
func (c *controller) recordError(err error, h cell.Health) {
	// Report degradation to the health reporter, when one was provided.
	if h != nil {
		h.Degraded(c.name, err)
	}
	c.lastError = err
	c.lastErrorStamp = time.Now()
	c.failureCount++
	c.consecutiveErrors++

	// Global run counter, plus the per-group counter when enabled for this group.
	metrics.ControllerRuns.WithLabelValues(failure).Inc()
	if isGroupMetricEnabled(c.group) {
		GroupRuns.WithLabelValues(c.group.Name, failure).Inc()
	}
	metrics.ControllerRunsDuration.WithLabelValues(failure).Observe(c.lastDuration.Seconds())
}
// recordSuccess updates all statistic collection variables on success
// c.mutex must be held.
func (c *controller) recordSuccess(h cell.Health) {
	// Report health OK, when a health reporter was provided.
	if h != nil {
		h.OK(c.name)
	}

	c.lastError = nil
	c.lastSuccessStamp = time.Now()
	c.successCount++
	c.consecutiveErrors = 0

	// Global run counter, plus the per-group counter when enabled for this group.
	metrics.ControllerRuns.WithLabelValues(success).Inc()
	if isGroupMetricEnabled(c.group) {
		GroupRuns.WithLabelValues(c.group.Name, success).Inc()
	}
	metrics.ControllerRunsDuration.WithLabelValues(success).Observe(c.lastDuration.Seconds())
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package controller
import (
"fmt"
"maps"
"github.com/go-openapi/strfmt"
"github.com/google/uuid"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
var (
	// globalStatus is the global status of all controllers
	globalStatus = NewManager()
)

// controllerMap maps controller names to their managed instances.
type controllerMap map[string]*managedController

// Manager is a list of controllers
type Manager struct {
	controllers controllerMap
	mutex       lock.RWMutex // guards 'controllers'
}
// NewManager allocates a new, empty controller manager.
func NewManager() *Manager {
	mgr := new(Manager)
	mgr.controllers = make(controllerMap)
	return mgr
}
// GetGlobalStatus returns the status of all controllers registered in the
// package-global manager.
func GetGlobalStatus() models.ControllerStatuses {
	return globalStatus.GetStatusModel()
}
// UpdateController installs or updates a controller in the
// manager. A controller is primarily identified by its name.
// If a controller with the name already exists, the controller
// will be shut down and replaced with the provided controller.
//
// Updating a controller will cause the DoFunc to be run immediately regardless
// of any previous conditions. It will also cause any statistics to be reset.
//
// If multiple callers make an UpdateController call within a short period,
// then this function may elide intermediate updates, depending on how long it
// takes to complete DoFunc. The final parameters update will be applied and
// run when the controller catches up.
func (m *Manager) UpdateController(name string, params ControllerParams) {
	m.updateController(name, params)
}
// updateController installs or updates the named controller and returns it.
// Existing controllers get their parameters replaced and are nudged via the
// update channel; unknown names result in a freshly started controller.
func (m *Manager) updateController(name string, params ControllerParams) *managedController {
	begin := time.Now()

	m.mutex.Lock()
	defer m.mutex.Unlock()

	if m.controllers == nil {
		m.controllers = controllerMap{}
	}

	ctrl := m.lookupLocked(name)
	if ctrl == nil {
		ctrl = m.createControllerLocked(name, params)
	} else {
		ctrl.logger.Debug("Updating existing controller")
		ctrl.SetParams(params)

		// Notify the goroutine of the params update; drop the notification
		// when one is already pending (the channel has capacity 1).
		select {
		case ctrl.update <- struct{}{}:
		default:
		}

		ctrl.logger.Debug("Controller update time", logfields.Duration, time.Since(begin))
	}

	if params.Group.Name == "" {
		ctrl.logger.Error(
			"Controller initialized with unpopulated group information. " +
				"Metrics will not be exported for this controller.")
	}

	return ctrl
}
// createControllerLocked builds, registers and starts a new controller.
// The Manager's mutex must be held.
func (m *Manager) createControllerLocked(name string, params ControllerParams) *managedController {
	// Named ctrlUUID to avoid shadowing the imported uuid package.
	ctrlUUID := uuid.New().String()
	ctrl := &managedController{
		controller: controller{
			// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
			logger: logging.DefaultSlogLogger.With(
				logfields.LogSubsys, "controller",
				fieldControllerName, name,
				fieldUUID, ctrlUUID,
			),
			name:       name,
			group:      params.Group,
			uuid:       ctrlUUID,
			stop:       make(chan struct{}),
			update:     make(chan struct{}, 1),
			trigger:    make(chan struct{}, 1),
			terminated: make(chan struct{}),
		},
	}
	ctrl.SetParams(params)
	ctrl.logger.Debug("Starting new controller")

	m.controllers[name] = ctrl

	// Also register with the package-global status manager.
	globalStatus.mutex.Lock()
	globalStatus.controllers[ctrlUUID] = ctrl
	globalStatus.mutex.Unlock()

	go ctrl.runController()
	return ctrl
}
// CreateController installs a new controller in the
// manager. If a controller with the name already exists
// this method returns false without triggering, otherwise
// creates the controller and runs it immediately.
func (m *Manager) CreateController(name string, params ControllerParams) bool {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if m.controllers == nil {
		m.controllers = controllerMap{}
	} else if m.lookupLocked(name) != nil {
		// Never replace an existing controller.
		return false
	}

	m.createControllerLocked(name, params)
	return true
}
// removeController stops the controller and unregisters it from both this
// manager and the package-global status manager.
// The Manager's mutex must be held.
func (m *Manager) removeController(ctrl *managedController) {
	ctrl.stopController()
	delete(m.controllers, ctrl.name)

	globalStatus.mutex.Lock()
	delete(globalStatus.controllers, ctrl.uuid)
	globalStatus.mutex.Unlock()

	ctrl.logger.Debug("Removed controller")
}
// lookup returns the named controller, or nil, taking the read lock.
func (m *Manager) lookup(name string) *managedController {
	m.mutex.RLock()
	ctrl := m.lookupLocked(name)
	m.mutex.RUnlock()
	return ctrl
}

// lookupLocked returns the named controller, or nil when absent.
// The Manager's mutex must be held.
func (m *Manager) lookupLocked(name string) *managedController {
	// A missing key yields the map's zero value, i.e. a nil pointer.
	return m.controllers[name]
}
// removeAndReturnController removes the named controller and hands it back so
// callers can wait on its termination; an error is returned when it is absent.
func (m *Manager) removeAndReturnController(name string) (*managedController, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if m.controllers == nil {
		return nil, fmt.Errorf("empty controller map")
	}

	ctrl := m.lookupLocked(name)
	if ctrl == nil {
		return nil, fmt.Errorf("unable to find controller %s", name)
	}

	m.removeController(ctrl)
	return ctrl, nil
}
// RemoveController stops and removes a controller from the manager. If DoFunc
// is currently running, DoFunc is allowed to complete in the background.
func (m *Manager) RemoveController(name string) error {
	_, err := m.removeAndReturnController(name)
	return err
}

// RemoveControllerAndWait stops and removes a controller using
// RemoveController() and then waits for it to run to completion.
func (m *Manager) RemoveControllerAndWait(name string) error {
	ctrl, err := m.removeAndReturnController(name)
	if err != nil {
		return err
	}
	// Block until the controller goroutine has exited.
	<-ctrl.terminated
	return nil
}
// removeAll removes every controller from the manager and returns them so
// that callers may wait for their termination.
func (m *Manager) removeAll() []*managedController {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	// Always hand back a non-nil (possibly empty) slice.
	removed := make([]*managedController, 0, len(m.controllers))
	for _, ctrl := range m.controllers {
		m.removeController(ctrl)
		removed = append(removed, ctrl)
	}
	return removed
}

// RemoveAll stops and removes all controllers of the manager
func (m *Manager) RemoveAll() {
	m.removeAll()
}

// RemoveAllAndWait stops and removes all controllers of the manager and then
// waits for all controllers to exit
func (m *Manager) RemoveAllAndWait() {
	for _, ctrl := range m.removeAll() {
		<-ctrl.terminated
	}
}
// GetStatusModel returns the status of all controllers as models.ControllerStatuses
func (m *Manager) GetStatusModel() models.ControllerStatuses {
	// Snapshot the controller pointers so the manager mutex is released
	// before building the (potentially slower) status models.
	m.mutex.RLock()
	snapshot := make(controllerMap, len(m.controllers))
	maps.Copy(snapshot, m.controllers)
	m.mutex.RUnlock()

	statuses := make(models.ControllerStatuses, 0, len(snapshot))
	for _, ctrl := range snapshot {
		statuses = append(statuses, ctrl.GetStatusModel())
	}
	return statuses
}
// TriggerController triggers the controller with the specified name.
// Unknown names are ignored; a trigger already pending is not duplicated.
func (m *Manager) TriggerController(name string) {
	if ctrl := m.lookup(name); ctrl != nil {
		select {
		case ctrl.trigger <- struct{}{}:
		default:
		}
	}
}
// managedController is a controller instance tracked by a Manager.
type managedController struct {
	controller
}

// stopController cancels any in-flight DoFunc context and signals the
// controller goroutine to shut down by closing the stop channel.
func (c *managedController) stopController() {
	if c.cancelDoFunc != nil {
		c.cancelDoFunc()
	}

	close(c.stop)
}
// GetStatusModel returns a models.ControllerStatus representing the
// controller's configuration & status
func (c *managedController) GetStatusModel() *models.ControllerStatus {
	params := c.Params()

	cfg := &models.ControllerStatusConfiguration{
		ErrorRetry:     !params.NoErrorRetry,
		ErrorRetryBase: strfmt.Duration(params.ErrorRetryBaseDuration),
		Interval:       strfmt.Duration(params.RunInterval),
	}

	c.mutex.RLock()
	defer c.mutex.RUnlock()

	st := &models.ControllerStatusStatus{
		SuccessCount:            int64(c.successCount),
		LastSuccessTimestamp:    strfmt.DateTime(c.lastSuccessStamp),
		FailureCount:            int64(c.failureCount),
		LastFailureTimestamp:    strfmt.DateTime(c.lastErrorStamp),
		ConsecutiveFailureCount: int64(c.consecutiveErrors),
	}
	if c.lastError != nil {
		st.LastFailureMsg = c.lastError.Error()
	}

	return &models.ControllerStatus{
		Name:          c.name,
		UUID:          strfmt.UUID(c.uuid),
		Configuration: cfg,
		Status:        st,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package counter
import "maps"
// Counter tracks reference counts for values of any comparable type.
//
// No thread safety is provided within this structure, the user is expected to
// handle concurrent access to this structure if it is used from multiple
// threads.
type Counter[T comparable] map[T]int

// Add increments the reference count for the specified key and reports
// whether this created the first reference.
func (c Counter[T]) Add(key T) bool {
	prev, present := c[key]
	c[key] = prev + 1
	return !present
}

// Delete decrements the reference count for the specified key and reports
// whether the last reference was removed.
func (c Counter[T]) Delete(key T) bool {
	if refs := c[key]; refs > 1 {
		c[key] = refs - 1
		return false
	}
	delete(c, key)
	return true
}

// DeepCopy returns an independent copy of the receiver.
func (c Counter[T]) DeepCopy() Counter[T] {
	clone := make(Counter[T], len(c))
	maps.Copy(clone, c)
	return clone
}

// Has returns true if the given key has a non-zero refcount.
func (c Counter[T]) Has(key T) bool {
	return c[key] > 0
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package counter
import (
"sort"
)
// IntCounter tracks references for integer keys.
//
// No threadsafety is provided within this structure, the user is expected to
// handle concurrent access to this structure if it is used from multiple
// threads.
type IntCounter Counter[int]

// Add increments the reference count for the specified integer key and
// reports whether the key is new.
func (i IntCounter) Add(key int) (changed bool) {
	return Counter[int](i).Add(key)
}

// Delete decrements the reference count for the specified integer key and
// reports whether the last reference was removed.
func (i IntCounter) Delete(key int) bool {
	return Counter[int](i).Delete(key)
}

// DeepCopy returns an independent copy of the received IntCounter.
func (i IntCounter) DeepCopy() IntCounter {
	return IntCounter(Counter[int](i).DeepCopy())
}

// ToBPFData returns the tracked keys as a slice, sorted from high to low.
func (i IntCounter) ToBPFData() []int {
	keys := make([]int, 0, len(i))
	for key := range i {
		keys = append(keys, key)
	}
	sort.Slice(keys, func(a, b int) bool { return keys[a] > keys[b] })
	return keys
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package counter
import (
"fmt"
"net"
"net/netip"
"github.com/cilium/cilium/pkg/lock"
)
// PrefixLengthCounter tracks references to prefix lengths, limited by the
// maxUniquePrefixes count. Neither of the IPv4 or IPv6 counters nested within
// may contain more keys than the specified maximum number of unique prefixes.
type PrefixLengthCounter struct {
	lock.RWMutex // guards all fields below

	v4 IntCounter // reference counts per IPv4 prefix length
	v6 IntCounter // reference counts per IPv6 prefix length

	maxUniquePrefixes4 int
	maxUniquePrefixes6 int
}
// NewPrefixLengthCounter returns a new PrefixLengthCounter which limits
// insertions to the specified maximum number of unique prefix lengths.
// Note the parameter order: the IPv6 limit comes first.
func NewPrefixLengthCounter(maxUniquePrefixes6, maxUniquePrefixes4 int) *PrefixLengthCounter {
	counter := &PrefixLengthCounter{
		v4:                 make(IntCounter),
		v6:                 make(IntCounter),
		maxUniquePrefixes4: maxUniquePrefixes4,
		maxUniquePrefixes6: maxUniquePrefixes6,
	}
	return counter
}
func createIPNet(ones, bits int) netip.Prefix {
var addr netip.Addr
switch bits {
case net.IPv4len * 8:
addr = netip.IPv4Unspecified()
case net.IPv6len * 8:
addr = netip.IPv6Unspecified()
default:
// fall through to default library error
}
return netip.PrefixFrom(addr, ones)
}
// DefaultPrefixLengthCounter creates a default prefix length counter that
// already counts the minimum and maximum prefix lengths for IP hosts and
// default routes (ie, /32 and /0). As with NewPrefixLengthCounter, insertions
// are limited to the specified maximum number of unique prefix lengths.
func DefaultPrefixLengthCounter() *PrefixLengthCounter {
	const (
		v4Bits = net.IPv4len * 8
		v6Bits = net.IPv6len * 8
	)

	// Allow every possible prefix length (0..bits inclusive).
	counter := NewPrefixLengthCounter(v6Bits+1, v4Bits+1)

	defaults := []netip.Prefix{
		createIPNet(0, v4Bits),      // IPv4 world
		createIPNet(v4Bits, v4Bits), // IPv4 hosts
		createIPNet(0, v6Bits),      // IPv6 world
		createIPNet(v6Bits, v6Bits), // IPv6 hosts
	}
	if _, err := counter.Add(defaults); err != nil {
		panic(fmt.Errorf("Failed to create default prefix lengths: %w", err))
	}

	return counter
}
// checkLimits checks whether the specified new count of prefixes would exceed
// the specified limit on the maximum number of unique keys, and returns an
// error if it would exceed the limit.
func checkLimits(current, newCount, limit int) error {
	if newCount <= limit {
		return nil
	}
	return fmt.Errorf("adding specified prefixes would result in too many prefix lengths (current: %d, result: %d, max: %d)",
		current, newCount, limit)
}
// Add increments references to prefix lengths for the specified IPNets to the
// counter. If the maximum number of unique prefix lengths would be exceeded,
// returns an error.
//
// Returns true if adding these prefixes results in an increase in the total
// number of unique prefix lengths in the counter.
func (p *PrefixLengthCounter) Add(prefixes []netip.Prefix) (bool, error) {
	p.Lock()
	defer p.Unlock()

	// Work on copies of the counters so that nothing is committed when the
	// limit check (or an unsupported prefix) fails partway through.
	newV4Counter := p.v4.DeepCopy()
	newV6Counter := p.v6.DeepCopy()
	newV4Prefixes := false
	newV6Prefixes := false

	for _, prefix := range prefixes {
		ones := prefix.Bits()
		bits := prefix.Addr().BitLen()

		switch bits {
		case net.IPv4len * 8:
			if newV4Counter.Add(ones) {
				newV4Prefixes = true
			}
		case net.IPv6len * 8:
			if newV6Counter.Add(ones) {
				newV6Prefixes = true
			}
		default:
			return false, fmt.Errorf("unsupported IPAddr bitlength %d", bits)
		}
	}

	// Check if they can be added given the limit in place
	if newV4Prefixes {
		if err := checkLimits(len(p.v4), len(newV4Counter), p.maxUniquePrefixes4); err != nil {
			return false, err
		}
	}
	if newV6Prefixes {
		if err := checkLimits(len(p.v6), len(newV6Counter), p.maxUniquePrefixes6); err != nil {
			return false, err
		}
	}

	// Commit the copies and report whether any new unique length appeared.
	p.v4 = newV4Counter
	p.v6 = newV6Counter
	return newV4Prefixes || newV6Prefixes, nil
}
// Delete reduces references to prefix lengths in the specified IPNets from
// the counter. Returns true if removing references to these prefix lengths
// would result in a decrease in the total number of unique prefix lengths in
// the counter.
func (p *PrefixLengthCounter) Delete(prefixes []netip.Prefix) (changed bool) {
	p.Lock()
	defer p.Unlock()

	for _, prefix := range prefixes {
		length := prefix.Bits()
		// Unlike Add, unsupported bit lengths are silently skipped here.
		switch prefix.Addr().BitLen() {
		case net.IPv4len * 8:
			changed = p.v4.Delete(length) || changed
		case net.IPv6len * 8:
			changed = p.v6.Delete(length) || changed
		}
	}

	return changed
}
// ToBPFData converts the counter into a set of prefix lengths that the BPF
// datapath can use for LPM lookup.
func (p *PrefixLengthCounter) ToBPFData() (s6, s4 []int) {
	p.RLock()
	s6, s4 = p.v6.ToBPFData(), p.v4.ToBPFData()
	p.RUnlock()
	return s6, s4
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package certificatemanager
import (
"context"
"fmt"
"log/slog"
"os"
"path/filepath"
"github.com/cilium/hive/cell"
"github.com/spf13/pflag"
k8sClient "github.com/cilium/cilium/pkg/k8s/client"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/policy/api"
)
// Cell provides the certificate manager, which serves TLS certificates and
// secrets to the rest of the agent.
var Cell = cell.Module(
	"certificate-manager",
	"Provides TLS certificates and secrets",
	cell.Provide(NewManager),
	cell.Config(defaultManagerConfig),
)
// CertificateManager resolves the TLS certificate material referenced by a
// policy TLS context.
type CertificateManager interface {
	GetTLSContext(ctx context.Context, tlsCtx *api.TLSContext, ns string) (ca, public, private string, inlineSecrets bool, err error)
}

// SecretManager resolves secret values referenced by policy and exposes the
// policy secret-sync configuration.
type SecretManager interface {
	GetSecretString(ctx context.Context, secret *api.Secret, ns string) (string, error)
	PolicySecretSyncEnabled() bool
	SecretsOnlyFromSecretsNamespace() bool
	GetSecretSyncNamespace() string
}
// defaultManagerConfig is the default certificate manager configuration.
var defaultManagerConfig = managerConfig{
	CertificatesDirectory: "/var/run/cilium/certs",
}

// managerConfig is the certificate manager cell's configuration (see Flags
// for the corresponding command-line options).
type managerConfig struct {
	// CertificatesDirectory is the root directory to be used by cilium to find
	// certificates locally.
	CertificatesDirectory string

	// EnablePolicySecretsSync enables Envoy secret sync for Secrets used in
	// CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy.
	EnablePolicySecretsSync bool

	// PolicySecretsNamespace is the namespace having secrets used in CNP and
	// CCNP.
	PolicySecretsNamespace string

	// PolicySecretsOnlyFromSecretsNamespace configures the agent to only read
	// policy Secrets from PolicySecretsNamespace.
	PolicySecretsOnlyFromSecretsNamespace bool
}
// Flags registers the certificate manager's configuration flags.
func (mc managerConfig) Flags(flags *pflag.FlagSet) {
	flags.String("certificates-directory", mc.CertificatesDirectory, "Root directory to find certificates specified in L7 TLS policy enforcement")
	flags.Bool("enable-policy-secrets-sync", mc.EnablePolicySecretsSync, "Enables Envoy secret sync for Secrets used in CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy")
	flags.Bool("policy-secrets-only-from-secrets-namespace", mc.PolicySecretsOnlyFromSecretsNamespace, "Configures the agent to only read policy Secrets from the policy-secrets-namespace")
	// Fixed typo in the user-visible flag description ("Namesapce" -> "Namespace").
	flags.String("policy-secrets-namespace", mc.PolicySecretsNamespace, "PolicySecretsNamespace is the namespace having secrets used in CNP and CCNP")
}
// manager will manage the way certificates are retrieved based in the given
// k8sClient and rootPath.
type manager struct {
	Logger *slog.Logger

	// rootPath is the local directory searched for certificates.
	rootPath string

	k8sClient k8sClient.Clientset

	// secretSyncNamespace is only set when secrets are restricted to the
	// configured policy secrets namespace (see NewManager).
	secretSyncNamespace string

	secretSyncEnabled              bool
	secretsFromSecretSyncNamespace bool
}
// NewManager returns a new manager, exposed both as a CertificateManager and
// as a SecretManager.
func NewManager(cfg managerConfig, clientset k8sClient.Clientset, logger *slog.Logger) (CertificateManager, SecretManager) {
	m := &manager{
		Logger:                         logger,
		rootPath:                       cfg.CertificatesDirectory,
		k8sClient:                      clientset,
		secretSyncEnabled:              cfg.EnablePolicySecretsSync,
		secretsFromSecretSyncNamespace: cfg.PolicySecretsOnlyFromSecretsNamespace,
	}

	// The sync namespace is only relevant when reads are restricted to it.
	if cfg.PolicySecretsOnlyFromSecretsNamespace {
		m.secretSyncNamespace = cfg.PolicySecretsNamespace
	}

	return m, m
}
// GetSecretSyncNamespace returns the configured secret synchronization namespace.
// An empty value means that secret synchronization is not enabled, and that
// the agent should read values from secrets used in policy directly, which requires
// the agent to have read access to all namespaces.
// Secret Synchronization config includes granting access to the policy-secrets-namespace, configured
// in the envoy Cell.
func (m *manager) GetSecretSyncNamespace() string {
	return m.secretSyncNamespace
}

// PolicySecretSyncEnabled reports whether Envoy secret sync is enabled
// (enable-policy-secrets-sync flag).
func (m *manager) PolicySecretSyncEnabled() bool {
	return m.secretSyncEnabled
}

// SecretsOnlyFromSecretsNamespace reports whether policy secrets may only be
// read from the configured secrets namespace
// (policy-secrets-only-from-secrets-namespace flag).
func (m *manager) SecretsOnlyFromSecretsNamespace() bool {
	return m.secretsFromSecretSyncNamespace
}
// getSecrets returns either local or k8s secrets, giving precedence for local secrets if configured.
// It also returns a boolean indicating if the values were read from disk or not.
// The 'ns' parameter is used as the secret namespace if 'secret.Namespace' is an empty string, and is
// expected to be set as the same namespace as the source object (most likely a CNP or CCNP).
//
// Return values are (namespace/name identifier, secret data, readDirectly, error),
// where readDirectly is true when values came from disk or the k8s API.
func (m *manager) getSecrets(ctx context.Context, secret *api.Secret, ns string) (string, map[string][]byte, bool, error) {
	if secret == nil {
		return "", nil, false, fmt.Errorf("Secret must not be nil")
	}
	// An explicit namespace on the secret reference overrides the caller's.
	if secret.Namespace != "" {
		ns = secret.Namespace
	}
	if secret.Name == "" {
		return ns, nil, false, fmt.Errorf("Missing Secret name")
	}
	nsName := filepath.Join(ns, secret.Name)
	// Give priority to local secrets.
	// K8s API request is only done if the local secret directory can't be read!
	certPath := filepath.Join(m.rootPath, nsName)
	files, ioErr := os.ReadDir(certPath)
	if ioErr == nil {
		// Each readable file in the directory becomes one key/value entry;
		// unreadable files are skipped (the last read error is kept).
		secrets := make(map[string][]byte, len(files))
		for _, file := range files {
			var bytes []byte
			path := filepath.Join(certPath, file.Name())
			bytes, ioErr = os.ReadFile(path)
			if ioErr == nil {
				secrets[file.Name()] = bytes
			}
		}
		// Return the (latest) error only if no secrets were found
		if len(secrets) == 0 && ioErr != nil {
			// Files read from disk, so bool returnval is true
			return nsName, nil, true, ioErr
		}
		// Files read from disk, so bool returnval is true
		return nsName, secrets, true, nil
	}
	if m.secretSyncEnabled && m.secretSyncNamespace != "" {
		// If we get here, then the secret is _not_ being read from the filesystem,
		// and secret sync is enabled, so we are sending via SDS,
		// and then we don't want to inspect the Secret at all, because
		// that will require the agent to have more access than it needs. So we return an empty `secrets` map.
		// TODO(youngnick): Deprecate and remove reading from file for secrets.
		emptySecrets := make(map[string][]byte)
		return nsName, emptySecrets, false, nil
	}
	// If secret synchronization is disabled, then we need to read values
	// directly from Kubernetes. Not a good idea, for security or performance reasons, but included
	// for backwards compatibility.
	// TODO(youngnick): Once we are comfortable with SDS stability, remove this and pass the
	// reference to the original secret instead. (This will require changes to the secretsync
	// package so that it can register specific secrets from anywhere.)
	secrets, err := m.k8sClient.GetSecrets(ctx, ns, secret.Name)
	return nsName, secrets, true, err
}
// Default item names for TLS material inside a secret, matching the
// standard kubernetes.io/tls secret layout. They can be overridden per
// policy via api.TLSContext.
const (
	caDefaultName      = "ca.crt"
	publicDefaultName  = "tls.crt"
	privateDefaultName = "tls.key"
)
// GetTLSContext returns a new ca, public and private certificates found based
// in the given api.TLSContext.
//
// When the secret was read inline (from disk or directly from Kubernetes),
// the PEM payloads are returned with inlineSecrets=true. Otherwise the secret
// is delivered to Envoy via SDS and empty values are returned with
// inlineSecrets=false so callers emit an SDS reference instead. 'ns' is the
// fallback namespace used when tlsCtx.Secret does not carry one.
func (m *manager) GetTLSContext(ctx context.Context, tlsCtx *api.TLSContext, ns string) (ca, public, private string, inlineSecrets bool, err error) {
	name, secrets, inlineSecrets, err := m.getSecrets(ctx, tlsCtx.Secret, ns)
	if err != nil {
		return "", "", "", false, err
	}
	// If the certificate hasn't been read from a file, we're going to be inserting a reference
	// to an SDS secret instead, so we don't need to validate the values.
	// Envoy will handle validation.
	if !inlineSecrets {
		m.Logger.Debug(
			"Secret being read from Kubernetes via SDS",
			logfields.Secret, name,
		)
		return "", "", "", false, nil
	}
	// Each item is looked up under its default kubernetes.io/tls key unless
	// the TLSContext overrides the name. An explicit override that is missing
	// from the secret is an error; a missing default is tolerated.
	caName := caDefaultName
	if tlsCtx.TrustedCA != "" {
		caName = tlsCtx.TrustedCA
	}
	caBytes, ok := secrets[caName]
	if ok {
		ca = string(caBytes)
	} else if tlsCtx.TrustedCA != "" {
		return "", "", "", false, fmt.Errorf("Trusted CA %s not found in secret %s", caName, name)
	}
	publicName := publicDefaultName
	if tlsCtx.Certificate != "" {
		publicName = tlsCtx.Certificate
	}
	publicBytes, ok := secrets[publicName]
	if ok {
		public = string(publicBytes)
	} else if tlsCtx.Certificate != "" {
		return "", "", "", false, fmt.Errorf("Certificate %s not found in secret %s", publicName, name)
	}
	privateName := privateDefaultName
	if tlsCtx.PrivateKey != "" {
		privateName = tlsCtx.PrivateKey
	}
	privateBytes, ok := secrets[privateName]
	if ok {
		private = string(privateBytes)
	} else if tlsCtx.PrivateKey != "" {
		return "", "", "", false, fmt.Errorf("Private Key %s not found in secret %s", privateName, name)
	}
	// At least one of the three items must have been present.
	if caBytes == nil && publicBytes == nil && privateBytes == nil {
		// Fix: removed the stray trailing space that was inside the error
		// message ("... secret %s ").
		return "", "", "", false, fmt.Errorf("TLS certificates not found in secret %s", name)
	}
	// TODO(youngnick): Follow up PR that will change this to a deprecation warning once we actually
	// mark read-from-file and direct read as deprecated.
	m.Logger.Debug("Secret being used inline, not via SDS", logfields.Secret, name)
	return ca, public, private, true, nil
}
// GetSecretString returns a secret string stored in a k8s secret.
// The secret must contain exactly one item; an empty string with nil error
// means the secret is delivered via SDS instead of inline.
func (m *manager) GetSecretString(ctx context.Context, secret *api.Secret, ns string) (string, error) {
	name, secrets, _, err := m.getSecrets(ctx, secret, ns)
	if err != nil {
		return "", err
	}
	// If the value hasn't been read from a file, we're going to be inserting a reference to an SDS secret instead,
	// so we don't need to validate the values. Envoy will handle validation.
	// NOTE(review): this keys off len(secrets)==0 rather than the from-disk
	// flag returned by getSecrets, so an existing-but-empty local secret
	// directory is also treated as the SDS case — confirm intended.
	if len(secrets) == 0 {
		m.Logger.Debug("Secret being read from Kubernetes via SDS", logfields.Secret, name)
		return "", nil
	}
	if len(secrets) == 1 {
		// get the lone item by looping into the map
		for _, value := range secrets {
			return string(value), nil
		}
	}
	return "", fmt.Errorf("Secret %s must have exactly one item", name)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package certificatemanager
import (
"context"
"errors"
"github.com/cilium/cilium/pkg/policy/api"
)
// NewMockSecretManagerInline returns a SecretManager whose GetSecretString
// always succeeds with the fixed value "somevalue".
func NewMockSecretManagerInline() SecretManager {
	mock := &mockSecretManager{}
	mock.inlineValue = "somevalue"
	return mock
}

// NewMockSecretManagerNotFound returns a SecretManager whose GetSecretString
// always fails with a "not found" error.
func NewMockSecretManagerNotFound() SecretManager {
	mock := &mockSecretManager{}
	mock.inlineError = errors.New("not found")
	return mock
}

// NewMockSecretManagerSDS returns a SecretManager that reports SDS-style
// behavior: secret sync enabled and secrets read only from the secrets
// namespace.
func NewMockSecretManagerSDS() SecretManager {
	mock := &mockSecretManager{}
	mock.isSDS = true
	return mock
}
// mockSecretManager is a test double for SecretManager returning canned
// responses configured by the NewMockSecretManager* constructors.
type mockSecretManager struct {
	// inlineValue is returned by GetSecretString.
	inlineValue string
	// inlineError is returned by GetSecretString.
	inlineError error
	// isSDS is reported by the secret-sync related methods.
	isSDS bool
}

// GetSecretString returns the canned value and error; the secret reference
// and namespace arguments are ignored.
func (m mockSecretManager) GetSecretString(_ context.Context, secret *api.Secret, ns string) (string, error) {
	return m.inlineValue, m.inlineError
}

// PolicySecretSyncEnabled reports the canned isSDS flag.
func (m mockSecretManager) PolicySecretSyncEnabled() bool {
	return m.isSDS
}

// SecretsOnlyFromSecretsNamespace reports the canned isSDS flag.
func (m mockSecretManager) SecretsOnlyFromSecretsNamespace() bool {
	return m.isSDS
}

// GetSecretSyncNamespace always returns "" (no sync namespace configured).
func (m mockSecretManager) GetSecretSyncNamespace() string {
	return ""
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package certloader
import (
"context"
"fmt"
"log/slog"
"github.com/cilium/hive/cell"
"github.com/cilium/hive/job"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/promise"
"github.com/cilium/cilium/pkg/time"
)
// Config is the configuration for NewWatchedServerConfigPromise and NewWatchedClientConfigPromise.
// When TLS is false, both constructors return a nil promise without starting
// any watcher.
type Config struct {
	// Enable TLS watched configuration.
	TLS bool
	// Path to a TLS public certificate file. The file must contain PEM encoded data.
	TLSCertFile string
	// Path to a TLS private key file. The file must contain PEM encoded data.
	TLSKeyFile string
	// Paths to one or more TLS public client CA certificates files to use for TLS with mutual
	// authentication (mTLS). The files must contain PEM encoded data.
	TLSClientCAFiles []string
}
// NewWatchedServerConfigPromise provides a promise that resolves to a WatchedServerConfig.
// The resolved config can be used to obtain a tls.Config which can transparently reload
// certificates between two connections. The promise resolves when the certificates become
// ready and have been loaded.
//
// This is meant to be used as a Hive constructor and is recommended to be placed in a module
// and the promise provided wrapped in another type to avoid possible conflicts/replacements
// when used multiple times in the same hive.
//
// Returns (nil, nil) when cfg.TLS is disabled.
func NewWatchedServerConfigPromise(lc cell.Lifecycle, jobGroup job.Group, log *slog.Logger, cfg Config) (promise.Promise[*WatchedServerConfig], error) {
	log = log.With(logfields.Config, "certloader-server-tls")
	if !cfg.TLS {
		log.Info("Certloader TLS watcher disabled")
		return nil, nil
	}
	// NOTE: this local variable shadows the promise package below.
	resolver, promise := promise.New[*WatchedServerConfig]()
	jobGroup.Add(job.OneShot("certloader-server-tls", func(ctx context.Context, _ cell.Health) error {
		watchedConfigChan, err := FutureWatchedServerConfig(
			ctx, log,
			cfg.TLSClientCAFiles, cfg.TLSCertFile, cfg.TLSKeyFile,
		)
		if err != nil {
			err := fmt.Errorf("failed to initialize certloader TLS watched server configuration: %w", err)
			resolver.Reject(err)
			return err
		}
		// Log a hint when the files aren't ready within 30s; time.After fires
		// once, so the message is printed at most once.
		waitingMsgTimeout := time.After(30 * time.Second)
		var watchedConfig *WatchedServerConfig
		for watchedConfig == nil {
			select {
			case watchedConfig = <-watchedConfigChan:
			case <-waitingMsgTimeout:
				log.Info("Waiting for certloader TLS certificate and key files to be created")
			case <-ctx.Done():
				// Shutdown before readiness: leave the promise unresolved.
				return nil
			}
		}
		resolver.Resolve(watchedConfig)
		return nil
	}))
	lc.Append(cell.Hook{
		OnStop: func(ctx cell.HookContext) error {
			// stop the resolved config watcher (best effort)
			ctx, cancel := context.WithTimeout(ctx, time.Second)
			defer cancel()
			cfg, _ := promise.Await(ctx)
			if cfg != nil {
				cfg.Stop()
			}
			return nil
		},
	})
	return promise, nil
}
// NewWatchedClientConfigPromise provides a promise that resolves to a WatchedClientConfig.
// The resolved config can be used to obtain a tls.Config. The promise resolves when the
// certificates become ready and have been loaded.
//
// This is meant to be used as a Hive constructor and is recommended to be placed in a module
// and the promise provided wrapped in another type to avoid possible conflicts/replacements
// when used multiple times in the same hive.
//
// Returns (nil, nil) when cfg.TLS is disabled. Mirrors
// NewWatchedServerConfigPromise for the client side.
func NewWatchedClientConfigPromise(lc cell.Lifecycle, jobGroup job.Group, log *slog.Logger, cfg Config) (promise.Promise[*WatchedClientConfig], error) {
	log = log.With(logfields.Config, "certloader-client-tls")
	if !cfg.TLS {
		log.Info("Certloader TLS watcher disabled")
		return nil, nil
	}
	// NOTE: this local variable shadows the promise package below.
	resolver, promise := promise.New[*WatchedClientConfig]()
	jobGroup.Add(job.OneShot("certloader-client-tls", func(ctx context.Context, _ cell.Health) error {
		watchedConfigChan, err := FutureWatchedClientConfig(
			ctx, log,
			cfg.TLSClientCAFiles, cfg.TLSCertFile, cfg.TLSKeyFile,
		)
		if err != nil {
			err := fmt.Errorf("failed to initialize certloader TLS watched client configuration: %w", err)
			resolver.Reject(err)
			return err
		}
		// Log a hint when the files aren't ready within 30s; time.After fires
		// once, so the message is printed at most once.
		waitingMsgTimeout := time.After(30 * time.Second)
		var watchedConfig *WatchedClientConfig
		for watchedConfig == nil {
			select {
			case watchedConfig = <-watchedConfigChan:
			case <-waitingMsgTimeout:
				log.Info("Waiting for certloader TLS certificate and key files to be created")
			case <-ctx.Done():
				// Shutdown before readiness: leave the promise unresolved.
				return nil
			}
		}
		resolver.Resolve(watchedConfig)
		return nil
	}))
	lc.Append(cell.Hook{
		OnStop: func(ctx cell.HookContext) error {
			// stop the resolved config watcher (best effort)
			ctx, cancel := context.WithTimeout(ctx, time.Second)
			defer cancel()
			cfg, _ := promise.Await(ctx)
			if cfg != nil {
				cfg.Stop()
			}
			return nil
		},
	})
	return promise, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package certloader
import (
"context"
"crypto/tls"
"log/slog"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// ClientConfigBuilder creates tls.Config to be used as TLS client.
type ClientConfigBuilder interface {
	// IsMutualTLS reports whether a client keypair is configured (mTLS).
	IsMutualTLS() bool
	// ClientConfig derives a client tls.Config from the given base config.
	ClientConfig(base *tls.Config) *tls.Config
}

// WatchedClientConfig is a ClientConfigBuilder backed up by files to be
// watched for changes.
type WatchedClientConfig struct {
	*Watcher
	log *slog.Logger
}
// NewWatchedClientConfig builds a WatchedClientConfig watching the given CA,
// certificate and private key files. When caFiles is nil or empty, the system
// CA CertPool is used. Providing both certFile and privkeyFile makes the
// resulting ClientConfigBuilder mTLS capable.
func NewWatchedClientConfig(log *slog.Logger, caFiles []string, certFile, privkeyFile string) (*WatchedClientConfig, error) {
	watcher, err := NewWatcher(log, caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	return &WatchedClientConfig{Watcher: watcher, log: log}, nil
}
// FutureWatchedClientConfig returns a channel where exactly one
// WatchedClientConfig will be sent once the given files are ready and loaded.
// This can be useful when the file paths are well-known, but the files
// themselves don't exist yet. When caFiles is nil or empty, the system CA
// CertPool is used. To configure a mTLS capable ClientConfigBuilder, both
// certFile and privkeyFile must be provided.
func FutureWatchedClientConfig(ctx context.Context, log *slog.Logger, caFiles []string, certFile, privkeyFile string) (<-chan *WatchedClientConfig, error) {
	ew, err := FutureWatcher(ctx, log, caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	res := make(chan *WatchedClientConfig)
	go func(res chan<- *WatchedClientConfig) {
		// res is always closed; the ok check distinguishes a loaded watcher
		// from the underlying channel being closed on stop/cancellation.
		defer close(res)
		if watcher, ok := <-ew; ok {
			res <- &WatchedClientConfig{
				Watcher: watcher,
				log:     log,
			}
		}
	}(res)
	return res, nil
}
// IsMutualTLS implement ClientConfigBuilder. It reports whether a client
// certificate/key pair is configured.
func (c *WatchedClientConfig) IsMutualTLS() bool {
	return c.HasKeypair()
}

// ClientConfig implement ClientConfigBuilder.
// The keypair and CA pool are snapshotted when ClientConfig is called, so
// certificate rotation is only picked up by building a new config.
func (c *WatchedClientConfig) ClientConfig(base *tls.Config) *tls.Config {
	// get both the keypair and CAs at once even if keypair may be used only
	// later, in order to get a "consistent view" of the configuration as it
	// may change between now and the call to GetClientCertificate.
	keypair, caCertPool := c.KeypairAndCACertPool()
	tlsConfig := base.Clone()
	tlsConfig.RootCAs = caCertPool
	if c.IsMutualTLS() {
		tlsConfig.GetClientCertificate = func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
			// Guard the Debug call: keypairId does DER parsing work.
			if c.log.Enabled(context.Background(), slog.LevelDebug) {
				c.log.Debug("Client mtls handshake", logfields.KeyPairSN, keypairId(keypair))
			}
			return keypair, nil
		}
	}
	return tlsConfig
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package certloader
import (
"crypto/tls"
"crypto/x509"
"strings"
)
// keypairId renders the Serial Numbers of every certificate in the given
// chain as a comma-separated string, or "<nil>" for a nil keypair.
func keypairId(crt *tls.Certificate) string {
	if crt == nil {
		return "<nil>"
	}
	return strings.Join(serialNumbers(crt.Certificate), ",")
}

// serialNumbers extracts the Serial Number, in hexadecimal format, of each
// parsable ASN1.DER encoded certificate; unparsable entries are skipped.
func serialNumbers(certificates [][]byte) []string {
	ids := make([]string, 0, len(certificates))
	for _, der := range certificates {
		if parsed, err := x509.ParseCertificate(der); err == nil {
			ids = append(ids, parsed.SerialNumber.Text(16))
		}
	}
	return ids
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package certloader
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"os"
"github.com/cilium/cilium/pkg/lock"
)
// FileReloader is a set of TLS configuration files including custom CAs, and a
// certificate along with its private key (keypair) that can be reloaded
// dynamically via the Reload* functions.
type FileReloader struct {
	// caFiles, certFile, and privkeyFile are constants for the FileReloader's
	// lifetime, thus accessing them doesn't require acquiring the mutex.
	caFiles     []string
	certFile    string
	privkeyFile string

	// mutex guards the mutable fields below.
	mutex lock.Mutex
	// fields below should only be accessed with mutex acquired as they may be
	// updated concurrently.
	caCertPool           *x509.CertPool
	caCertPoolGeneration uint // incremented when caCertPool is reloaded
	keypair              *tls.Certificate
	keypairGeneration    uint // incremented when keypair is reloaded
}

var (
	// ErrInvalidKeypair is returned when either the certificate or its
	// corresponding private key is missing.
	ErrInvalidKeypair = errors.New("certificate and private key are both required, but only one was provided")
)
// NewFileReloaderReady creates a FileReloader for the given files and eagerly
// loads them, so the returned FileReloader is immediately usable.
func NewFileReloaderReady(caFiles []string, certFile, privkeyFile string) (*FileReloader, error) {
	reloader, err := NewFileReloader(caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	// Perform the initial load; a failure here means the files are missing
	// or malformed.
	if _, _, err = reloader.Reload(); err != nil {
		return nil, err
	}
	return reloader, nil
}
// NewFileReloader creates a FileReloader for the given files without loading
// them; the caller must drive the Reload* functions until the FileReloader
// becomes ready. The certificate and private key must be given together or
// both omitted, otherwise ErrInvalidKeypair is returned.
func NewFileReloader(caFiles []string, certFile, privkeyFile string) (*FileReloader, error) {
	// Exactly one of cert/key provided is invalid.
	if (certFile == "") != (privkeyFile == "") {
		return nil, ErrInvalidKeypair
	}
	return &FileReloader{
		caFiles:     caFiles,
		certFile:    certFile,
		privkeyFile: privkeyFile,
	}, nil
}
// HasKeypair returns true when the FileReloader contains both a certificate
// and its private key, false otherwise.
func (r *FileReloader) HasKeypair() bool {
	return r.certFile != "" && r.privkeyFile != ""
}

// HasCustomCA returns true when the FileReloader has custom CAs configured,
// false otherwise.
func (r *FileReloader) HasCustomCA() bool {
	return len(r.caFiles) > 0
}

// Ready returns true when the FileReloader is ready to be used, false
// otherwise: every configured input (keypair and/or custom CA) must have
// been successfully loaded at least once.
func (r *FileReloader) Ready() bool {
	keypair, caCertPool := r.KeypairAndCACertPool()
	if r.HasKeypair() && keypair == nil {
		return false
	}
	if r.HasCustomCA() && caCertPool == nil {
		return false
	}
	return true
}
// KeypairAndCACertPool snapshots the currently loaded keypair and CA pool
// under the mutex. This function should only be called once the FileReloader
// is ready, see Ready(); earlier calls may return nil values.
func (r *FileReloader) KeypairAndCACertPool() (*tls.Certificate, *x509.CertPool) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	return r.keypair, r.caCertPool
}
// Reload update the caCertPool reading the caFiles, and the keypair reading
// certFile and privkeyFile. The files are read before the mutex is taken, so
// a failed read leaves the previously loaded values untouched; both values
// are then swapped in within a single critical section.
func (r *FileReloader) Reload() (keypair *tls.Certificate, caCertPool *x509.CertPool, err error) {
	if r.HasKeypair() {
		keypair, err = r.readKeypair()
		if err != nil {
			return
		}
	}
	if r.HasCustomCA() {
		caCertPool, err = r.readCertificateAuthority()
		if err != nil {
			return
		}
	}
	r.mutex.Lock()
	if r.HasKeypair() {
		r.keypair = keypair
		r.keypairGeneration++
	}
	if r.HasCustomCA() {
		r.caCertPool = caCertPool
		r.caCertPoolGeneration++
	}
	r.mutex.Unlock()
	return
}
// ReloadKeypair re-reads certFile and privkeyFile and swaps in the new
// keypair. It is a no-op returning (nil, nil) when no keypair is configured;
// on read failure the previously loaded keypair is kept.
func (r *FileReloader) ReloadKeypair() (*tls.Certificate, error) {
	if !r.HasKeypair() {
		return nil, nil
	}
	loaded, err := r.readKeypair()
	if err != nil {
		return nil, err
	}
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.keypair = loaded
	r.keypairGeneration++
	return loaded, nil
}

// ReloadCA re-reads the caFiles and swaps in the new CA certificate pool.
// It is a no-op returning (nil, nil) when no custom CA is configured; on
// read failure the previously loaded pool is kept.
func (r *FileReloader) ReloadCA() (*x509.CertPool, error) {
	if !r.HasCustomCA() {
		return nil, nil
	}
	pool, err := r.readCertificateAuthority()
	if err != nil {
		return nil, err
	}
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.caCertPool = pool
	r.caCertPoolGeneration++
	return pool, nil
}
// readKeypair read the certificate and private key from disk and parses
// them as a TLS keypair.
func (r *FileReloader) readKeypair() (*tls.Certificate, error) {
	keypair, err := tls.LoadX509KeyPair(r.certFile, r.privkeyFile)
	if err != nil {
		return nil, fmt.Errorf("failed to load keypair: %w", err)
	}
	return &keypair, nil
}

// readCertificateAuthority read the CA files. Every file must exist and be
// PEM encoded; the returned pool contains the union of their certificates.
func (r *FileReloader) readCertificateAuthority() (*x509.CertPool, error) {
	caCertPool := x509.NewCertPool()
	for _, path := range r.caFiles {
		pem, err := os.ReadFile(path)
		if err != nil {
			return nil, fmt.Errorf("failed to load cert %q: %w", path, err)
		}
		if ok := caCertPool.AppendCertsFromPEM(pem); !ok {
			return nil, fmt.Errorf("failed to load cert %q: must be PEM encoded", path)
		}
	}
	return caCertPool, nil
}

// generations returns the keypair and caCertPool generation counters,
// allowing callers (e.g. tests) to observe that a reload happened.
func (r *FileReloader) generations() (uint, uint) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	return r.keypairGeneration, r.caCertPoolGeneration
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package certloader
import (
"context"
"crypto/tls"
"errors"
"log/slog"
"slices"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// alpnProtocolH2 is the ALPN protocol identifier for HTTP/2.
var alpnProtocolH2 = "h2"

// ServerConfigBuilder creates tls.Config to be used as TLS server.
type ServerConfigBuilder interface {
	// IsMutualTLS reports whether client certificates are required (mTLS).
	IsMutualTLS() bool
	// ServerConfig derives a server tls.Config from the given base config.
	ServerConfig(base *tls.Config) *tls.Config
}

// WatchedServerConfig is a ServerConfigBuilder backed up by files to be
// watched for changes. The tls.Config created will use the latest CA and
// keypair on each TLS handshake, allowing for smooth TLS configuration
// rotation.
type WatchedServerConfig struct {
	*Watcher
	log *slog.Logger
}

var (
	// ErrMissingCertFile is returned when the certificate file is missing.
	ErrMissingCertFile = errors.New("certificate file path is required")
	// ErrMissingPrivkeyFile is returned when the private key file is missing.
	ErrMissingPrivkeyFile = errors.New("private key file path is required")
)
// NewWatchedServerConfig builds a WatchedServerConfig watching the given
// files. Both certFile and privkeyFile are mandatory; providing at least one
// CA file makes the resulting ServerConfigBuilder mTLS capable.
func NewWatchedServerConfig(log *slog.Logger, caFiles []string, certFile, privkeyFile string) (*WatchedServerConfig, error) {
	switch {
	case certFile == "":
		return nil, ErrMissingCertFile
	case privkeyFile == "":
		return nil, ErrMissingPrivkeyFile
	}
	watcher, err := NewWatcher(log, caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	return &WatchedServerConfig{Watcher: watcher, log: log}, nil
}
// FutureWatchedServerConfig returns a channel where exactly one
// WatchedServerConfig will be sent once the given files are ready and loaded.
// This can be useful when the file paths are well-known, but the files
// themselves don't exist yet. both certFile and privkeyFile must be provided.
// To configure a mTLS capable ServerConfigBuilder, caFiles must contains at
// least one file path.
func FutureWatchedServerConfig(ctx context.Context, log *slog.Logger, caFiles []string, certFile, privkeyFile string) (<-chan *WatchedServerConfig, error) {
	if certFile == "" {
		return nil, ErrMissingCertFile
	}
	if privkeyFile == "" {
		return nil, ErrMissingPrivkeyFile
	}
	ew, err := FutureWatcher(ctx, log, caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	res := make(chan *WatchedServerConfig)
	go func(res chan<- *WatchedServerConfig) {
		// res is always closed; the ok check distinguishes a loaded watcher
		// from the underlying channel being closed on stop/cancellation.
		defer close(res)
		if watcher, ok := <-ew; ok {
			res <- &WatchedServerConfig{
				Watcher: watcher,
				log:     log,
			}
		}
	}(res)
	return res, nil
}
// IsMutualTLS implement ServerConfigBuilder. Custom CAs being configured
// implies client certificates are expected.
func (c *WatchedServerConfig) IsMutualTLS() bool {
	return c.HasCustomCA()
}

// ServerConfig implement ServerConfigBuilder.
// NOTE(review): GetConfigForClient dereferences the keypair unconditionally;
// this assumes the watcher has loaded the keypair before the first handshake
// (certFile/privkeyFile are mandatory for server configs) — confirm callers
// wait for readiness.
func (c *WatchedServerConfig) ServerConfig(base *tls.Config) *tls.Config {
	// We return a tls.Config having only the GetConfigForClient member set.
	// When a client initialize a TLS handshake, this function will be called
	// and the tls.Config returned by GetConfigForClient will be used. This
	// mechanism allow us to reload the certificates transparently between two
	// clients connections without having to restart the server.
	// See also the discussion at https://github.com/golang/go/issues/16066.
	// Also related: https://github.com/golang/go/issues/35887
	return &tls.Config{
		GetConfigForClient: func(_ *tls.ClientHelloInfo) (*tls.Config, error) {
			// Snapshot the current keypair and CA pool for this connection.
			keypair, caCertPool := c.KeypairAndCACertPool()
			tlsConfig := base.Clone()
			tlsConfig.Certificates = []tls.Certificate{*keypair}
			tlsConfig.NextProtos = constructWithH2ProtoIfNeed(tlsConfig.NextProtos)
			if c.IsMutualTLS() {
				// We've been configured to serve mTLS, so setup the ClientCAs
				// accordingly.
				tlsConfig.ClientCAs = caCertPool
				// The caller may have its own desire about the handshake
				// ClientAuthType. We honor it unless its tls.NoClientCert (the
				// default zero value) as we are configured to serve mTLS.
				if tlsConfig.ClientAuth == tls.NoClientCert {
					tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
				}
			}
			// Guard the Debug call: keypairId does DER parsing work.
			if c.log.Enabled(context.Background(), slog.LevelDebug) {
				c.log.Debug(
					"Server tls handshake",
					logfields.KeyPairSN, keypairId(keypair),
				)
			}
			return tlsConfig, nil
		},
		// NOTE: this MinVersion is not used as this tls.Config will be
		// overridden by the one returned by GetConfigForClient. The effective
		// MinVersion must be set by the provided base TLS configuration.
		MinVersion: tls.VersionTLS13,
	}
}
// constructWithH2ProtoIfNeed returns the ALPN protocol list extended with
// "h2" (HTTP/2). The input slice is never mutated: when "h2" is missing a
// fresh slice is returned, and when it is already present the input is
// returned unchanged.
func constructWithH2ProtoIfNeed(existingProtocols []string) []string {
	const h2 = "h2" // ALPN token for HTTP/2 (same value as alpnProtocolH2)
	if slices.Contains(existingProtocols, h2) {
		return existingProtocols
	}
	withH2 := make([]string, 0, len(existingProtocols)+1)
	withH2 = append(withH2, existingProtocols...)
	withH2 = append(withH2, h2)
	return withH2
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package certloader
import (
"context"
"log/slog"
"sync"
"github.com/cilium/cilium/pkg/fswatcher"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// watcherEventCoalesceWindow is how long filesystem events for the same
// group of files (keypair or CA) are coalesced before triggering one reload.
const watcherEventCoalesceWindow = 100 * time.Millisecond

// Watcher is a set of TLS configuration files including CA files, and a
// certificate along with its private key. The files are watched for change and
// reloaded automatically.
type Watcher struct {
	*FileReloader
	log *slog.Logger
	// fswatcher emits events for the tracked files.
	fswatcher *fswatcher.Watcher
	// stop is closed by Stop to terminate the watch goroutine.
	stop chan struct{}
}
// NewWatcher returns a Watcher that watch over the given file
// paths. The given files are expected to already exists when this function is
// called. On success, the returned Watcher is ready to use.
func NewWatcher(log *slog.Logger, caFiles []string, certFile, privkeyFile string) (*Watcher, error) {
	// NewFileReloaderReady performs the initial load, so failure here means
	// the files are missing or malformed.
	r, err := NewFileReloaderReady(caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	// An error here would be unexpected as we were able to create a
	// FileReloader having read the files, so the files should exist and be
	// "watchable".
	fswatcher, err := newFsWatcher(log, caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	w := &Watcher{
		FileReloader: r,
		log:          log,
		fswatcher:    fswatcher,
		stop:         make(chan struct{}),
	}
	// Start the reload goroutine; the returned ready channel can be ignored
	// as the FileReloader is already loaded.
	w.Watch()
	return w, nil
}
// FutureWatcher returns a channel where exactly one Watcher will be sent once
// the given files are ready and loaded. This can be useful when the file paths
// are well-known, but the files themselves don't exist yet. Note that the
// requirement is that the file directories must exists. The provided context
// ensures that we cleanup the spawned goroutines if the files never become ready.
func FutureWatcher(ctx context.Context, log *slog.Logger, caFiles []string, certFile, privkeyFile string) (<-chan *Watcher, error) {
	// NewFileReloader (unlike NewFileReloaderReady) does not read the files,
	// which may not exist yet.
	r, err := NewFileReloader(caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	fswatcher, err := newFsWatcher(log, caFiles, certFile, privkeyFile)
	if err != nil {
		return nil, err
	}
	w := &Watcher{
		FileReloader: r,
		log:          log,
		fswatcher:    fswatcher,
		stop:         make(chan struct{}),
	}
	res := make(chan *Watcher)
	go func(res chan<- *Watcher) {
		defer close(res)
		// Attempt a reload without having received any fs notification in case
		// all the files are already there. Note that the keypair and CA are
		// reloaded separately as a "partial update" is still useful: If the
		// FileReloader is "half-ready" (e.g. has loaded the keypair but failed
		// to load the CA), we only need a successfully handled CA related fs
		// notify event to become Ready (in other words, we don't need to
		// receive a fs event for the keypair in that case to become ready).
		_, keypairErr := w.ReloadKeypair()
		_, caErr := w.ReloadCA()
		ready := w.Watch()
		if keypairErr == nil && caErr == nil {
			log.Debug("TLS configuration ready")
			res <- w
			return
		}
		log.Debug("Waiting on fswatcher update to be ready")
		select {
		case <-ready:
			log.Debug("TLS configuration ready")
			res <- w
		case <-w.stop:
			// Stopped externally: close res without sending a Watcher.
		case <-ctx.Done():
			// Caller gave up: tear down the watch goroutine as well.
			w.Stop()
		}
	}(res)
	return res, nil
}
// Watch initialize the files watcher and update goroutine. It returns a ready
// channel that will be closed once an update made the underlying FileReloader
// ready.
func (w *Watcher) Watch() <-chan struct{} {
	// prepare the ready channel to be returned. We will close it exactly once.
	var once sync.Once
	ready := make(chan struct{})
	markReady := func() {
		once.Do(func() {
			close(ready)
		})
	}
	// build maps for the CA files and keypair files to help detecting what has
	// changed in order to reload only the appropriate certificates.
	keypairMap := make(map[string]struct{})
	caMap := make(map[string]struct{})
	if w.FileReloader.certFile != "" {
		keypairMap[w.FileReloader.certFile] = struct{}{}
	}
	if w.FileReloader.privkeyFile != "" {
		keypairMap[w.FileReloader.privkeyFile] = struct{}{}
	}
	for _, path := range w.FileReloader.caFiles {
		caMap[path] = struct{}{}
	}
	// used to coalesce fswatcher events that arrive within the same time window.
	// A nil channel blocks forever in select, so each timer is armed only on
	// the first event of a burst and disarmed (set back to nil) after firing.
	var keypairReload, caReload <-chan time.Time
	go func() {
		defer w.fswatcher.Close()
		for {
			select {
			case event := <-w.fswatcher.Events:
				path := event.Name
				w.log.Debug("Received fswatcher event",
					logfields.Path, path,
					logfields.Operation, event.Op,
				)
				_, keypairUpdated := keypairMap[path]
				_, caUpdated := caMap[path]
				if keypairUpdated {
					if keypairReload == nil {
						keypairReload = time.After(watcherEventCoalesceWindow)
					}
				} else if caUpdated {
					if caReload == nil {
						caReload = time.After(watcherEventCoalesceWindow)
					}
				} else {
					// fswatcher should never send events for unknown files
					w.log.Warn("Unknown file, ignoring.",
						logfields.Path, path,
						logfields.Operation, event.Op,
					)
					continue
				}
			case <-keypairReload:
				// Coalesce window elapsed: reload the keypair once for the
				// whole burst of events.
				keypairReload = nil
				keypair, err := w.ReloadKeypair()
				if err != nil {
					w.log.Warn("Keypair update failed", logfields.Error, err)
					continue
				}
				id := keypairId(keypair)
				w.log.Info("Keypair updated", logfields.KeyPairSN, id)
				if w.Ready() {
					markReady()
				}
			case <-caReload:
				// Coalesce window elapsed: reload the CA pool once.
				caReload = nil
				if _, err := w.ReloadCA(); err != nil {
					w.log.Warn("Certificate authority update failed", logfields.Error, err)
					continue
				}
				w.log.Info("Certificate authority updated")
				if w.Ready() {
					markReady()
				}
			case err := <-w.fswatcher.Errors:
				w.log.Warn("fswatcher error", logfields.Error, err)
			case <-w.stop:
				// Stop() was called; the deferred Close tears down fswatcher.
				w.log.Info("Stopping fswatcher")
				return
			}
		}
	}()
	return ready
}
// Stop watching the files. Calling Stop on an already-stopped Watcher is a
// no-op.
// NOTE(review): two concurrent Stop calls could both observe the channel as
// open and both attempt close (panic); confirm Stop is only invoked from a
// single goroutine at a time.
func (w *Watcher) Stop() {
	select {
	case <-w.stop:
		// already stopped
	default:
		close(w.stop)
	}
}
// newFsWatcher builds a fswatcher.Watcher over the non-empty paths among
// certFile, privkeyFile and caFiles (in that order). The fswatcher supports
// watching files which do not exist yet: a create event is emitted once the
// file is added.
func newFsWatcher(logger *slog.Logger, caFiles []string, certFile, privkeyFile string) (*fswatcher.Watcher, error) {
	candidates := make([]string, 0, len(caFiles)+2)
	candidates = append(candidates, certFile, privkeyFile)
	candidates = append(candidates, caFiles...)
	tracked := make([]string, 0, len(candidates))
	for _, path := range candidates {
		if path != "" {
			tracked = append(tracked, path)
		}
	}
	return fswatcher.New(logger, tracked)
}
// Code generated by dpgen. DO NOT EDIT.
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package config
// BPFHost is a configuration struct for a Cilium datapath object. Warning: do
// not instantiate directly! Always use [NewBPFHost] to ensure the default
// values configured in the ELF are honored.
type BPFHost struct {
	// MTU of the device the bpf program is attached to (default: MTU set in
	// node_config.h by agent).
	DeviceMTU uint16 `config:"device_mtu"`
	// Pass traffic with extended IP protocols.
	EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"`
	// Masquerade traffic to remote nodes.
	EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"`
	// Length of the Ethernet header on this device. May be set to zero on L2-less
	// devices. (default __ETH_HLEN).
	EthHeaderLength uint8 `config:"eth_header_length"`
	// The host endpoint's security ID.
	HostEpID uint16 `config:"host_ep_id"`
	// Ifindex of the interface the bpf program is attached to.
	InterfaceIfindex uint32 `config:"interface_ifindex"`
	// MAC address of the interface the bpf program is attached to.
	InterfaceMAC [8]byte `config:"interface_mac"`
	// Masquerade address for IPv4 traffic.
	NATIPv4Masquerade [4]byte `config:"nat_ipv4_masquerade"`
	// Masquerade address for IPv6 traffic.
	NATIPv6Masquerade [16]byte `config:"nat_ipv6_masquerade"`
	// Pull security context from IP cache.
	SecctxFromIPCache bool `config:"secctx_from_ipcache"`
	// The endpoint's security label.
	SecurityLabel uint32 `config:"security_label"`

	Node
}

// NewBPFHost returns a BPFHost carrying the defaults configured in the ELF.
// Fields not listed below intentionally start at their Go zero value.
func NewBPFHost(node Node) *BPFHost {
	return &BPFHost{
		DeviceMTU:       0x5dc, // 1500-byte default MTU.
		EthHeaderLength: 0xe,   // __ETH_HLEN (14 bytes).
		Node:            node,
	}
}
// Code generated by dpgen. DO NOT EDIT.
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package config
// BPFLXC is a configuration struct for a Cilium datapath object. Warning: do
// not instantiate directly! Always use [NewBPFLXC] to ensure the default values
// configured in the ELF are honored.
type BPFLXC struct {
	// MTU of the device the bpf program is attached to (default: MTU set in
	// node_config.h by agent).
	DeviceMTU uint16 `config:"device_mtu"`
	// Pass traffic with extended IP protocols.
	EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"`
	// Masquerade traffic to remote nodes.
	EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"`
	// The endpoint's security ID.
	EndpointID uint16 `config:"endpoint_id"`
	// The endpoint's IPv4 address.
	EndpointIPv4 [4]byte `config:"endpoint_ipv4"`
	// The endpoint's IPv6 address.
	EndpointIPv6 [16]byte `config:"endpoint_ipv6"`
	// The endpoint's network namespace cookie.
	EndpointNetNSCookie uint64 `config:"endpoint_netns_cookie"`
	// The host endpoint's security ID.
	HostEpID uint16 `config:"host_ep_id"`
	// Ifindex of the interface the bpf program is attached to.
	InterfaceIfindex uint32 `config:"interface_ifindex"`
	// MAC address of the interface the bpf program is attached to.
	InterfaceMAC [8]byte `config:"interface_mac"`
	// Masquerade address for IPv4 traffic.
	NATIPv4Masquerade [4]byte `config:"nat_ipv4_masquerade"`
	// Masquerade address for IPv6 traffic.
	NATIPv6Masquerade [16]byte `config:"nat_ipv6_masquerade"`
	// The log level for policy verdicts in workload endpoints.
	PolicyVerdictLogFilter uint32 `config:"policy_verdict_log_filter"`
	// Pull security context from IP cache.
	SecctxFromIPCache bool `config:"secctx_from_ipcache"`
	// The endpoint's security label.
	SecurityLabel uint32 `config:"security_label"`

	Node
}

// NewBPFLXC returns a BPFLXC carrying the defaults configured in the ELF.
// Fields not listed below intentionally start at their Go zero value.
func NewBPFLXC(node Node) *BPFLXC {
	return &BPFLXC{
		DeviceMTU: 0x5dc, // 1500-byte default MTU.
		Node:      node,
	}
}
// Code generated by dpgen. DO NOT EDIT.
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package config
// BPFNetwork is a configuration struct for a Cilium datapath object. Warning:
// do not instantiate directly! Always use [NewBPFNetwork] to ensure the default
// values configured in the ELF are honored.
type BPFNetwork struct {
	Node
}

// NewBPFNetwork returns a BPFNetwork wrapping the given node configuration;
// the object itself carries no defaults of its own.
func NewBPFNetwork(node Node) *BPFNetwork {
	return &BPFNetwork{Node: node}
}
// Code generated by dpgen. DO NOT EDIT.
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package config
// Node is a configuration struct for a Cilium datapath object. Warning: do not
// instantiate directly! Always use [NewNode] to ensure the default values
// configured in the ELF are honored.
type Node struct {
	// Index of the interface used to connect nodes in the cluster.
	DirectRoutingDevIfindex uint32 `config:"direct_routing_dev_ifindex"`
	// Internal IPv6 router address assigned to the cilium_host interface.
	RouterIPv6 [16]byte `config:"router_ipv6"`
	// IPv4 source address used for SNAT when a Pod talks to itself over a Service.
	ServiceLoopbackIPv4 [4]byte `config:"service_loopback_ipv4"`
	// Whether or not BPF_FIB_LOOKUP_SKIP_NEIGH is supported.
	SupportsFibLookupSkipNeigh bool `config:"supports_fib_lookup_skip_neigh"`
	// Length of payload to capture when tracing native packets.
	TracePayloadLen uint32 `config:"trace_payload_len"`
	// Length of payload to capture when tracing overlay packets.
	TracePayloadLenOverlay uint32 `config:"trace_payload_len_overlay"`
}

// NewNode returns a Node carrying the defaults configured in the ELF. All of
// those defaults are zero, so the Go zero value is already correct.
func NewNode() *Node {
	return &Node{}
}
// Code generated by dpgen. DO NOT EDIT.
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package config
// BPFOverlay is a configuration struct for a Cilium datapath object. Warning:
// do not instantiate directly! Always use [NewBPFOverlay] to ensure the default
// values configured in the ELF are honored.
type BPFOverlay struct {
	// MTU of the device the bpf program is attached to (default: MTU set in
	// node_config.h by agent).
	DeviceMTU uint16 `config:"device_mtu"`
	// Pass traffic with extended IP protocols.
	EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"`
	// Masquerade traffic to remote nodes.
	EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"`
	// Ifindex of the interface the bpf program is attached to.
	InterfaceIfindex uint32 `config:"interface_ifindex"`
	// MAC address of the interface the bpf program is attached to.
	InterfaceMAC [8]byte `config:"interface_mac"`
	// Masquerade address for IPv4 traffic.
	NATIPv4Masquerade [4]byte `config:"nat_ipv4_masquerade"`
	// Masquerade address for IPv6 traffic.
	NATIPv6Masquerade [16]byte `config:"nat_ipv6_masquerade"`
	// Pull security context from IP cache.
	SecctxFromIPCache bool `config:"secctx_from_ipcache"`

	Node
}

// NewBPFOverlay returns a BPFOverlay carrying the defaults configured in the
// ELF. Fields not listed below intentionally start at their Go zero value.
func NewBPFOverlay(node Node) *BPFOverlay {
	return &BPFOverlay{
		DeviceMTU: 0x5dc, // 1500-byte default MTU.
		Node:      node,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package config
import (
"fmt"
"reflect"
)
// StructToMap converts an instance of a Go struct generated by [varsToStruct]
// into a map of configuration values to be passed to LoadCollection.
//
// obj must be a non-nil pointer to a struct. Only struct members with a
// `config` tag are included. The tag value is used as the key in the map, and
// the map value is the runtime value of the member. An error is returned if
// obj is not a pointer to a struct, if a gathered field is missing its tag,
// or if the same tag occurs more than once.
func StructToMap(obj any) (map[string]any, error) {
	toValue := reflect.ValueOf(obj)
	if toValue.Type().Kind() != reflect.Ptr {
		return nil, fmt.Errorf("%T is not a pointer to struct", obj)
	}
	if toValue.IsNil() {
		return nil, fmt.Errorf("nil pointer to %T", obj)
	}

	fields, err := structFields(toValue.Elem(), TagName, nil)
	if err != nil {
		return nil, err
	}

	vars := make(map[string]any, len(fields))
	for _, field := range fields {
		tag := field.Tag.Get(TagName)
		if tag == "" {
			return nil, fmt.Errorf("field %s has no tag", field.Name)
		}
		// Use the comma-ok form for the duplicate check: testing
		// `vars[tag] != nil` would miss a previously-stored nil value
		// (e.g. a tagged nil pointer field) and silently overwrite it.
		if _, ok := vars[tag]; ok {
			return nil, fmt.Errorf("tag %s on field %s occurs multiple times in object", tag, field.Name)
		}
		vars[tag] = field.value.Interface()
	}

	return vars, nil
}
// structField represents a struct field containing a struct tag.
type structField struct {
reflect.StructField
value reflect.Value
}
// structFields recursively gathers all fields of a struct and its nested
// structs that are tagged with the given tag.
func structFields(structVal reflect.Value, tag string, visited map[reflect.Type]bool) ([]structField, error) {
if visited == nil {
visited = make(map[reflect.Type]bool)
}
structType := structVal.Type()
if structType.Kind() != reflect.Struct {
return nil, fmt.Errorf("%s is not a struct", structType)
}
if visited[structType] {
return nil, fmt.Errorf("recursion on type %s", structType)
}
fields := make([]structField, 0, structType.NumField())
for i := range structType.NumField() {
field := structField{structType.Field(i), structVal.Field(i)}
// If the field is tagged, gather it and move on.
name := field.Tag.Get(tag)
if name != "" {
fields = append(fields, field)
continue
}
// If the field does not have an ebpf tag, but is a struct or a pointer
// to a struct, attempt to gather its fields as well.
var v reflect.Value
switch field.Type.Kind() {
case reflect.Ptr:
if field.Type.Elem().Kind() != reflect.Struct {
continue
}
if field.value.IsNil() {
return nil, fmt.Errorf("nil pointer to %s", structType)
}
// Obtain the destination type of the pointer.
v = field.value.Elem()
case reflect.Struct:
// Reference the value's type directly.
v = field.value
default:
continue
}
inner, err := structFields(v, tag, visited)
if err != nil {
return nil, fmt.Errorf("field %s: %w", field.Name, err)
}
fields = append(fields, inner...)
}
return fields, nil
}
// Code generated by dpgen. DO NOT EDIT.
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package config
// BPFWireguard is a configuration struct for a Cilium datapath object. Warning:
// do not instantiate directly! Always use [NewBPFWireguard] to ensure the
// default values configured in the ELF are honored.
type BPFWireguard struct {
	// MTU of the device the bpf program is attached to (default: MTU set in
	// node_config.h by agent).
	DeviceMTU uint16 `config:"device_mtu"`
	// Pass traffic with extended IP protocols.
	EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"`
	// Masquerade traffic to remote nodes.
	EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"`
	// Ifindex of the interface the bpf program is attached to.
	InterfaceIfindex uint32 `config:"interface_ifindex"`
	// MAC address of the interface the bpf program is attached to.
	InterfaceMAC [8]byte `config:"interface_mac"`
	// Masquerade address for IPv4 traffic.
	NATIPv4Masquerade [4]byte `config:"nat_ipv4_masquerade"`
	// Masquerade address for IPv6 traffic.
	NATIPv6Masquerade [16]byte `config:"nat_ipv6_masquerade"`
	// Pull security context from IP cache.
	SecctxFromIPCache bool `config:"secctx_from_ipcache"`

	Node
}

// NewBPFWireguard returns a BPFWireguard carrying the defaults configured in
// the ELF. Fields not listed below intentionally start at their Go zero value.
func NewBPFWireguard(node Node) *BPFWireguard {
	return &BPFWireguard{
		DeviceMTU: 0x5dc, // 1500-byte default MTU.
		Node:      node,
	}
}
// Code generated by dpgen. DO NOT EDIT.
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package config
// BPFXDP is a configuration struct for a Cilium datapath object. Warning: do
// not instantiate directly! Always use [NewBPFXDP] to ensure the default values
// configured in the ELF are honored.
type BPFXDP struct {
	// MTU of the device the bpf program is attached to (default: MTU set in
	// node_config.h by agent).
	DeviceMTU uint16 `config:"device_mtu"`
	// Pass traffic with extended IP protocols.
	EnableExtendedIPProtocols bool `config:"enable_extended_ip_protocols"`
	// Masquerade traffic to remote nodes.
	EnableRemoteNodeMasquerade bool `config:"enable_remote_node_masquerade"`
	// Ifindex of the interface the bpf program is attached to.
	InterfaceIfindex uint32 `config:"interface_ifindex"`
	// MAC address of the interface the bpf program is attached to.
	InterfaceMAC [8]byte `config:"interface_mac"`
	// Masquerade address for IPv4 traffic.
	NATIPv4Masquerade [4]byte `config:"nat_ipv4_masquerade"`
	// Masquerade address for IPv6 traffic.
	NATIPv6Masquerade [16]byte `config:"nat_ipv6_masquerade"`
	// Pull security context from IP cache.
	SecctxFromIPCache bool `config:"secctx_from_ipcache"`

	Node
}

// NewBPFXDP returns a BPFXDP carrying the defaults configured in the ELF.
// Fields not listed below intentionally start at their Go zero value.
func NewBPFXDP(node Node) *BPFXDP {
	return &BPFXDP{
		DeviceMTU: 0x5dc, // 1500-byte default MTU.
		Node:      node,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package defines
import (
"fmt"
"github.com/cilium/hive/cell"
)
// Map is the type containing the key-value pairs representing extra define
// directives for datapath node configuration.
type Map map[string]string

// Merge copies all key-value pairs from other into m. If any key in other is
// already present in m, an error is returned and m is left unmodified, so a
// failed merge never leaves m partially updated.
func (m Map) Merge(other Map) error {
	// Validate every key first so the merge is all-or-nothing; applying while
	// iterating would leave earlier keys merged when a later key conflicts.
	for key := range other {
		if _, ok := m[key]; ok {
			return fmt.Errorf("extra node define overwrites key %q", key)
		}
	}
	for key, value := range other {
		m[key] = value
	}
	return nil
}
// NodeOut allows injecting configuration into the datapath.
type NodeOut struct {
	cell.Out

	// NodeDefines is contributed to the "header-node-defines" value group,
	// from which the collected Maps are consumed.
	NodeDefines Map `group:"header-node-defines"`
}

// Fn is a function returning the key-value pairs representing extra define
// directives for datapath node configuration.
type Fn func() (Map, error)

// NodeFnOut allows injecting configuration into the datapath
// by invoking a callback.
//
// Prefer using [NodeOut] if possible since it has a valid zero value.
type NodeFnOut struct {
	cell.Out

	// Fn must not be nil.
	Fn `group:"header-node-define-fns"`
}

// NewNodeFnOut wraps a function returning the key-value pairs representing
// extra define directives for datapath node configuration, so that it can be
// provided through the hive framework.
func NewNodeFnOut(fn Fn) NodeFnOut {
	return NodeFnOut{Fn: fn}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package safenetlink
import (
"context"
"errors"
"net"
"github.com/vishvananda/netlink"
"github.com/vishvananda/netlink/nl"
"github.com/cilium/cilium/pkg/resiliency"
"github.com/cilium/cilium/pkg/time"
)
const (
	// netlinkRetryInterval is the delay between attempts when a netlink call
	// is retried after netlink.ErrDumpInterrupted.
	netlinkRetryInterval = 1 * time.Millisecond
	// netlinkRetryMax bounds the total number of retry attempts.
	netlinkRetryMax = 30
)
// WithRetry runs the netlinkFunc. If netlinkFunc returns netlink.ErrDumpInterrupted, the function is retried.
// If success or any other error is returned, WithRetry returns immediately, propagating the error.
func WithRetry(netlinkFunc func() error) error {
return resiliency.Retry(context.Background(), netlinkRetryInterval, netlinkRetryMax, func(ctx context.Context, retries int) (bool, error) {
err := netlinkFunc()
if errors.Is(err, netlink.ErrDumpInterrupted) {
return false, nil // retry
}
return true, err
})
}
// WithRetryResult works like WithRetry, but allows netlinkFunc to have a return value besides the error
func WithRetryResult[T any](netlinkFunc func() (T, error)) (out T, err error) {
	retryErr := WithRetry(func() error {
		var callErr error
		out, callErr = netlinkFunc()
		return callErr
	})
	return out, retryErr
}
// AddrList wraps netlink.AddrList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func AddrList(link netlink.Link, family int) ([]netlink.Addr, error) {
	return WithRetryResult(func() ([]netlink.Addr, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.AddrList(link, family)
	})
}

// BridgeVlanList wraps netlink.BridgeVlanList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
	return WithRetryResult(func() (map[int32][]*nl.BridgeVlanInfo, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.BridgeVlanList()
	})
}

// ChainList wraps netlink.ChainList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func ChainList(link netlink.Link, parent uint32) ([]netlink.Chain, error) {
	return WithRetryResult(func() ([]netlink.Chain, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.ChainList(link, parent)
	})
}

// ClassList wraps netlink.ClassList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func ClassList(link netlink.Link, parent uint32) ([]netlink.Class, error) {
	return WithRetryResult(func() ([]netlink.Class, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.ClassList(link, parent)
	})
}

// ConntrackTableList wraps netlink.ConntrackTableList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func ConntrackTableList(table netlink.ConntrackTableType, family netlink.InetFamily) ([]*netlink.ConntrackFlow, error) {
	return WithRetryResult(func() ([]*netlink.ConntrackFlow, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.ConntrackTableList(table, family)
	})
}

// DevLinkGetDeviceList wraps netlink.DevLinkGetDeviceList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func DevLinkGetDeviceList() ([]*netlink.DevlinkDevice, error) {
	return WithRetryResult(func() ([]*netlink.DevlinkDevice, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.DevLinkGetDeviceList()
	})
}

// DevLinkGetAllPortList wraps netlink.DevLinkGetAllPortList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func DevLinkGetAllPortList() ([]*netlink.DevlinkPort, error) {
	return WithRetryResult(func() ([]*netlink.DevlinkPort, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.DevLinkGetAllPortList()
	})
}

// DevlinkGetDeviceParams wraps netlink.DevlinkGetDeviceParams, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func DevlinkGetDeviceParams(bus string, device string) ([]*netlink.DevlinkParam, error) {
	return WithRetryResult(func() ([]*netlink.DevlinkParam, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.DevlinkGetDeviceParams(bus, device)
	})
}

// FilterList wraps netlink.FilterList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func FilterList(link netlink.Link, parent uint32) ([]netlink.Filter, error) {
	return WithRetryResult(func() ([]netlink.Filter, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.FilterList(link, parent)
	})
}

// FouList wraps netlink.FouList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func FouList(fam int) ([]netlink.Fou, error) {
	return WithRetryResult(func() ([]netlink.Fou, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.FouList(fam)
	})
}

// GenlFamilyList wraps netlink.GenlFamilyList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func GenlFamilyList() ([]*netlink.GenlFamily, error) {
	return WithRetryResult(func() ([]*netlink.GenlFamily, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.GenlFamilyList()
	})
}

// GTPPDPList wraps netlink.GTPPDPList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func GTPPDPList() ([]*netlink.PDP, error) {
	return WithRetryResult(func() ([]*netlink.PDP, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.GTPPDPList()
	})
}

// LinkByName wraps netlink.LinkByName, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func LinkByName(name string) (netlink.Link, error) {
	return WithRetryResult(func() (netlink.Link, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.LinkByName(name)
	})
}

// LinkByAlias wraps netlink.LinkByAlias, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func LinkByAlias(alias string) (netlink.Link, error) {
	return WithRetryResult(func() (netlink.Link, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.LinkByAlias(alias)
	})
}
// LinkList wraps netlink.LinkList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func LinkList() ([]netlink.Link, error) {
	return WithRetryResult(func() ([]netlink.Link, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.LinkList()
	})
}

// LinkSubscribeWithOptions wraps netlink.LinkSubscribeWithOptions, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func LinkSubscribeWithOptions(ch chan<- netlink.LinkUpdate, done <-chan struct{}, options netlink.LinkSubscribeOptions) error {
	return WithRetry(func() error {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.LinkSubscribeWithOptions(ch, done, options)
	})
}

// NeighList wraps netlink.NeighList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func NeighList(linkIndex, family int) ([]netlink.Neigh, error) {
	return WithRetryResult(func() ([]netlink.Neigh, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.NeighList(linkIndex, family)
	})
}

// NeighProxyList wraps netlink.NeighProxyList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func NeighProxyList(linkIndex, family int) ([]netlink.Neigh, error) {
	return WithRetryResult(func() ([]netlink.Neigh, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.NeighProxyList(linkIndex, family)
	})
}

// NeighListExecute wraps netlink.NeighListExecute, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func NeighListExecute(msg netlink.Ndmsg) ([]netlink.Neigh, error) {
	return WithRetryResult(func() ([]netlink.Neigh, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.NeighListExecute(msg)
	})
}

// NeighSubscribeWithOptions wraps netlink.NeighSubscribeWithOptions, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func NeighSubscribeWithOptions(ch chan<- netlink.NeighUpdate, done <-chan struct{}, options netlink.NeighSubscribeOptions) error {
	return WithRetry(func() error {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.NeighSubscribeWithOptions(ch, done, options)
	})
}

// LinkGetProtinfo wraps netlink.LinkGetProtinfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func LinkGetProtinfo(link netlink.Link) (netlink.Protinfo, error) {
	return WithRetryResult(func() (netlink.Protinfo, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.LinkGetProtinfo(link)
	})
}

// QdiscList wraps netlink.QdiscList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func QdiscList(link netlink.Link) ([]netlink.Qdisc, error) {
	return WithRetryResult(func() ([]netlink.Qdisc, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.QdiscList(link)
	})
}

// RdmaLinkList wraps netlink.RdmaLinkList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RdmaLinkList() ([]*netlink.RdmaLink, error) {
	return WithRetryResult(func() ([]*netlink.RdmaLink, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RdmaLinkList()
	})
}

// RdmaLinkByName wraps netlink.RdmaLinkByName, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RdmaLinkByName(name string) (*netlink.RdmaLink, error) {
	return WithRetryResult(func() (*netlink.RdmaLink, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RdmaLinkByName(name)
	})
}

// RdmaLinkDel wraps netlink.RdmaLinkDel, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RdmaLinkDel(name string) error {
	return WithRetry(func() error {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RdmaLinkDel(name)
	})
}

// RouteList wraps netlink.RouteList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RouteList(link netlink.Link, family int) ([]netlink.Route, error) {
	return WithRetryResult(func() ([]netlink.Route, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RouteList(link, family)
	})
}

// RouteListFiltered wraps netlink.RouteListFiltered, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RouteListFiltered(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) {
	return WithRetryResult(func() ([]netlink.Route, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RouteListFiltered(family, filter, filterMask)
	})
}

// RouteListFilteredIter wraps netlink.RouteListFilteredIter, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RouteListFilteredIter(family int, filter *netlink.Route, filterMask uint64, f func(netlink.Route) (cont bool)) error {
	return WithRetry(func() error {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RouteListFilteredIter(family, filter, filterMask, f)
	})
}

// RouteSubscribeWithOptions wraps netlink.RouteSubscribeWithOptions, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RouteSubscribeWithOptions(ch chan<- netlink.RouteUpdate, done <-chan struct{}, options netlink.RouteSubscribeOptions) error {
	return WithRetry(func() error {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RouteSubscribeWithOptions(ch, done, options)
	})
}
// RuleList wraps netlink.RuleList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RuleList(family int) ([]netlink.Rule, error) {
	return WithRetryResult(func() ([]netlink.Rule, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RuleList(family)
	})
}

// RuleListFiltered wraps netlink.RuleListFiltered, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func RuleListFiltered(family int, filter *netlink.Rule, filterMask uint64) ([]netlink.Rule, error) {
	return WithRetryResult(func() ([]netlink.Rule, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.RuleListFiltered(family, filter, filterMask)
	})
}

// SocketGet wraps netlink.SocketGet, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func SocketGet(local, remote net.Addr) (*netlink.Socket, error) {
	return WithRetryResult(func() (*netlink.Socket, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.SocketGet(local, remote)
	})
}

// SocketDiagTCPInfo wraps netlink.SocketDiagTCPInfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func SocketDiagTCPInfo(family uint8) ([]*netlink.InetDiagTCPInfoResp, error) {
	return WithRetryResult(func() ([]*netlink.InetDiagTCPInfoResp, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.SocketDiagTCPInfo(family)
	})
}

// SocketDiagTCP wraps netlink.SocketDiagTCP, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func SocketDiagTCP(family uint8) ([]*netlink.Socket, error) {
	return WithRetryResult(func() ([]*netlink.Socket, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.SocketDiagTCP(family)
	})
}

// SocketDiagUDPInfo wraps netlink.SocketDiagUDPInfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func SocketDiagUDPInfo(family uint8) ([]*netlink.InetDiagUDPInfoResp, error) {
	return WithRetryResult(func() ([]*netlink.InetDiagUDPInfoResp, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.SocketDiagUDPInfo(family)
	})
}

// SocketDiagUDP wraps netlink.SocketDiagUDP, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func SocketDiagUDP(family uint8) ([]*netlink.Socket, error) {
	return WithRetryResult(func() ([]*netlink.Socket, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.SocketDiagUDP(family)
	})
}

// UnixSocketDiagInfo wraps netlink.UnixSocketDiagInfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func UnixSocketDiagInfo() ([]*netlink.UnixDiagInfoResp, error) {
	return WithRetryResult(func() ([]*netlink.UnixDiagInfoResp, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.UnixSocketDiagInfo()
	})
}

// UnixSocketDiag wraps netlink.UnixSocketDiag, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func UnixSocketDiag() ([]*netlink.UnixSocket, error) {
	return WithRetryResult(func() ([]*netlink.UnixSocket, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.UnixSocketDiag()
	})
}

// SocketXDPGetInfo wraps netlink.SocketXDPGetInfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func SocketXDPGetInfo(ino uint32, cookie uint64) (*netlink.XDPDiagInfoResp, error) {
	return WithRetryResult(func() (*netlink.XDPDiagInfoResp, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.SocketXDPGetInfo(ino, cookie)
	})
}

// SocketDiagXDP wraps netlink.SocketDiagXDP, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func SocketDiagXDP() ([]*netlink.XDPDiagInfoResp, error) {
	return WithRetryResult(func() ([]*netlink.XDPDiagInfoResp, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.SocketDiagXDP()
	})
}

// VDPAGetDevList wraps netlink.VDPAGetDevList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func VDPAGetDevList() ([]*netlink.VDPADev, error) {
	return WithRetryResult(func() ([]*netlink.VDPADev, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.VDPAGetDevList()
	})
}

// VDPAGetDevConfigList wraps netlink.VDPAGetDevConfigList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func VDPAGetDevConfigList() ([]*netlink.VDPADevConfig, error) {
	return WithRetryResult(func() ([]*netlink.VDPADevConfig, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.VDPAGetDevConfigList()
	})
}

// VDPAGetMGMTDevList wraps netlink.VDPAGetMGMTDevList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func VDPAGetMGMTDevList() ([]*netlink.VDPAMGMTDev, error) {
	return WithRetryResult(func() ([]*netlink.VDPAMGMTDev, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.VDPAGetMGMTDevList()
	})
}

// XfrmPolicyList wraps netlink.XfrmPolicyList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func XfrmPolicyList(family int) ([]netlink.XfrmPolicy, error) {
	return WithRetryResult(func() ([]netlink.XfrmPolicy, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.XfrmPolicyList(family)
	})
}

// XfrmStateList wraps netlink.XfrmStateList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned.
func XfrmStateList(family int) ([]netlink.XfrmState, error) {
	return WithRetryResult(func() ([]netlink.XfrmState, error) {
		//nolint:forbidigo // this package is the sanctioned call site for the raw netlink API.
		return netlink.XfrmStateList(family)
	})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package tunnel
import (
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/kpr"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/option"
)
// Cell is a cell that provides the parameters for the Cilium tunnel,
// based on user configuration and requests from external modules.
var Cell = cell.Module(
	"datapath-tunnel-config",
	"Tunneling configurations",

	cell.Config(defaultConfig),

	cell.Provide(
		newConfig,

		// Provide the datapath options.
		Config.datapathConfigProvider,

		// Enable tunnel configuration when it is the primary routing mode.
		func(dcfg *option.DaemonConfig) EnablerOut {
			return NewEnabler(dcfg.TunnelingEnabled())
		},

		// Enable tunnel configuration when DSR Geneve is enabled (this is currently
		// handled here, as the corresponding logic has not yet been modularized).
		func(kpr kpr.KPRConfig, lbcfg loadbalancer.Config) EnablerOut {
			return NewEnabler(
				// DSR Geneve requires node port handling (explicitly enabled
				// or implied by full kube-proxy replacement).
				(kpr.EnableNodePort ||
					kpr.KubeProxyReplacement == option.KubeProxyReplacementTrue) &&
					lbcfg.LoadBalancerUsesDSR() &&
					lbcfg.DSRDispatch == loadbalancer.DSRDispatchGeneve,
				// The datapath logic takes care of the MTU overhead. So no need to
				// take it into account here.
				// See encap_geneve_dsr_opt[4,6] in nodeport.h
				WithoutMTUAdaptation(),
			)
		},
	),
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package tunnel
import (
"fmt"
"strings"
"github.com/cilium/hive/cell"
"github.com/spf13/pflag"
dpcfgdef "github.com/cilium/cilium/pkg/datapath/linux/config/defines"
"github.com/cilium/cilium/pkg/datapath/linux/safenetlink"
"github.com/cilium/cilium/pkg/defaults"
)
// EncapProtocol represents the valid types of encapsulation protocols.
type EncapProtocol string

// UnderlayProtocol represents the valid types of underlay protocols for the tunnel.
type UnderlayProtocol string

const (
	// VXLAN specifies VXLAN encapsulation
	VXLAN EncapProtocol = "vxlan"

	// Geneve specifies Geneve encapsulation
	Geneve EncapProtocol = "geneve"

	// Disabled specifies to disable encapsulation
	Disabled EncapProtocol = ""

	IPv4 UnderlayProtocol = "ipv4"
	IPv6 UnderlayProtocol = "ipv6"
)

// String returns the protocol name as plain text.
func (tp EncapProtocol) String() string {
	return string(tp)
}

// toDpID maps the encapsulation protocol to the numeric identifier consumed
// by the datapath; any other value (including Disabled) yields "".
func (tp EncapProtocol) toDpID() string {
	if tp == VXLAN {
		return "1"
	}
	if tp == Geneve {
		return "2"
	}
	return ""
}
// Config represents the materialized tunneling configuration to be used,
// depending on the user configuration and optional overrides required by
// additional features.
type Config struct {
    underlay       UnderlayProtocol // IP family of the underlay ("ipv4"/"ipv6")
    protocol       EncapProtocol    // encapsulation protocol; Disabled when tunneling is off
    port           uint16           // tunnel destination port (0 when disabled)
    srcPortLow     uint16           // lower bound of the source port range hint
    srcPortHigh    uint16           // upper bound of the source port range hint
    deviceName     string           // tunnel device name (empty when disabled)
    shouldAdaptMTU bool             // whether MTU must account for encapsulation overhead
}
// newConfigIn bundles the dependencies of newConfig: the user-facing
// configuration and all tunneling enablement requests collected from
// the "request-enable-tunneling" value group.
type newConfigIn struct {
    cell.In

    Cfg      userCfg
    Enablers []enabler `group:"request-enable-tunneling"`
}
var (
    // configDisabled is the sentinel configuration returned both when no
    // feature requested tunneling and when the user configuration is invalid.
    configDisabled = Config{
        underlay:       IPv4,
        protocol:       Disabled,
        port:           0,
        srcPortLow:     0,
        srcPortHigh:    0,
        deviceName:     "",
        shouldAdaptMTU: false,
    }
)
// newConfig materializes the tunnel configuration from the user settings and
// the enablement requests registered by other features. It returns
// configDisabled (with a non-nil error) for invalid settings, and
// configDisabled (without error) when no feature requested tunneling.
func newConfig(in newConfigIn) (Config, error) {
    protocol := EncapProtocol(in.Cfg.TunnelProtocol)
    if protocol != VXLAN && protocol != Geneve {
        return configDisabled, fmt.Errorf("invalid tunnel protocol %q", in.Cfg.TunnelProtocol)
    }

    underlay := UnderlayProtocol(in.Cfg.UnderlayProtocol)
    if underlay != IPv4 && underlay != IPv6 {
        return configDisabled, fmt.Errorf("invalid IP family for underlay %q", in.Cfg.UnderlayProtocol)
    }

    cfg := Config{
        underlay:       underlay,
        protocol:       protocol,
        port:           in.Cfg.TunnelPort,
        srcPortLow:     0,
        srcPortHigh:    0,
        deviceName:     "",
        shouldAdaptMTU: false,
    }

    // The range is expressed as "low-high"; Sscanf errors out unless both
    // bounds parse as integers.
    if _, err := fmt.Sscanf(in.Cfg.TunnelSourcePortRange, "%d-%d", &cfg.srcPortLow, &cfg.srcPortHigh); err != nil {
        return configDisabled, fmt.Errorf("invalid tunnel source port range %q", in.Cfg.TunnelSourcePortRange)
    }

    enabled := false
    for _, req := range in.Enablers {
        if !req.enable {
            continue
        }
        enabled = true
        if req.needsMTUAdaptation {
            cfg.shouldAdaptMTU = true
        }
        for _, validate := range req.validators {
            if err := validate(cfg.protocol); err != nil {
                return configDisabled, err
            }
        }
    }

    if !enabled {
        return configDisabled, nil
    }

    // Fill in the protocol-dependent device name and default port.
    switch cfg.protocol {
    case VXLAN:
        cfg.deviceName = defaults.VxlanDevice
        if cfg.port == 0 {
            cfg.port = defaults.TunnelPortVXLAN
        }
    case Geneve:
        cfg.deviceName = defaults.GeneveDevice
        if cfg.port == 0 {
            cfg.port = defaults.TunnelPortGeneve
        }
    }

    return cfg, nil
}
// NewTestConfig returns a new TunnelConfig for testing purposes.
func NewTestConfig(proto EncapProtocol) Config {
    var (
        port   uint16
        device string
    )
    switch proto {
    case VXLAN:
        port, device = defaults.TunnelPortVXLAN, defaults.VxlanDevice
    case Geneve:
        port, device = defaults.TunnelPortGeneve, defaults.GeneveDevice
    }
    //exhaustruct:ignore // Test code can underspecify the default config
    return Config{protocol: proto, port: port, deviceName: device}
}
// EncapProtocol returns the enabled tunnel protocol. The tunnel protocol may be
// set to either VXLAN or Geneve even when the primary mode is native routing, in
// case an additional feature (e.g., egress gateway) may request some traffic to
// be routed through a tunnel.
func (cfg Config) EncapProtocol() EncapProtocol { return cfg.protocol }

// UnderlayProtocol returns the IP family of the tunnel underlay.
func (cfg Config) UnderlayProtocol() UnderlayProtocol { return cfg.underlay }

// Port returns the port used by the tunnel (0 if disabled).
func (cfg Config) Port() uint16 { return cfg.port }

// SrcPortLow returns the lower src port hint to be used by the tunnel (0 if disabled).
func (cfg Config) SrcPortLow() uint16 { return cfg.srcPortLow }

// SrcPortHigh returns the upper src port hint to be used by the tunnel (0 if disabled).
func (cfg Config) SrcPortHigh() uint16 { return cfg.srcPortHigh }

// DeviceName returns the name of the tunnel device (empty if disabled).
func (cfg Config) DeviceName() string { return cfg.deviceName }

// ShouldAdaptMTU returns whether we should adapt the MTU calculation to
// account for encapsulation.
func (cfg Config) ShouldAdaptMTU() bool { return cfg.shouldAdaptMTU }
// datapathConfigProvider exposes the tunnel settings to the datapath build:
// a static set of defines (protocol IDs, port, source port range) plus a
// late-bound function resolving the tunnel device ifindex at load time.
// Both are empty/no-op when tunneling is disabled.
func (cfg Config) datapathConfigProvider() (dpcfgdef.NodeOut, dpcfgdef.NodeFnOut) {
    defines := make(dpcfgdef.Map)
    definesFn := func() (dpcfgdef.Map, error) { return nil, nil }

    if cfg.EncapProtocol() != Disabled {
        defines[fmt.Sprintf("TUNNEL_PROTOCOL_%s", strings.ToUpper(VXLAN.String()))] = VXLAN.toDpID()
        defines[fmt.Sprintf("TUNNEL_PROTOCOL_%s", strings.ToUpper(Geneve.String()))] = Geneve.toDpID()
        defines["TUNNEL_PROTOCOL"] = cfg.EncapProtocol().toDpID()
        defines["TUNNEL_PORT"] = fmt.Sprintf("%d", cfg.Port())
        defines["TUNNEL_SRC_PORT_LOW"] = fmt.Sprintf("%d", cfg.SrcPortLow())
        defines["TUNNEL_SRC_PORT_HIGH"] = fmt.Sprintf("%d", cfg.SrcPortHigh())

        // The ifindex can only be resolved once the device exists, hence
        // the deferred lookup.
        definesFn = func() (dpcfgdef.Map, error) {
            tunnelDev, err := safenetlink.LinkByName(cfg.DeviceName())
            if err != nil {
                return nil, fmt.Errorf("failed to retrieve device info for %q: %w", cfg.DeviceName(), err)
            }

            return dpcfgdef.Map{
                "ENCAP_IFINDEX": fmt.Sprintf("%d", tunnelDev.Attrs().Index),
            }, nil
        }
    }

    return dpcfgdef.NodeOut{NodeDefines: defines}, dpcfgdef.NewNodeFnOut(definesFn)
}
// EnablerOut allows requesting to enable tunneling functionalities.
type EnablerOut struct {
    cell.Out

    // Enabler is fed into the "request-enable-tunneling" value group
    // consumed by newConfig.
    Enabler enabler `group:"request-enable-tunneling"`
}
// NewEnabler returns an object to be injected through hive to request to
// enable tunneling functionalities. Extra options are meaningful only when
// enable is set to true, and are ignored otherwise.
func NewEnabler(enable bool, opts ...enablerOpt) EnablerOut {
    // By default an enabled tunnel also requires MTU adaptation; options
    // may override this.
    req := enabler{enable: enable, needsMTUAdaptation: enable}
    for _, apply := range opts {
        apply(&req)
    }
    return EnablerOut{Enabler: req}
}
// WithValidator allows to register extra validation functions
// to assert that the configured tunnel protocol matches the one expected by
// the given feature.
func WithValidator(validator func(EncapProtocol) error) enablerOpt {
    return func(te *enabler) {
        te.validators = append(te.validators, validator)
    }
}

// WithoutMTUAdaptation conveys that the given feature request
// to enable tunneling, but the MTU adaptation is already handled externally.
func WithoutMTUAdaptation() enablerOpt {
    return func(te *enabler) {
        te.needsMTUAdaptation = false
    }
}

// enabler is the internal representation of a single tunneling enablement
// request, as created through NewEnabler.
type enabler struct {
    enable             bool
    needsMTUAdaptation bool
    validators         []func(EncapProtocol) error
}

// enablerOpt mutates an enabler before it is published to the hive.
type enablerOpt func(*enabler)
// userCfg wraps the tunnel-related user configurations.
type userCfg struct {
    TunnelProtocol        string // encapsulation protocol ("vxlan" or "geneve")
    TunnelSourcePortRange string // source port range hint in "low-high" form
    TunnelPort            uint16 // destination port; 0 selects the protocol default
    UnderlayProtocol      string // underlay IP family ("ipv4" or "ipv6")
}
// Flags implements the cell.Flagger interface, to register the given flags.
// Defaults come from the defaultConfig instance registered via cell.Config.
func (def userCfg) Flags(flags *pflag.FlagSet) {
    flags.String("tunnel-protocol", def.TunnelProtocol, "Encapsulation protocol to use for the overlay (\"vxlan\" or \"geneve\")")
    flags.Uint16("tunnel-port", def.TunnelPort, fmt.Sprintf("Tunnel port (default %d for \"vxlan\" and %d for \"geneve\")", defaults.TunnelPortVXLAN, defaults.TunnelPortGeneve))
    flags.String("tunnel-source-port-range", def.TunnelSourcePortRange, fmt.Sprintf("Tunnel source port range hint (default %s)", defaults.TunnelSourcePortRange))
    flags.String("underlay-protocol", def.UnderlayProtocol, "IP family for the underlay (\"ipv4\" or \"ipv6\")")
}
// defaultConfig holds the built-in defaults for the user configuration.
var defaultConfig = userCfg{
    TunnelProtocol:        defaults.TunnelProtocol,
    TunnelSourcePortRange: defaults.TunnelSourcePortRange,
    TunnelPort:            0, // auto-detect based on the protocol.
    UnderlayProtocol:      defaults.UnderlayProtocol,
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package debug
import (
"fmt"
"maps"
"github.com/cilium/cilium/pkg/lock"
)
// StatusFunc is a function returning the debug status of a subsystem. It is
// passed into RegisterStatusFunc().
type StatusFunc func() string

// StatusMap is the collection of debug status of all subsystems. The key is
// the subsystem name. The value is the subsystem debug status.
type StatusMap map[string]string

// StatusObject is the interface an object must implement to be able to be
// passed into RegisterStatusObject().
type StatusObject interface {
    // DebugStatus() is the equivalent of StatusFunc. It must return the
    // debug status as a string.
    DebugStatus() string
}

// functionMap maps a subsystem name to its registered status function.
type functionMap map[string]StatusFunc

// statusFunctions is a mutex-protected registry of per-subsystem status
// functions.
type statusFunctions struct {
    functions functionMap
    mutex     lock.RWMutex
}
// newStatusFunctions returns an empty, ready-to-use registry.
func newStatusFunctions() statusFunctions {
    var reg statusFunctions
    reg.functions = make(functionMap)
    return reg
}
// register associates fn with the given subsystem name. It returns an error
// if the subsystem has already been registered.
func (s *statusFunctions) register(name string, fn StatusFunc) error {
    s.mutex.Lock()
    defer s.mutex.Unlock()
    if _, ok := s.functions[name]; ok {
        // Include the name so the offending subsystem is identifiable
        // from the error alone.
        return fmt.Errorf("subsystem %q already registered", name)
    }
    s.functions[name] = fn
    return nil
}
// registerStatusObject registers obj's DebugStatus method as the status
// function for the named subsystem.
func (s *statusFunctions) registerStatusObject(name string, obj StatusObject) error {
    return s.register(name, func() string { return obj.DebugStatus() })
}
// collectStatus invokes every registered status function and returns the
// results keyed by subsystem name. The status functions themselves run
// without the registry lock held.
func (s *statusFunctions) collectStatus() StatusMap {
    // Make a copy to not hold the mutex while collecting the status
    s.mutex.RLock()
    fnCopy := maps.Clone(s.functions)
    s.mutex.RUnlock()

    status := StatusMap{}
    for name, fn := range fnCopy {
        status[name] = fn()
    }
    return status
}
// globalStatusFunctions is the process-wide registry backing the package-level
// Register*/Collect functions.
var globalStatusFunctions = newStatusFunctions()

// RegisterStatusFunc registers a subsystem and associates a status function to
// call for debug status collection
func RegisterStatusFunc(name string, fn StatusFunc) error {
    return globalStatusFunctions.register(name, fn)
}

// RegisterStatusObject registers a subsystem and associated a status object on
// which DebugStatus() is called to collect debug status
func RegisterStatusObject(name string, obj StatusObject) error {
    return globalStatusFunctions.registerStatusObject(name, obj)
}

// CollectSubsystemStatus collects the status of all subsystems and returns it
func CollectSubsystemStatus() StatusMap {
    return globalStatusFunctions.collectStatus()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package id
import (
"fmt"
"math"
"net/netip"
"strconv"
"strings"
)
// MaxEndpointID is the maximum endpoint identifier.
const MaxEndpointID = math.MaxUint16

// PrefixType describes the type of endpoint identifier
type PrefixType string

// String returns the prefix as a plain string.
func (s PrefixType) String() string { return string(s) }
const (
    // CiliumLocalIdPrefix is a numeric identifier with local scope. It has
    // no cluster wide meaning and is only unique in the scope of a single
    // agent. An endpoint is guaranteed to always have a local scope identifier.
    CiliumLocalIdPrefix PrefixType = "cilium-local"

    // CiliumGlobalIdPrefix is an endpoint identifier with global scope.
    // This addressing mechanism is currently unused.
    CiliumGlobalIdPrefix PrefixType = "cilium-global"

    // ContainerIdPrefix is used to address an endpoint via its primary
    // container ID. The container ID is specific to the container runtime
    // in use. Only the primary container that defines the networking scope
    // can be used to address an endpoint.
    // This can only be used to look up endpoints which have not opted-out of
    // legacy identifiers.
    //
    // Deprecated: Use CNIAttachmentIdPrefix instead.
    ContainerIdPrefix PrefixType = "container-id"

    // CNIAttachmentIdPrefix is used to address an endpoint via its primary
    // container ID and container interface passed to the CNI plugin.
    // This attachment ID uniquely identifies a CNI ADD and CNI DEL invocation pair.
    CNIAttachmentIdPrefix PrefixType = "cni-attachment-id"

    // DockerEndpointPrefix is used to address an endpoint via the Docker
    // endpoint ID. This method is only possible if the endpoint was
    // created via the cilium-docker plugin and the container is backed by
    // the libnetwork abstraction.
    DockerEndpointPrefix PrefixType = "docker-endpoint"

    // ContainerNamePrefix is used to address the endpoint via the
    // container's name. This addressing mechanism depends on the container
    // runtime. Only the primary container that defines the networking scope
    // can be used to address an endpoint.
    // This can only be used to look up endpoints which have not opted-out of
    // legacy identifiers.
    //
    // Deprecated: Use CNIAttachmentIdPrefix instead.
    ContainerNamePrefix PrefixType = "container-name"

    // CEPNamePrefix is used to address an endpoint via its Kubernetes
    // CiliumEndpoint resource name. This addressing only works if the endpoint
    // is represented as a Kubernetes CiliumEndpoint resource.
    CEPNamePrefix PrefixType = "cep-name"

    // PodNamePrefix is used to address an endpoint via the Kubernetes pod
    // name. This addressing only works if the endpoint is represented as a
    // Kubernetes pod.
    // This can only be used to look up endpoints which have not opted-out of
    // legacy identifiers.
    //
    // Deprecated: May not be unique. Use CEPNamePrefix instead.
    PodNamePrefix PrefixType = "pod-name"

    // IPv4Prefix is used to address an endpoint via the endpoint's IPv4
    // address.
    IPv4Prefix PrefixType = "ipv4"

    // IPv6Prefix is the prefix used to refer to an endpoint via IPv6 address
    IPv6Prefix PrefixType = "ipv6"
)
// NewCiliumID returns a new endpoint identifier of type CiliumLocalIdPrefix
func NewCiliumID(id int64) string {
    return NewID(CiliumLocalIdPrefix, strconv.FormatInt(id, 10))
}

// NewID returns a new endpoint identifier in the canonical
// "<prefix>:<id>" form.
func NewID(prefix PrefixType, id string) string {
    return string(prefix) + ":" + id
}
// NewIPPrefixID returns an identifier based on the IP address specified. If ip
// is invalid, an empty string is returned.
func NewIPPrefixID(ip netip.Addr) string {
    if !ip.IsValid() {
        return ""
    }
    if ip.Is4() {
        return NewID(IPv4Prefix, ip.String())
    }
    return NewID(IPv6Prefix, ip.String())
}
// NewCNIAttachmentID returns an identifier based on the CNI attachment ID. If
// the containerIfName is empty, only the containerID will be used.
func NewCNIAttachmentID(containerID, containerIfName string) string {
    attachment := containerID
    if containerIfName != "" {
        attachment += ":" + containerIfName
    }
    return NewID(CNIAttachmentIdPrefix, attachment)
}
// splitID splits ID into prefix and id. No validation is performed on prefix.
// IDs without a ':' separator are assumed to carry the default
// CiliumLocalIdPrefix.
func splitID(id string) (PrefixType, string) {
    if prefix, rest, ok := strings.Cut(id, ":"); ok {
        return PrefixType(prefix), rest
    }
    // default prefix
    return CiliumLocalIdPrefix, id
}
// ParseCiliumID parses id as cilium endpoint id and returns numeric portion.
// The numeric portion must be a non-negative integer no larger than
// MaxEndpointID.
func ParseCiliumID(id string) (int64, error) {
    prefix, id := splitID(id)
    if prefix != CiliumLocalIdPrefix {
        return 0, fmt.Errorf("not a cilium identifier")
    }
    n, err := strconv.ParseInt(id, 0, 64)
    if err != nil {
        return 0, fmt.Errorf("invalid numeric cilium id: %w", err)
    }
    // Previously a negative value wrapped a nil error with %w, yielding a
    // garbled "%!w(<nil>)" message; report the offending value instead.
    if n < 0 {
        return 0, fmt.Errorf("invalid numeric cilium id: %d", n)
    }
    if n > MaxEndpointID {
        return 0, fmt.Errorf("endpoint id too large: %d", n)
    }
    return n, nil
}
// Parse parses a string as an endpoint identifier consisting of an optional
// prefix [prefix:] followed by the identifier.
func Parse(id string) (PrefixType, string, error) {
    prefix, suffix := splitID(id)
    switch prefix {
    case CiliumLocalIdPrefix,
        CiliumGlobalIdPrefix,
        CNIAttachmentIdPrefix,
        ContainerIdPrefix,
        DockerEndpointPrefix,
        ContainerNamePrefix,
        CEPNamePrefix,
        PodNamePrefix,
        IPv4Prefix,
        IPv6Prefix:
        return prefix, suffix, nil
    default:
        return "", "", fmt.Errorf("unknown endpoint ID prefix \"%s\"", prefix)
    }
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package regeneration
import (
"log/slog"
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/hive"
)
// Fence delays the endpoint regeneration until all registered wait functions
// have returned.
//
// A new type around [hive.Fence] to give it a unique type that can be provided
// to Hive.
type Fence hive.Fence

// NewFence constructs a Fence bound to the given hive lifecycle and logger.
func NewFence(lc cell.Lifecycle, log *slog.Logger) Fence {
    return hive.NewFence(lc, log)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package regeneration
import (
"context"
)
// DatapathRegenerationLevel determines what is expected of the datapath when
// a regeneration event is processed.
type DatapathRegenerationLevel int

const (
    // Invalid is the default level to enforce explicit setting of
    // the regeneration level.
    Invalid DatapathRegenerationLevel = iota
    // RegenerateWithoutDatapath indicates that datapath rebuild or reload
    // is not required to implement this regeneration.
    RegenerateWithoutDatapath
    // RegenerateWithDatapath indicates that the datapath must be
    // recompiled and reloaded to implement this regeneration.
    RegenerateWithDatapath
)

// String converts a DatapathRegenerationLevel into a human-readable string.
func (r DatapathRegenerationLevel) String() string {
    switch r {
    case Invalid:
        return "invalid"
    case RegenerateWithoutDatapath:
        return "no-rebuild"
    case RegenerateWithDatapath:
        return "rewrite+load"
    default:
        // Any other level is a programming error.
        return "BUG: Unknown DatapathRegenerationLevel"
    }
}
// ExternalRegenerationMetadata contains any information about a regeneration that
// the endpoint subsystem should be made aware of for a given endpoint.
type ExternalRegenerationMetadata struct {
    // Reason provides context to source for the regeneration, which is
    // used to generate useful log messages.
    Reason string

    // RegenerationLevel forces datapath regeneration according to the
    // levels defined in the DatapathRegenerationLevel description.
    RegenerationLevel DatapathRegenerationLevel

    // ParentContext optionally carries the caller's context for the
    // regeneration.
    ParentContext context.Context
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package envoypolicy
import (
"context"
"fmt"
"log/slog"
"strings"
cilium "github.com/cilium/proxy/go/cilium/api"
envoy_config_route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
envoy_type_matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
"k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/pkg/crypto/certificatemanager"
"github.com/cilium/cilium/pkg/logging/logfields"
policyapi "github.com/cilium/cilium/pkg/policy/api"
)
// EnvoyL7RulesTranslator translates Cilium HTTP L7 policy rules into their
// Envoy network policy representation.
type EnvoyL7RulesTranslator interface {
    GetEnvoyHTTPRules(l7Rules *policyapi.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool)
}

// envoyL7RulesTranslator is the default implementation, resolving
// header-match secrets through the configured SecretManager.
type envoyL7RulesTranslator struct {
    logger        *slog.Logger
    secretManager certificatemanager.SecretManager
}

// NewEnvoyL7RulesTranslator returns a translator using the given logger and
// secret manager.
func NewEnvoyL7RulesTranslator(logger *slog.Logger, secretManager certificatemanager.SecretManager) EnvoyL7RulesTranslator {
    return &envoyL7RulesTranslator{
        logger:        logger,
        secretManager: secretManager,
    }
}
// GetEnvoyHTTPRules translates the given HTTP L7 rules into sorted Envoy
// policy rules. The boolean result is 'canShortCircuit': true when rule
// evaluation may stop at the first allowing rule, false when any translated
// rule carries side effects and all applicable rules must be evaluated.
func (r *envoyL7RulesTranslator) GetEnvoyHTTPRules(l7Rules *policyapi.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool) {
    if len(l7Rules.HTTP) > 0 { // Just cautious. This should never be false.
        // Assume none of the rules have side-effects so that rule evaluation can
        // be stopped as soon as the first allowing rule is found. 'canShortCircuit'
        // is set to 'false' below if any rules with side effects are encountered,
        // causing all the applicable rules to be evaluated instead.
        canShortCircuit := true

        httpRules := make([]*cilium.HttpNetworkPolicyRule, 0, len(l7Rules.HTTP))
        for _, l7 := range l7Rules.HTTP {
            rule, cs := r.getHTTPRule(&l7, ns)
            httpRules = append(httpRules, rule)
            if !cs {
                canShortCircuit = false
            }
        }

        SortHTTPNetworkPolicyRules(httpRules)
        return &cilium.HttpNetworkPolicyRules{
            HttpRules: httpRules,
        }, canShortCircuit
    }
    return nil, true
}
// getHTTPRule converts a single PortRuleHTTP into an Envoy
// HttpNetworkPolicyRule. Path/Method/Host and plain Headers become Envoy
// header matchers; HeaderMatches become either header matchers or cilium
// HeaderMatch entries depending on their mismatch action and whether a
// secret/inline value is available. The boolean result is true when the
// produced rule is side-effect free (no HeaderMatches entries), i.e. it is
// safe to short-circuit evaluation on it.
func (r *envoyL7RulesTranslator) getHTTPRule(h *policyapi.PortRuleHTTP, ns string) (*cilium.HttpNetworkPolicyRule, bool) {
    // Count the number of header matches we need
    cnt := len(h.Headers) + len(h.HeaderMatches)
    if h.Path != "" {
        cnt++
    }
    if h.Method != "" {
        cnt++
    }
    if h.Host != "" {
        cnt++
    }

    headers := make([]*envoy_config_route.HeaderMatcher, 0, cnt)
    // Path, method, and host are matched via Envoy pseudo-headers using
    // safe-regex matchers.
    if h.Path != "" {
        headers = append(headers, &envoy_config_route.HeaderMatcher{
            Name: ":path",
            HeaderMatchSpecifier: &envoy_config_route.HeaderMatcher_StringMatch{
                StringMatch: &envoy_type_matcher.StringMatcher{
                    MatchPattern: &envoy_type_matcher.StringMatcher_SafeRegex{
                        SafeRegex: &envoy_type_matcher.RegexMatcher{
                            Regex: h.Path,
                        },
                    },
                },
            },
        })
    }
    if h.Method != "" {
        headers = append(headers, &envoy_config_route.HeaderMatcher{
            Name: ":method",
            HeaderMatchSpecifier: &envoy_config_route.HeaderMatcher_StringMatch{
                StringMatch: &envoy_type_matcher.StringMatcher{
                    MatchPattern: &envoy_type_matcher.StringMatcher_SafeRegex{
                        SafeRegex: &envoy_type_matcher.RegexMatcher{
                            Regex: h.Method,
                        },
                    },
                },
            },
        })
    }
    if h.Host != "" {
        headers = append(headers, &envoy_config_route.HeaderMatcher{
            Name: ":authority",
            HeaderMatchSpecifier: &envoy_config_route.HeaderMatcher_StringMatch{
                StringMatch: &envoy_type_matcher.StringMatcher{
                    MatchPattern: &envoy_type_matcher.StringMatcher_SafeRegex{
                        SafeRegex: &envoy_type_matcher.RegexMatcher{
                            Regex: h.Host,
                        },
                    },
                },
            },
        })
    }
    // Plain headers are given as "Name[: value]" strings.
    for _, hdr := range h.Headers {
        strs := strings.SplitN(hdr, " ", 2)
        if len(strs) == 2 {
            // Remove ':' in "X-Key: true"
            key := strings.TrimRight(strs[0], ":")
            // Header presence and matching (literal) value needed.
            headers = append(headers, &envoy_config_route.HeaderMatcher{
                Name: key,
                HeaderMatchSpecifier: &envoy_config_route.HeaderMatcher_StringMatch{
                    StringMatch: &envoy_type_matcher.StringMatcher{
                        MatchPattern: &envoy_type_matcher.StringMatcher_Exact{
                            Exact: strs[1],
                        },
                    },
                },
            })
        } else {
            // Only header presence needed
            headers = append(headers, &envoy_config_route.HeaderMatcher{
                Name:                 strs[0],
                HeaderMatchSpecifier: &envoy_config_route.HeaderMatcher_PresentMatch{PresentMatch: true},
            })
        }
    }

    headerMatches := make([]*cilium.HeaderMatch, 0, len(h.HeaderMatches))
    for _, hdr := range h.HeaderMatches {
        var mismatch_action cilium.HeaderMatch_MismatchAction
        switch hdr.Mismatch {
        case policyapi.MismatchActionLog:
            mismatch_action = cilium.HeaderMatch_CONTINUE_ON_MISMATCH
        case policyapi.MismatchActionAdd:
            mismatch_action = cilium.HeaderMatch_ADD_ON_MISMATCH
        case policyapi.MismatchActionDelete:
            mismatch_action = cilium.HeaderMatch_DELETE_ON_MISMATCH
        case policyapi.MismatchActionReplace:
            mismatch_action = cilium.HeaderMatch_REPLACE_ON_MISMATCH
        default:
            mismatch_action = cilium.HeaderMatch_FAIL_ON_MISMATCH
        }
        // Fetch the secret
        value, err := r.getSecretString(hdr, ns)
        if err != nil {
            r.logger.Warn("Failed fetching K8s Secret, header match will fail", logfields.Error, err)
            // Envoy treats an empty exact match value as matching ANY value; adding
            // InvertMatch: true here will cause this rule to NEVER match.
            headers = append(headers, &envoy_config_route.HeaderMatcher{
                Name: hdr.Name,
                HeaderMatchSpecifier: &envoy_config_route.HeaderMatcher_StringMatch{
                    StringMatch: &envoy_type_matcher.StringMatcher{
                        MatchPattern: &envoy_type_matcher.StringMatcher_Exact{
                            Exact: "",
                        },
                    },
                },
                InvertMatch: true,
            })
        } else if value != "" {
            // Inline value provided.
            // Header presence and matching (literal) value needed.
            if mismatch_action == cilium.HeaderMatch_FAIL_ON_MISMATCH {
                // fail on mismatch gets converted for regular HeaderMatcher
                headers = append(headers, &envoy_config_route.HeaderMatcher{
                    Name: hdr.Name,
                    HeaderMatchSpecifier: &envoy_config_route.HeaderMatcher_StringMatch{
                        StringMatch: &envoy_type_matcher.StringMatcher{
                            MatchPattern: &envoy_type_matcher.StringMatcher_Exact{
                                Exact: value,
                            },
                        },
                    },
                })
            } else {
                r.logger.Debug("HeaderMatches: Adding header", logfields.Name, hdr.Name)
                headerMatches = append(headerMatches, &cilium.HeaderMatch{
                    MismatchAction: mismatch_action,
                    Name:           hdr.Name,
                    Value:          value,
                })
            }
        } else if hdr.Secret == nil {
            // No inline value and no secret.
            // Header presence for FAIL_ON_MISMSTCH or matching empty value otherwise needed.
            if mismatch_action == cilium.HeaderMatch_FAIL_ON_MISMATCH {
                // Only header presence needed
                headers = append(headers, &envoy_config_route.HeaderMatcher{
                    Name:                 hdr.Name,
                    HeaderMatchSpecifier: &envoy_config_route.HeaderMatcher_PresentMatch{PresentMatch: true},
                })
            } else {
                r.logger.Debug("HeaderMatches: Adding header for an empty value", logfields.Name, hdr.Name)
                headerMatches = append(headerMatches, &cilium.HeaderMatch{
                    MismatchAction: mismatch_action,
                    Name:           hdr.Name,
                })
            }
        } else {
            // A secret is set, so we transform to an SDS value.
            // cilium-envoy takes care of treating this as a presence match if the
            // secret exists with an empty value.
            r.logger.Debug("HeaderMatches: Adding header because SDS value is required", logfields.Name, hdr.Name)
            headerMatches = append(headerMatches, &cilium.HeaderMatch{
                MismatchAction: mismatch_action,
                Name:           hdr.Name,
                ValueSdsSecret: namespacedNametoSyncedSDSSecretName(types.NamespacedName{
                    Namespace: hdr.Secret.Namespace,
                    Name:      hdr.Secret.Name,
                }, r.secretManager.GetSecretSyncNamespace()),
            })
        }
    }
    if len(headers) == 0 {
        headers = nil
    } else {
        SortHeaderMatchers(headers)
    }
    if len(headerMatches) == 0 {
        headerMatches = nil
    } else {
        // Optimally we should sort the headerMatches to avoid
        // updating the policy if only the order of the rules
        // has changed. Right now, when 'headerMatches' is a
        // slice (rather than a map) the order only changes if
        // the order of the rules in the imported policies
        // changes, so there is minimal likelihood of
        // unnecessary policy updates.

        // SortHeaderMatches(headerMatches)
    }

    return &cilium.HttpNetworkPolicyRule{Headers: headers, HeaderMatches: headerMatches}, len(headerMatches) == 0
}
// getSecretString resolves the value of a header match: the referenced K8s
// secret when set and readable, falling back to the inline Value otherwise.
// NOTE(review): the secret lookup uses context.TODO() — no caller context is
// available at this layer.
func (r *envoyL7RulesTranslator) getSecretString(hdr *policyapi.HeaderMatch, ns string) (string, error) {
    value := ""
    var err error
    if hdr.Secret != nil {
        value, err = r.secretManager.GetSecretString(context.TODO(), hdr.Secret, ns)
    }
    // Only use Value if secret was not obtained
    if value == "" && hdr.Value != "" {
        value = hdr.Value
        if err != nil {
            r.logger.Debug("HeaderMatches: Using a default value due to k8s secret not being available", logfields.Error, err)
            err = nil
        }
    }
    return value, err
}
// namespacedNametoSyncedSDSSecretName derives the SDS secret name for a
// policy secret reference. When secret synchronization is enabled (non-empty
// policySecretsNamespace), the synced copy lives in that namespace under a
// "<namespace>-<name>" suffix; otherwise the original reference is used as-is.
func namespacedNametoSyncedSDSSecretName(namespacedName types.NamespacedName, policySecretsNamespace string) string {
    if policySecretsNamespace == "" {
        return namespacedName.Namespace + "/" + namespacedName.Name
    }
    return policySecretsNamespace + "/" + namespacedName.Namespace + "-" + namespacedName.Name
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package envoypolicy
import (
"sort"
cilium "github.com/cilium/proxy/go/cilium/api"
envoy_config_route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
)
// PortNetworkPolicySlice implements sort.Interface to sort a slice of
// *cilium.PortNetworkPolicy.
type PortNetworkPolicySlice []*cilium.PortNetworkPolicy

func (s PortNetworkPolicySlice) Len() int {
    return len(s)
}

// Less orders policies by protocol, then port, then rule count, then by a
// pairwise comparison of their (assumed sorted) rules.
func (s PortNetworkPolicySlice) Less(i, j int) bool {
    p1, p2 := s[i], s[j]

    switch {
    case p1.Protocol < p2.Protocol:
        return true
    case p1.Protocol > p2.Protocol:
        return false
    }

    switch {
    case p1.Port < p2.Port:
        return true
    case p1.Port > p2.Port:
        return false
    }

    rules1, rules2 := p1.Rules, p2.Rules
    switch {
    case len(rules1) < len(rules2):
        return true
    case len(rules1) > len(rules2):
        return false
    }
    // Assuming that the slices are sorted.
    for idx := range rules1 {
        r1, r2 := rules1[idx], rules2[idx]
        switch {
        case PortNetworkPolicyRuleLess(r1, r2):
            return true
        case PortNetworkPolicyRuleLess(r2, r1):
            return false
        }
    }

    // Elements are equal.
    return false
}

func (s PortNetworkPolicySlice) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

// SortPortNetworkPolicies sorts the given slice in place and returns
// the sorted slice for convenience.
func SortPortNetworkPolicies(policies []*cilium.PortNetworkPolicy) []*cilium.PortNetworkPolicy {
    sort.Sort(PortNetworkPolicySlice(policies))
    return policies
}
// PortNetworkPolicyRuleSlice implements sort.Interface to sort a slice of
// *cilium.PortNetworkPolicyRuleSlice.
type PortNetworkPolicyRuleSlice []*cilium.PortNetworkPolicyRule

// PortNetworkPolicyRuleLess reports whether the r1 rule should sort before
// the r2 rule.
// L3-L4-only rules are less than L7 rules.
func PortNetworkPolicyRuleLess(r1, r2 *cilium.PortNetworkPolicyRule) bool {
    // TODO: Support Kafka.

    // Rules without HTTP rules sort before rules with them.
    http1, http2 := r1.GetHttpRules(), r2.GetHttpRules()
    switch {
    case http1 == nil && http2 != nil:
        return true
    case http1 != nil && http2 == nil:
        return false
    }

    if http1 != nil && http2 != nil {
        httpRules1, httpRules2 := http1.HttpRules, http2.HttpRules
        switch {
        case len(httpRules1) < len(httpRules2):
            return true
        case len(httpRules1) > len(httpRules2):
            return false
        }
        // Assuming that the slices are sorted.
        for idx := range httpRules1 {
            httpRule1, httpRule2 := httpRules1[idx], httpRules2[idx]
            switch {
            case HTTPNetworkPolicyRuleLess(httpRule1, httpRule2):
                return true
            case HTTPNetworkPolicyRuleLess(httpRule2, httpRule1):
                return false
            }
        }
    }

    // Tie-break on the remote policy lists (assumed sorted).
    remotePolicies1, remotePolicies2 := r1.RemotePolicies, r2.RemotePolicies
    switch {
    case len(remotePolicies1) < len(remotePolicies2):
        return true
    case len(remotePolicies1) > len(remotePolicies2):
        return false
    }
    // Assuming that the slices are sorted.
    for idx := range remotePolicies1 {
        p1, p2 := remotePolicies1[idx], remotePolicies2[idx]
        switch {
        case p1 < p2:
            return true
        case p1 > p2:
            return false
        }
    }

    // Elements are equal.
    return false
}

func (s PortNetworkPolicyRuleSlice) Len() int {
    return len(s)
}

func (s PortNetworkPolicyRuleSlice) Less(i, j int) bool {
    return PortNetworkPolicyRuleLess(s[i], s[j])
}

func (s PortNetworkPolicyRuleSlice) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

// SortPortNetworkPolicyRules sorts the given slice in place
// and returns the sorted slice for convenience.
func SortPortNetworkPolicyRules(rules []*cilium.PortNetworkPolicyRule) []*cilium.PortNetworkPolicyRule {
    sort.Sort(PortNetworkPolicyRuleSlice(rules))
    return rules
}
// HTTPNetworkPolicyRuleSlice implements sort.Interface to sort a slice of
// *cilium.HttpNetworkPolicyRule.
type HTTPNetworkPolicyRuleSlice []*cilium.HttpNetworkPolicyRule

// HTTPNetworkPolicyRuleLess reports whether the r1 rule should sort before the
// r2 rule. Rules are ordered by header-matcher count, then by a pairwise
// comparison of their (assumed sorted) header matchers.
func HTTPNetworkPolicyRuleLess(r1, r2 *cilium.HttpNetworkPolicyRule) bool {
    headers1, headers2 := r1.Headers, r2.Headers
    switch {
    case len(headers1) < len(headers2):
        return true
    case len(headers1) > len(headers2):
        return false
    }
    // Assuming that the slices are sorted.
    for idx := range headers1 {
        header1, header2 := headers1[idx], headers2[idx]
        switch {
        case HeaderMatcherLess(header1, header2):
            return true
        case HeaderMatcherLess(header2, header1):
            return false
        }
    }

    // Elements are equal.
    return false
}

func (s HTTPNetworkPolicyRuleSlice) Len() int {
    return len(s)
}

func (s HTTPNetworkPolicyRuleSlice) Less(i, j int) bool {
    return HTTPNetworkPolicyRuleLess(s[i], s[j])
}

func (s HTTPNetworkPolicyRuleSlice) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

// SortHTTPNetworkPolicyRules sorts the given slice.
func SortHTTPNetworkPolicyRules(rules []*cilium.HttpNetworkPolicyRule) {
    sort.Sort(HTTPNetworkPolicyRuleSlice(rules))
}
// HeaderMatcherSlice implements sort.Interface to sort a slice of
// *envoy_config_route.HeaderMatcher.
type HeaderMatcherSlice []*envoy_config_route.HeaderMatcher

// HeaderMatcherLess reports whether the m1 matcher should sort before the m2
// matcher. Matchers are ordered by name first, then by each match-specifier
// variant in turn, and finally by the InvertMatch flag.
func HeaderMatcherLess(m1, m2 *envoy_config_route.HeaderMatcher) bool {
    switch {
    case m1.Name < m2.Name:
        return true
    case m1.Name > m2.Name:
        return false
    }

    // Compare the header_match_specifier oneof field, by comparing each
    // possible field in the oneof individually:
    // - exactMatch
    // - regexMatch
    // - rangeMatch
    // - presentMatch
    // - prefixMatch
    // - suffixMatch
    // Use the getters to access the fields and return zero values when they
    // are not set.

    s1 := m1.GetExactMatch()
    s2 := m2.GetExactMatch()
    switch {
    case s1 < s2:
        return true
    case s1 > s2:
        return false
    }

    srm1 := m1.GetSafeRegexMatch()
    srm2 := m2.GetSafeRegexMatch()
    switch {
    case srm1 == nil && srm2 != nil:
        return true
    case srm1 != nil && srm2 == nil:
        return false
    case srm1 != nil && srm2 != nil:
        switch {
        case srm1.Regex < srm2.Regex:
            return true
        case srm1.Regex > srm2.Regex:
            return false
        }
    }

    rm1 := m1.GetRangeMatch()
    rm2 := m2.GetRangeMatch()
    switch {
    case rm1 == nil && rm2 != nil:
        return true
    case rm1 != nil && rm2 == nil:
        return false
    case rm1 != nil && rm2 != nil:
        switch {
        case rm1.Start < rm2.Start:
            return true
        case rm1.Start > rm2.Start:
            return false
        }
        switch {
        case rm1.End < rm2.End:
            return true
        case rm1.End > rm2.End:
            return false
        }
    }

    switch {
    case !m1.GetPresentMatch() && m2.GetPresentMatch():
        return true
    case m1.GetPresentMatch() && !m2.GetPresentMatch():
        return false
    }

    s1 = m1.GetPrefixMatch()
    s2 = m2.GetPrefixMatch()
    switch {
    case s1 < s2:
        return true
    case s1 > s2:
        return false
    }

    s1 = m1.GetSuffixMatch()
    s2 = m2.GetSuffixMatch()
    switch {
    case s1 < s2:
        return true
    case s1 > s2:
        return false
    }

    // Last tie-breaker: non-inverted matchers sort before inverted ones.
    switch {
    case !m1.InvertMatch && m2.InvertMatch:
        return true
    case m1.InvertMatch && !m2.InvertMatch:
        return false
    }

    // Elements are equal.
    return false
}

func (s HeaderMatcherSlice) Len() int {
    return len(s)
}

func (s HeaderMatcherSlice) Less(i, j int) bool {
    return HeaderMatcherLess(s[i], s[j])
}

func (s HeaderMatcherSlice) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

// SortHeaderMatchers sorts the given slice.
func SortHeaderMatchers(headers []*envoy_config_route.HeaderMatcher) {
    sort.Sort(HeaderMatcherSlice(headers))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package flowdebug
// perFlowDebug records whether per-flow debugging has been enabled.
// NOTE(review): plain bool, not atomic — assumes Enable is called during
// single-threaded startup before any concurrent readers; confirm callers.
var perFlowDebug = false

// Enable enables per-flow debugging. There is no way to disable it again.
func Enable() {
	perFlowDebug = true
}

// Enabled reports the status of per-flow debugging.
func Enabled() bool {
	return perFlowDebug
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Based on code from github.com/miekg/dns which is:
//
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2011 Miek Gieben. All rights reserved.
// Copyright 2014 CloudFlare. All rights reserved.
package dns
import "strings"
// These functions were copied and adapted from github.com/miekg/dns.

// isFQDN reports whether the domain name s is fully qualified, i.e. ends in
// an unescaped dot.
func isFQDN(s string) bool {
	// A fully qualified name must end in a dot.
	if !strings.HasSuffix(s, ".") {
		return false
	}
	body := strings.TrimSuffix(s, ".")
	// No escape character right before the final dot: definitely qualified.
	if !strings.HasSuffix(body, `\`) {
		return true
	}
	// The final dot is preceded by one or more backslashes. It is escaped
	// (and the name therefore not fully qualified) exactly when the run of
	// trailing backslashes has odd length.
	lastNonEscape := strings.LastIndexFunc(body, func(r rune) bool {
		return r != '\\'
	})
	return (len(body)-lastNonEscape)%2 != 0
}

// FQDN returns the fully qualified domain name from s in lower case.
// If s is already fully qualified, only the case is normalized.
func FQDN(s string) string {
	lowered := strings.ToLower(s)
	if isFQDN(s) {
		return lowered
	}
	return lowered + "."
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package matchpattern
import (
"errors"
"regexp"
"strings"
"github.com/cilium/cilium/pkg/fqdn/dns"
"github.com/cilium/cilium/pkg/fqdn/re"
)
// allowedDNSCharsREGroup is the regexp character class matching one character
// that may appear in a DNS label: alphanumerics, "-" and "_".
const allowedDNSCharsREGroup = "[-a-zA-Z0-9_]"

// MatchAllAnchoredPattern is the simplest pattern that matches all inputs.
// This resulting parsed regular expression is the same as an empty string
// regex (""), but this value is easier to reason about when serializing to
// and from json.
const MatchAllAnchoredPattern = "(?:)"

// MatchAllUnAnchoredPattern is the same as MatchAllAnchoredPattern, except
// that it can be or-ed (joined with "|") with other rules, and still match
// all rules.
const MatchAllUnAnchoredPattern = ".*"
// Validate ensures that pattern is a parsable matchPattern. It returns the
// regexp generated when validating, compiled through the shared regex LRU.
func Validate(pattern string) (matcher *regexp.Regexp, err error) {
	if err = prevalidate(pattern); err != nil {
		return nil, err
	}
	return re.CompileRegex(ToAnchoredRegexp(pattern))
}
// ValidateWithoutCache is the same as Validate() but compiles the pattern
// directly instead of consulting the regex LRU.
func ValidateWithoutCache(pattern string) (matcher *regexp.Regexp, err error) {
	if err = prevalidate(pattern); err != nil {
		return nil, err
	}
	return regexp.Compile(ToAnchoredRegexp(pattern))
}
// prevalidate rejects patterns that contain characters which are never legal
// in a matchPattern, before any regexp conversion takes place.
func prevalidate(pattern string) error {
	normalized := strings.ToLower(strings.TrimSpace(pattern))
	if !strings.ContainsAny(normalized, "[]+{},") {
		return nil
	}
	return errors.New(`Only alphanumeric ASCII characters, the hyphen "-", underscore "_", "." and "*" are allowed in a matchPattern`)
}
// Sanitize canonicalizes the pattern for use by ToAnchoredRegexp.
// The lone wildcard "*" is kept as-is; anything else is converted to a
// fully qualified, lower-cased DNS name.
func Sanitize(pattern string) string {
	if pattern != "*" {
		return dns.FQDN(pattern)
	}
	return pattern
}
// ToAnchoredRegexp converts a MatchPattern field into a regexp string. It
// does not validate the pattern. Anchors are added so the expression must
// match the whole input.
// It supports:
// * to select 0 or more DNS valid characters
func ToAnchoredRegexp(pattern string) string {
	pattern = strings.ToLower(strings.TrimSpace(pattern))
	if pattern == "*" {
		// Match-all: one or more dot-terminated DNS labels, or the root ".".
		return "(^(" + allowedDNSCharsREGroup + "+[.])+$)|(^[.]$)"
	}
	// Anchor the match to require the whole string to match this expression.
	return "^" + escapeRegexpCharacters(pattern) + "$"
}
// ToUnAnchoredRegexp converts a MatchPattern field into a regexp string. It
// does not validate the pattern and adds no anchors, so the result can be
// or-ed ("|") with other expressions.
// It supports:
// * to select 0 or more DNS valid characters
func ToUnAnchoredRegexp(pattern string) string {
	pattern = strings.ToLower(strings.TrimSpace(pattern))
	if pattern == "*" {
		// The lone wildcard matches everything.
		return MatchAllUnAnchoredPattern
	}
	return escapeRegexpCharacters(pattern)
}
// escapeRegexpCharacters rewrites the matchPattern meta characters into their
// regexp equivalents: "." becomes the literal class "[.]" and "*" becomes a
// run of zero or more allowed DNS characters.
func escapeRegexpCharacters(pattern string) string {
	// Escape "." first; this only works because the expansion of "*" below
	// introduces no further "." characters.
	escaped := strings.ReplaceAll(pattern, ".", "[.]")
	return strings.ReplaceAll(escaped, "*", allowedDNSCharsREGroup+"*")
}
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package matchpattern
// FuzzMatchpatternValidate is a fuzzing harness exercising Validate with
// arbitrary byte input. It always returns 1; presumably the go-fuzz
// convention for "input was processed" — confirm against the fuzz driver.
func FuzzMatchpatternValidate(data []byte) int {
	_, _ = Validate(string(data))
	return 1
}
// FuzzMatchpatternValidateWithoutCache is a fuzzing harness exercising
// ValidateWithoutCache with arbitrary byte input. It always returns 1;
// presumably the go-fuzz convention for "input was processed" — confirm
// against the fuzz driver.
func FuzzMatchpatternValidateWithoutCache(data []byte) int {
	_, _ = ValidateWithoutCache(string(data))
	return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package re provides a simple function to access compile regex objects for
// the FQDN subsystem.
package re
import (
"fmt"
"log/slog"
"regexp"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/option"
)
// CompileRegex compiles a pattern p into a regex and returns the regex
// object. Results are memoized in the package-level LRU: a cached entry is
// returned directly; otherwise p is compiled and stored. Compilation
// failures are wrapped and returned, and nothing is cached for them.
func CompileRegex(p string) (*regexp.Regexp, error) {
	if cached, ok := regexCompileLRU.cache.Get(p); ok {
		return cached, nil
	}
	compiled, err := regexp.Compile(p)
	if err != nil {
		return nil, fmt.Errorf("failed to compile regex: %w", err)
	}
	regexCompileLRU.cache.Add(p, compiled)
	return compiled, nil
}
// Resize adjusts the capacity of the regex compilation LRU. A size of zero
// is treated as "effectively unlimited" (16 million entries) and logs a
// warning, since the cache may then grow without bound.
func Resize(logger *slog.Logger, size uint) {
	if size == 0 {
		logger.Warn(fmt.Sprintf(
			"FQDN regex compilation LRU size is unlimited, which can grow unbounded potentially consuming too much memory. Consider passing a maximum size via --%s.",
			option.FQDNRegexCompileLRUSize,
		))
		// effectively unlimited
		size = 16_000_000
	}
	regexCompileLRU.cache.Resize(int(size))
}
// newRegexCache constructs the LRU used to memoize compiled regexes.
// lru.New only fails for a non-positive size, so an error is treated as a
// programming bug and turned into a panic.
func newRegexCache(size uint) *RegexCompileLRU {
	cache, err := lru.New[string, *regexp.Regexp](int(size))
	if err != nil {
		// Unreachable for any non-zero size.
		panic(err)
	}
	return &RegexCompileLRU{cache: cache}
}
// regexCompileLRU is the singleton instance of the LRU that's shared
// throughout Cilium. It is created eagerly at package initialization with
// the default size and may be resized later via Resize.
var regexCompileLRU = newRegexCache(defaults.FQDNRegexCompileLRUSize)

// RegexCompileLRU is an LRU cache for storing compiled regex objects of FQDN
// names or patterns, used in CiliumNetworkPolicy or
// ClusterwideCiliumNetworkPolicy.
type RegexCompileLRU struct {
	// cache maps a pattern string to its compiled regular expression.
	cache *lru.Cache[string, *regexp.Regexp]
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package fswatcher
import (
"hash/fnv"
"io"
"log/slog"
"os"
"path/filepath"
"sync"
"sync/atomic"
"testing"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
const (
	// defaultInterval is how often tracked targets are checked for changes
	// by default.
	defaultInterval = 5 * time.Second
	// testInterval is used instead when fswatcher detects that it runs in a
	// test (see New); the filesystem is then polled more frequently.
	testInterval = 50 * time.Millisecond
)

// Event closely resembles what fsnotify.Event provided.
type Event struct {
	// Path to the file or directory.
	Name string
	// File operation that triggered the event.
	//
	// This is a bitmask and some systems may send multiple operations at once.
	// Use the Event.Has() method instead of comparing with ==.
	Op Op
}

// Op describes a set of file operations.
type Op uint

// Subset from fsnotify.
const (
	// A new pathname was created.
	Create Op = 1 << iota
	// The pathname was written to; this does *not* mean the write has finished,
	// and a write can be followed by more writes.
	Write
	// The path was removed.
	Remove
)

// Has reports if this operation has the given operation.
func (o Op) Has(h Op) bool { return o&h != 0 }

// Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
// Watcher implements a file polling mechanism which can track non-existing
// files and emit creation events for them. All files which are supposed to be
// tracked need to passed to the New constructor.
//
// When a directory is passed in as a tracked file, the watcher will watch all
// the files inside that directory, including recursion into any subdirectories.
//
// One of the primary use cases for the watcher is tracking kubernetes projected
// secrets which create a maze of symlinks. It is safe to watch symlink targets
// as they are properly resolved, even in the case of multiple symlinks chained
// together. Only the content of the final destination is considered when
// issuing Write events.
type Watcher struct {
	logger *slog.Logger
	// Events is used to signal changes to any of the tracked files. It is
	// guaranteed that Event.Name will always match one of the file paths
	// passed in trackedFiles to the constructor. This channel is unbuffered
	// and must be read by the consumer to avoid deadlocks.
	Events chan Event
	// Errors reports any errors which may occur while watching. This channel
	// is unbuffered and must be read by the consumer to avoid deadlocks.
	Errors chan error
	// tracked holds the per-path state observed during the last poll.
	tracked map[string]state // tracking state
	// silent suppresses notifications while still updating tracking state;
	// used for the initial discovery pass in New.
	silent atomic.Bool // track updates but do not send notifications
	// control the interval at which the watcher checks for changes
	interval time.Duration
	ticker   <-chan time.Time
	// stop channel used to indicate shutdown
	stop chan struct{}
	// wg waits for the polling goroutine to exit on Close.
	wg sync.WaitGroup
}

// state is the snapshot of a tracked path taken during a poll; a subsequent
// poll compares against it to detect changes.
type state struct {
	path  string      // tracked path as asked by the user
	info  os.FileInfo // stat info of the file, or the target if symlink
	sum64 uint64      // checksum of the file, or the target if symlink
}
// Option to configure the Watcher at construction time.
type Option func(*Watcher)

// WithInterval overrides the interval at which the Watcher polls tracked
// files for changes.
func WithInterval(d time.Duration) Option {
	return func(w *Watcher) { w.interval = d }
}
// New creates a new Watcher which watches all trackedFile paths (they do not
// need to exist yet). When running under `go test`, the faster testInterval
// polling rate is selected automatically.
func New(defaultLogger *slog.Logger, trackedFiles []string, options ...Option) (*Watcher, error) {
	interval := defaultInterval
	if testing.Testing() {
		interval = testInterval
	}
	w := &Watcher{
		logger:   defaultLogger.With(logfields.LogSubsys, "fswatcher"),
		Events:   make(chan Event),
		Errors:   make(chan error),
		stop:     make(chan struct{}),
		interval: interval,
		silent:   atomic.Bool{},
	}
	// Options may override the defaults above (e.g. the polling interval).
	for _, option := range options {
		option(w)
	}
	// make a map of tracked files and assign them all empty state at the start
	tracked := make(map[string]state, len(trackedFiles))
	for _, f := range trackedFiles {
		tracked[f] = state{path: f}
	}
	w.tracked = tracked
	// do the initial discovery of the state of tracked files in silent mode and
	// only issue notifications afterwards.
	w.silent.Store(true)
	w.tick()
	w.silent.Store(false)
	// NOTE(review): the ticker from time.Tick is never stopped; presumably
	// acceptable because a Watcher lives for the process lifetime — confirm.
	w.ticker = time.Tick(w.interval)
	w.wg.Add(1)
	go w.loop()
	return w, nil
}
// Close terminates the watcher: it signals the polling goroutine to stop and
// blocks until it has exited. Close must be called at most once; a second
// call would close the stop channel again and panic.
func (w *Watcher) Close() {
	close(w.stop)
	w.wg.Wait()
}
// loop is the watcher's polling goroutine: it re-scans the tracked paths on
// every ticker tick and exits once the stop channel is closed.
func (w *Watcher) loop() {
	defer w.wg.Done()
	for {
		select {
		case <-w.stop:
			return
		case <-w.ticker:
			w.tick()
		}
	}
}
// tick performs one polling pass over all tracked paths: it stats every
// path, expands directories into their entries (recursively, as discovered
// entries are appended to the work list), checksums regular files, and
// compares against the previous pass to emit Create/Write/Remove events.
func (w *Watcher) tick() {
	// get all the paths that are currently known and are being tracked and visit
	// them in order. It's done this way because the `w.tracked` map can be
	// modified as new directories are discovered.
	var order []string
	for path := range w.tracked {
		order = append(order, path)
	}
	idx := -1 // start out of bounds because idx++ is done at the start of the loop
	for {
		idx++
		if idx >= len(order) || idx < 0 {
			break
		}
		path := order[idx]
		oldState, ok := w.tracked[path]
		if !ok {
			// not sure how this can be possible, but better safe than sorry
			continue
		}
		var (
			oldInfo  = oldState.info
			newState = state{path: oldState.path}
		)
		// os.Stat follows symlinks, os.Lstat doesn't
		info, err := os.Stat(path)
		newState.info = info
		if os.IsNotExist(err) {
			// if the path does not exist, check if it existed before because if it
			// did -- issue a deletion event
			if oldState.info != nil {
				// this file was deleted
				w.sendEvent(Event{
					Name: path,
					Op:   Remove,
				})
				// clear out old state from the map
				w.tracked[oldState.path] = newState
			}
			continue
		}
		// some other type of error encountered while doing os.Stat
		if err != nil {
			w.sendError(err)
			continue
		}
		// when encountering a directory as a tracked path, list it's contents and
		// track those, including a recursion into subdirectories.
		if info.IsDir() {
			de, err := os.ReadDir(path)
			if err != nil {
				// Unreadable directories are silently skipped this pass.
				continue
			}
			for _, f := range de {
				fp := filepath.Join(path, f.Name())
				if _, ok := w.tracked[fp]; ok {
					// this file is already being tracked, skip it
					continue
				}
				// "schedule" this file to be checked at the end the order
				order = append(order, fp)
				w.tracked[fp] = state{path: fp}
			}
			// nothing else needs to be done for directory handling
			continue
		}
		// compute the checksum of the file/symlink which is subsequently used to
		// issue Write notifications
		file, err := os.Open(path)
		if err != nil {
			w.sendError(err)
			continue
		}
		h := fnv.New64()
		_, err = io.Copy(h, file)
		_ = file.Close()
		if err != nil {
			w.sendError(err)
			continue
		}
		newState.sum64 = h.Sum64()
		if oldState.info == nil {
			// haven't seen info for this track path before -- issue a creation
			op := Create
			// issue Create&Write if the file has data
			if info.Size() > 0 {
				op |= Write
			}
			// this is a new file
			w.sendEvent(Event{
				Name: path,
				Op:   op,
			})
		} else {
			// have seen this file/symlink before -- lets see if it changed size or contents
			if info.Size() != oldInfo.Size() || newState.sum64 != oldState.sum64 {
				w.sendEvent(Event{
					Name: path,
					Op:   Write,
				})
			}
		}
		w.tracked[oldState.path] = newState
	}
}
// sendEvent delivers e on the unbuffered Events channel unless the watcher
// is in silent mode. A blocked send is abandoned when the watcher stops.
func (w *Watcher) sendEvent(e Event) {
	if w.silent.Load() {
		return
	}
	select {
	case <-w.stop:
	case w.Events <- e:
		w.logger.Debug("sent fswatcher event", logfields.Event, e)
	}
}
// sendError delivers err on the unbuffered Errors channel unless the watcher
// is in silent mode. A blocked send is abandoned when the watcher stops.
func (w *Watcher) sendError(err error) {
	if w.silent.Load() {
		return
	}
	select {
	case <-w.stop:
	case w.Errors <- err:
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"sort"
	"strings"
	"time"

	runtime_client "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	clientapi "github.com/cilium/cilium/api/v1/health/client"
	"github.com/cilium/cilium/api/v1/health/models"
	"github.com/cilium/cilium/pkg/health/defaults"
)
// ConnectivityStatusType classifies the result of a connectivity probe.
type ConnectivityStatusType int

const (
	// ipUnavailable is the placeholder printed when a node has no usable
	// primary address.
	ipUnavailable = "Unavailable"

	// ConnStatusReachable indicates a successful probe.
	ConnStatusReachable ConnectivityStatusType = 0
	// ConnStatusUnreachable indicates a probe that failed with a reason.
	ConnStatusUnreachable ConnectivityStatusType = 1
	// ConnStatusUnknown indicates that no probe result is available yet.
	ConnStatusUnknown ConnectivityStatusType = 2
)

// String returns the human-readable name of the status; any unrecognized
// value is reported as "unknown".
func (c ConnectivityStatusType) String() string {
	if c == ConnStatusReachable {
		return "reachable"
	}
	if c == ConnStatusUnreachable {
		return "unreachable"
	}
	return "unknown"
}
// Client is a client for cilium health. It embeds the generated
// CiliumHealthAPI client, exposing its service fields (e.g. Connectivity)
// directly.
type Client struct {
	clientapi.CiliumHealthAPI
}
func configureTransport(tr *http.Transport, proto, addr string) *http.Transport {
if tr == nil {
tr = &http.Transport{}
}
if proto == "unix" {
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return net.Dial(proto, addr)
}
} else {
tr.Proxy = http.ProxyFromEnvironment
tr.Dial = (&net.Dialer{}).Dial
}
return tr
}
// NewDefaultClient creates a client with default parameters connecting to UNIX
// domain socket. The socket path may be overridden through the environment;
// see NewClient.
func NewDefaultClient() (*Client, error) {
	return NewClient("")
}
// NewClient creates a client for the given `host`, which must look like
// "scheme://address". An empty host selects the UNIX domain socket from the
// environment (defaults.SockPathEnv) or the compiled-in default path.
func NewClient(host string) (*Client, error) {
	if host == "" {
		// Prefer the socket path from the environment, falling back to the
		// built-in default.
		sockPath := os.Getenv(defaults.SockPathEnv)
		if sockPath == "" {
			sockPath = defaults.SockPath
		}
		host = "unix://" + sockPath
	}
	scheme, rest, found := strings.Cut(host, "://")
	if !found {
		return nil, fmt.Errorf("invalid host format '%s'", host)
	}
	hostHeader := rest
	switch scheme {
	case "tcp":
		if _, err := url.Parse("tcp://" + rest); err != nil {
			return nil, err
		}
		host = "http://" + rest
	case "unix":
		host = rest
		// For local communication (unix domain sockets), the hostname is not
		// used, but it must pass net/http client-side sanitization (see
		// https://go.dev/issue/60374), so "localhost" is used as a
		// placeholder Host header.
		hostHeader = "localhost"
	}
	transport := configureTransport(nil, scheme, host)
	httpClient := &http.Client{Transport: transport}
	clientTrans := runtime_client.NewWithClient(hostHeader, clientapi.DefaultBasePath,
		clientapi.DefaultSchemes, httpClient)
	return &Client{*clientapi.New(clientTrans, strfmt.Default)}, nil
}
// Hint tries to improve the error message displayed to the user: the message
// is URL-unescaped for readability, and errors mentioning the agent socket
// path get a reminder to check that the agent is running.
func Hint(err error) error {
	if err == nil {
		return nil
	}
	msg, _ := url.PathUnescape(err.Error())
	if strings.Contains(err.Error(), defaults.SockPath) {
		return fmt.Errorf("%s\nIs the agent running?", msg)
	}
	return fmt.Errorf("%s", msg)
}
// GetConnectivityStatusType classifies a single probe result.
func GetConnectivityStatusType(cs *models.ConnectivityStatus) ConnectivityStatusType {
	// If the connectivity status is nil, it means that there was no
	// successful probe, but also no failed probe with a concrete reason. In
	// that case, the status is unknown and it usually means that the node
	// is still in the beginning of the bootstrapping process.
	if cs == nil {
		return ConnStatusUnknown
	}
	// Empty status means successful probe.
	if cs.Status == "" {
		return ConnStatusReachable
	}
	// Non-empty status means that there was an explicit reason of failure.
	return ConnStatusUnreachable
}
// GetPathConnectivityStatusType summarizes the ICMP and HTTP probe results
// of a single path. Unreachable wins over unknown, which wins over
// reachable; a nil path is reported as unreachable.
func GetPathConnectivityStatusType(cp *models.PathStatus) ConnectivityStatusType {
	if cp == nil {
		return ConnStatusUnreachable
	}
	// Start from the optimistic assumption and downgrade as probes dictate.
	result := ConnStatusReachable
	for _, probe := range []*models.ConnectivityStatus{cp.Icmp, cp.HTTP} {
		switch GetConnectivityStatusType(probe) {
		case ConnStatusUnreachable:
			// Any explicit failure makes the whole path unreachable.
			return ConnStatusUnreachable
		case ConnStatusUnknown:
			// Remember the unknown, but keep scanning for failures.
			result = ConnStatusUnknown
		}
	}
	return result
}
// SummarizePathConnectivityStatusType returns a histogram mapping each
// ConnectivityStatusType to the number of paths currently in that state.
func SummarizePathConnectivityStatusType(cps []*models.PathStatus) map[ConnectivityStatusType]int {
	counts := make(map[ConnectivityStatusType]int)
	for _, path := range cps {
		counts[GetPathConnectivityStatusType(path)]++
	}
	return counts
}
// formatConnectivityStatus writes one probe result line to w. Reachable
// probes are rendered as "OK" with their round-trip time; otherwise the raw
// status text from the probe is printed.
func formatConnectivityStatus(w io.Writer, cs *models.ConnectivityStatus, path, indent string) {
	status := cs.Status
	if GetConnectivityStatusType(cs) == ConnStatusReachable {
		status = fmt.Sprintf("OK, RTT=%s", time.Duration(cs.Latency))
	}
	fmt.Fprintf(w, "%s%s:\t%s\t(Last probed: %s)\n", indent, path, status, cs.LastProbed)
}
// formatPathStatus prints the ICMP/HTTP probe result lines for one path.
// A nil path is only mentioned in verbose mode.
func formatPathStatus(w io.Writer, name string, cp *models.PathStatus, indent string, verbose bool) {
	if cp == nil {
		if verbose {
			fmt.Fprintf(w, "%s%s connectivity:\tnil\n", indent, name)
		}
		return
	}
	fmt.Fprintf(w, "%s%s connectivity to %s:\n", indent, name, cp.IP)
	indent = fmt.Sprintf("%s  ", indent)
	if cp.Icmp != nil {
		formatConnectivityStatus(w, cp.Icmp, "ICMP to stack", indent)
	}
	if cp.HTTP != nil {
		formatConnectivityStatus(w, cp.HTTP, "HTTP to agent", indent)
	}
}
// allPathsAreHealthyOrUnknown checks whether ICMP and TCP(HTTP) connectivity
// to the given paths is available or had no explicit error status (which
// usually is the case while a new node is being provisioned). A nil entry
// in cps counts as unhealthy.
func allPathsAreHealthyOrUnknown(cps []*models.PathStatus) bool {
	for _, cp := range cps {
		if cp == nil {
			return false
		}
		for _, probe := range []*models.ConnectivityStatus{cp.Icmp, cp.HTTP} {
			if GetConnectivityStatusType(probe) == ConnStatusUnreachable {
				return false
			}
		}
	}
	return true
}
// nodeIsHealthy reports whether every known host and endpoint path of the
// node is reachable or of unknown status.
func nodeIsHealthy(node *models.NodeStatus) bool {
	return allPathsAreHealthyOrUnknown(GetAllHostAddresses(node)) &&
		allPathsAreHealthyOrUnknown(GetAllEndpointAddresses(node))
}

// nodeIsLocalhost reports whether node describes the local node, as
// identified by the self status.
func nodeIsLocalhost(node *models.NodeStatus, self *models.SelfStatus) bool {
	return self != nil && node.Name == self.Name
}

// getPrimaryAddressIP returns the node's primary host IP, or the
// "Unavailable" placeholder when no primary address is known.
func getPrimaryAddressIP(node *models.NodeStatus) string {
	host := node.Host
	if host == nil || host.PrimaryAddress == nil {
		return ipUnavailable
	}
	return host.PrimaryAddress.IP
}
// GetHostPrimaryAddress returns the PrimaryAddress for the Host within node.
// If node.Host is nil, returns nil.
func GetHostPrimaryAddress(node *models.NodeStatus) *models.PathStatus {
	if host := node.Host; host != nil {
		return host.PrimaryAddress
	}
	return nil
}

// GetHostSecondaryAddresses returns the secondary host addresses (if any).
func GetHostSecondaryAddresses(node *models.NodeStatus) []*models.PathStatus {
	if host := node.Host; host != nil {
		return host.SecondaryAddresses
	}
	return nil
}

// GetAllHostAddresses returns a list of all addresses (primary and any
// secondary) for the host of a given node. If node.Host is nil, returns nil.
func GetAllHostAddresses(node *models.NodeStatus) []*models.PathStatus {
	host := node.Host
	if host == nil {
		return nil
	}
	all := []*models.PathStatus{host.PrimaryAddress}
	return append(all, host.SecondaryAddresses...)
}

// GetEndpointPrimaryAddress returns the PrimaryAddress for the health
// endpoint within node. If node.HealthEndpoint is nil, returns nil.
func GetEndpointPrimaryAddress(node *models.NodeStatus) *models.PathStatus {
	if ep := node.HealthEndpoint; ep != nil {
		return ep.PrimaryAddress
	}
	return nil
}

// GetEndpointSecondaryAddresses returns the secondary health endpoint
// addresses (if any).
func GetEndpointSecondaryAddresses(node *models.NodeStatus) []*models.PathStatus {
	if ep := node.HealthEndpoint; ep != nil {
		return ep.SecondaryAddresses
	}
	return nil
}

// GetAllEndpointAddresses returns a list of all addresses (primary and any
// secondary) for the health endpoint within a given node.
// If node.HealthEndpoint is nil, returns nil.
func GetAllEndpointAddresses(node *models.NodeStatus) []*models.PathStatus {
	ep := node.HealthEndpoint
	if ep == nil {
		return nil
	}
	all := []*models.PathStatus{ep.PrimaryAddress}
	return append(all, ep.SecondaryAddresses...)
}
// formatNodeStatus prints the status of a single node to w and reports
// whether anything was printed.
//
// In verbose mode the full host/endpoint path breakdown is always printed.
// Otherwise a one-line summary is printed when the node is unhealthy, or for
// healthy nodes too when allNodes is set; healthy nodes are skipped
// (returning false) when allNodes is unset.
func formatNodeStatus(w io.Writer, node *models.NodeStatus, allNodes, verbose, localhost bool) bool {
	localStr := ""
	if localhost {
		localStr = " (localhost)"
	}
	if verbose {
		formatNodeStatusVerbose(w, node, localStr)
		return true
	}
	// The summary line was previously duplicated verbatim in separate
	// "unhealthy" and "allNodes" branches; both share one helper now.
	if !nodeIsHealthy(node) || allNodes {
		formatNodeStatusSummary(w, node, localStr)
		return true
	}
	return false
}

// formatNodeStatusVerbose prints the per-path breakdown of a node's host and
// endpoint addresses, including all secondary addresses.
func formatNodeStatusVerbose(w io.Writer, node *models.NodeStatus, localStr string) {
	fmt.Fprintf(w, "  %s%s:\n", node.Name, localStr)
	formatPathStatus(w, "Host", GetHostPrimaryAddress(node), "    ", true)
	// In verbose mode secondary addresses are always printed (the original
	// "verbose || unhealthyPaths" guard is a tautology here).
	if node.Host != nil {
		for _, addr := range node.Host.SecondaryAddresses {
			formatPathStatus(w, "Secondary Host", addr, "      ", true)
		}
	}
	formatPathStatus(w, "Endpoint", GetEndpointPrimaryAddress(node), "    ", true)
	if node.HealthEndpoint != nil {
		for _, addr := range node.HealthEndpoint.SecondaryAddresses {
			formatPathStatus(w, "Secondary Endpoint", addr, "      ", true)
		}
	}
}

// formatNodeStatusSummary prints the one-line reachable-count summary for a
// node's host and endpoint addresses.
func formatNodeStatusSummary(w io.Writer, node *models.NodeStatus, localStr string) {
	hostStatuses := SummarizePathConnectivityStatusType(GetAllHostAddresses(node))
	endpointStatuses := SummarizePathConnectivityStatusType(GetAllEndpointAddresses(node))
	// Collect the primary IP plus all non-nil secondary host IPs.
	ips := []string{getPrimaryAddressIP(node)}
	for _, addr := range GetHostSecondaryAddresses(node) {
		if addr == nil {
			continue
		}
		ips = append(ips, addr.IP)
	}
	fmt.Fprintf(w, "  %s%s\t%s\t%d/%d", node.Name, localStr, strings.Join(ips, ","), hostStatuses[ConnStatusReachable], len(GetAllHostAddresses(node)))
	if hostStatuses[ConnStatusUnknown] > 0 {
		fmt.Fprintf(w, " (%d unknown)", hostStatuses[ConnStatusUnknown])
	}
	fmt.Fprintf(w, "\t%d/%d", endpointStatuses[ConnStatusReachable], len(GetAllEndpointAddresses(node)))
	if endpointStatuses[ConnStatusUnknown] > 0 {
		fmt.Fprintf(w, " (%d unknown)", endpointStatuses[ConnStatusUnknown])
	}
	fmt.Fprintf(w, "\n")
}
// FormatHealthStatusResponse writes a HealthStatusResponse as a string to the
// writer.
//
// 'allNodes', if true, causes all nodes to be printed regardless of status
// 'verbose', if true, prints all information
// 'maxLines', if nonzero, determines the maximum number of lines to print
func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, allNodes bool, verbose bool, maxLines int) {
	var (
		healthy      int
		localhost    *models.NodeStatus
		printedLines int
	)
	// First pass: count healthy nodes and find the local node so it can be
	// printed before all others.
	for _, node := range sr.Nodes {
		if nodeIsHealthy(node) {
			healthy++
		}
		if nodeIsLocalhost(node, sr.Local) {
			localhost = node
		}
	}
	fmt.Fprintf(w, "Cluster health:\t%d/%d reachable\t(%s)\t(Probe interval: %s)\n",
		healthy, len(sr.Nodes), sr.Timestamp, sr.ProbeInterval)
	fmt.Fprintf(w, "Name\tIP\tNode\tEndpoints\n")
	if localhost != nil {
		if formatNodeStatus(w, localhost, allNodes, verbose, true) {
			printedLines++
		}
	}
	// Print the remaining nodes sorted by name. Note this sorts sr.Nodes in
	// place.
	nodes := sr.Nodes
	sort.Slice(nodes, func(i, j int) bool {
		return strings.Compare(nodes[i].Name, nodes[j].Name) < 0
	})
	for _, node := range nodes {
		// NOTE(review): when maxLines is 0 and no localhost line was printed
		// this loop exits immediately; presumably callers pass maxLines > 0
		// or rely on the localhost line — confirm.
		if printedLines == maxLines {
			break
		}
		if node == localhost {
			continue
		}
		if formatNodeStatus(w, node, allNodes, verbose, false) {
			printedLines++
		}
	}
	// Indicate that some non-healthy nodes were omitted by the line limit.
	if len(sr.Nodes)-printedLines-healthy > 0 {
		fmt.Fprintf(w, "  ...\n")
	}
}
// GetAndFormatHealthStatus fetches the health status from the cilium-health
// daemon via the default channel and formats its output as a string to the
// writer.
//
// 'verbose' and 'maxLines' are handled the same as in
// FormatHealthStatusResponse().
func GetAndFormatHealthStatus(w io.Writer, allNodes bool, verbose bool, maxLines int) {
	healthClient, err := NewClient("")
	if err != nil {
		fmt.Fprintf(w, "Cluster health:\t\t\tClient error: %s\n", err)
		return
	}
	status, err := healthClient.Connectivity.GetStatus(nil)
	if err != nil {
		// The regular `cilium status` output will print the reason why.
		fmt.Fprintf(w, "Cluster health:\t\t\tWarning\tcilium-health daemon unreachable\n")
		return
	}
	FormatHealthStatusResponse(w, status.Payload, allNodes, verbose, maxLines)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"fmt"
"io"
"os"
"sort"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/duration"
"github.com/cilium/cilium/pkg/hive/health/types"
)
const (
	// noPod is a placeholder pod identifier.
	// NOTE(review): its use is outside this view — confirm where it appears.
	noPod = "(/)"
	// rootNode is the value of the synthetic tree root used by
	// GetAndFormatModulesHealth before the root is stripped from the output.
	rootNode = ""
	// noErr is the textual representation of a nil error.
	// NOTE(review): its use is outside this view — confirm where it appears.
	noErr = "<nil>"
)
// GetAndFormatModulesHealth retrieves modules health and formats output.
// In verbose mode the reports are rendered as a tree (with the synthetic
// root stripped); otherwise a single tally line of stopped/degraded/ok
// counts is printed.
func GetAndFormatModulesHealth(w io.Writer, ss []types.Status, verbose bool, prefix string) {
	// Although status' is received from the statedb remote table according to
	// the order in which it's queried (in our case, by primary index identifier).
	// We sort this to ensure order stability regardless.
	sort.Slice(ss, func(i, j int) bool {
		return ss[i].ID.String() < ss[j].ID.String()
	})
	if verbose {
		r := newRoot(rootNode)
		for _, s := range ss {
			stack := strings.Split(s.ID.String(), ".")
			// NOTE(review): &s relies on per-iteration loop variables
			// (Go >= 1.22); with earlier toolchains all inserted reports
			// would alias one variable — confirm the module's Go version.
			upsertTree(r, &s, stack)
		}
		// Drop the synthetic root and promote its first child; an empty tree
		// produces no output at all.
		if len(r.nodes) != 0 {
			r = r.nodes[0]
			r.parent = nil
		} else {
			return
		}
		// Prefix every rendered line.
		body := strings.ReplaceAll(r.String(), "\n", "\n"+prefix)
		fmt.Fprintln(w, prefix+body)
		return
	}
	// Non-verbose: tally the number of reports per severity level.
	tally := make(map[types.Level]int, 4)
	for _, s := range ss {
		tally[types.Level(s.Level)] += 1
	}
	fmt.Fprintf(w, "\t%s(%d) %s(%d) %s(%d)\n",
		types.LevelStopped,
		tally[types.LevelStopped],
		types.LevelDegraded,
		tally[types.LevelDegraded],
		types.LevelOK,
		tally[types.LevelOK],
	)
}
// TreeView renders module health reports as a tree rooted at "agent".
type TreeView struct {
	root *node
}

// NewTreeView returns an empty TreeView.
func NewTreeView() *TreeView {
	return &TreeView{root: newRoot("agent")}
}

// Render writes the current tree to standard output, preceded by a blank
// line.
func (t *TreeView) Render() {
	fmt.Fprintln(os.Stdout, "\n"+t.root.String())
}

// UpsertStatus merges the given status reports into the tree, keyed by each
// report's dot-separated identifier.
func (t *TreeView) UpsertStatus(ss []types.Status) {
	for _, report := range ss {
		upsertTree(t.root, &report, strings.Split(report.ID.String(), "."))
	}
}
// upsertTree inserts a health report, using a stack of path tokens into
// a tree used for displaying health data.
//
// Because there is no longer a distinction between reporter leaves and parent nodes
// (i.e. parents of subtrees can have their own health status) we modify the tree to
// move all such "parent" reports down to a immediate child, such that in our output
// all health reports appear as leaves.
func upsertTree(r *node, report *types.Status, stack []string) {
	// Base case: one token left, so r is the direct parent of the report's
	// leaf. Update a matching child in place, or append a new one.
	if len(stack) == 1 {
		name := stack[0]
		meta := fmt.Sprintf("[%s] %s", strings.ToUpper(string(report.Level)), report.Message)
		meta += fmt.Sprintf(" (%s, x%d)", ToAgeHuman(report.Updated), report.Count)
		for _, c := range r.nodes {
			if c.val == name {
				c.meta = meta
				c.report = report
				return
			}
		}
		r.addNodeWithMeta(name, meta, report)
		return
	}
	// Descend one level, consuming the first token.
	pop := stack[0]
	stack = stack[1:]
	for _, c := range r.nodes {
		if c.val == pop {
			// In this case, if the node was a leaf, it may contain a status.
			// Because parent nodes can now also have health status reports we
			// fix this up by moving the report to a leaf node, thus maintaining
			// the condition that only leaves have reporters.
			if c.report != nil {
				// Move former parent nodes health report to child leaf.
				upsertTree(c, c.report, []string{"[reporter]"})
				c.report = nil
				c.meta = ""
			}
			upsertTree(c, report, stack)
			return
		}
	}
	// Add parent node.
	n := r.addNode(pop, nil)
	upsertTree(n, report, stack)
}
// ToAgeHuman renders the elapsed time since t in a human-readable form.
// The zero time is reported as "n/a".
func ToAgeHuman(t time.Time) string {
	if !t.IsZero() {
		return duration.HumanDuration(time.Since(t))
	}
	return "n/a"
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"bytes"
"fmt"
"io"
"slices"
"sort"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/hive/health/types"
)
const (
	// indentSize is the indentation applied per tree level.
	// NOTE(review): consumed by rendering code outside this view — confirm.
	indentSize = 3
	// leafMaxWidth bounds the column used when aligning leaf values.
	// NOTE(review): consumed by rendering code outside this view — confirm.
	leafMaxWidth = 40

	// Box-drawing glyphs used to connect tree nodes.
	link decoration = "│"
	mid  decoration = "├──"
	end  decoration = "└──"
)

// decoration is a tree-drawing glyph.
type decoration string
func newRoot(r string) *node {
return &node{val: r}
}
type node struct {
val, meta string
parent *node
nodes []*node
report *types.Status
}
// addNode appends a child with the given value and report and no meta
// text, returning the new child.
func (n *node) addNode(v string, r *types.Status) *node {
	const noMeta = ""
	return n.addNodeWithMeta(v, noMeta, r)
}
// addNodeWithMeta appends a child carrying a value, meta text and an
// optional health report, returning the new child.
func (n *node) addNodeWithMeta(v, m string, r *types.Status) *node {
	child := &node{
		parent: n,
		val:    v,
		meta:   m,
		report: r,
	}
	n.nodes = append(n.nodes, child)
	return child
}
// addBranch appends a child branch (no meta text) and returns it.
func (n *node) addBranch(v string) *node {
	var emptyMeta string
	return n.addBranchWithMeta(v, emptyMeta)
}
// addBranchWithMeta appends a child branch with meta text and returns it.
// Unlike addNodeWithMeta, no health report is attached.
func (n *node) addBranchWithMeta(v, m string) *node {
	branch := &node{
		parent: n,
		val:    v,
		meta:   m,
	}
	n.nodes = append(n.nodes, branch)
	return branch
}
// find returns the first node in the subtree rooted at n whose value
// equals val (depth-first, children in insertion order), or nil.
func (n *node) find(val string) *node {
	if n.val == val {
		return n
	}
	for _, child := range n.nodes {
		// The recursive call checks child.val itself first, so no
		// separate direct-child comparison is needed.
		if match := child.find(val); match != nil {
			return match
		}
	}
	return nil
}
// asBytes renders the tree rooted at n into its textual box-drawing
// representation and returns the bytes.
func (n *node) asBytes() []byte {
	var (
		w = new(bytes.Buffer)
		// levelsEnded records depth levels whose last sibling has been
		// printed, so no vertical link is drawn for them.
		levelsEnded []int
		// max is the deepest level, used to align the meta column.
		max = computeMaxLevel(0, n)
	)
	if n.parent == nil {
		// Root: print the bare value (plus meta) without an edge.
		w.WriteString(n.val)
		if n.meta != "" {
			w.WriteString(" " + n.meta)
		}
		fmt.Fprintln(w)
	} else {
		// Non-root start node: draw it with an edge of its own.
		edge := mid
		if len(n.nodes) == 0 {
			edge = end
			levelsEnded = append(levelsEnded, 0)
		}
		dumpVals(w, 0, max, levelsEnded, edge, n)
	}
	if len(n.nodes) > 0 {
		dumpNodes(w, 0, max, levelsEnded, n.nodes)
	}
	return w.Bytes()
}
// String renders the tree rooted at n as text.
func (n *node) String() string {
	rendered := n.asBytes()
	return string(rendered)
}
// lastNode returns the most recently appended child, or nil when n has
// no children.
func (n *node) lastNode() *node {
	if len(n.nodes) == 0 {
		return nil
	}
	return n.nodes[len(n.nodes)-1]
}
// computeMaxLevel returns the depth of the deepest node in the subtree
// rooted at n, where n itself sits at the given level.
func computeMaxLevel(level int, n *node) int {
	if n == nil || len(n.nodes) == 0 {
		return level
	}
	deepest := 0
	for _, child := range n.nodes {
		if d := computeMaxLevel(level+1, child); d > deepest {
			deepest = d
		}
	}
	return deepest
}
// dumpNodes writes the given sibling nodes (and, recursively, their
// children) to w at the given depth level. Note: siblings are sorted
// in place by value before printing.
func dumpNodes(w io.Writer, level, maxLevel int, levelsEnded []int, nodes []*node) {
	sort.Slice(nodes, func(i, j int) bool {
		return nodes[i].val < nodes[j].val
	})
	for i, node := range nodes {
		edge := mid
		if i == len(nodes)-1 {
			// Last sibling: close this level and use the corner edge.
			// Appending here is safe for the siblings' recursion because
			// each deeper call receives its own (possibly shared-prefix)
			// view of levelsEnded.
			levelsEnded = append(levelsEnded, level)
			edge = end
		}
		dumpVals(w, level, maxLevel, levelsEnded, edge, node)
		if len(node.nodes) > 0 {
			dumpNodes(w, level+1, maxLevel, levelsEnded, node.nodes)
		}
	}
}
// dumpVals writes a single node line: the vertical links for ancestor
// levels, the edge decoration, the value and (padded) meta column.
func dumpVals(w io.Writer, level, maxLevel int, levelsEnded []int, edge decoration, node *node) {
	for i := range level {
		if isEnded(levelsEnded, i) {
			// Ancestor level already closed: pad with spaces only.
			fmt.Fprint(w, strings.Repeat(" ", indentSize+1))
			continue
		}
		fmt.Fprintf(w, "%s%s", link, strings.Repeat(" ", indentSize))
	}
	val := dumpVal(level, node)
	if node.meta != "" {
		// Left-align the value in a column widened by the remaining
		// depth so meta text lines up across different levels.
		c := max(maxLevel-level, 0)
		fmt.Fprintf(w, "%s %-"+strconv.Itoa(leafMaxWidth+c*2)+"s%s%s\n", edge, val, strings.Repeat(" ", c), node.meta)
		return
	}
	fmt.Fprintf(w, "%s %s\n", edge, val)
}
// isEnded reports whether the given depth level has been closed (its
// last sibling already printed), meaning no vertical link is drawn.
func isEnded(levelsEnded []int, level int) bool {
	for _, ended := range levelsEnded {
		if ended == level {
			return true
		}
	}
	return false
}
// dumpVal returns the node's value, with continuation lines of a
// multi-line value indented to align under the tree edge.
func dumpVal(level int, node *node) string {
	lines := strings.Split(node.val, "\n")
	if len(lines) < 2 {
		return node.val
	}
	pad := indent(level, node)
	out := lines[:1]
	for _, line := range lines[1:] {
		out = append(out, pad+line)
	}
	return strings.Join(out, "\n")
}
// indent builds the leading whitespace/link prefix for continuation
// lines of a multi-line value, by walking from node up to the root and
// emitting either a vertical link or plain spaces per ancestor level.
func indent(level int, node *node) string {
	links := make([]string, level+1)
	for node.parent != nil {
		if isLast(node) {
			// Last sibling at this level: no vertical link below it.
			links[level] = strings.Repeat(" ", indentSize+1)
		} else {
			links[level] = fmt.Sprintf("%s%s", link, strings.Repeat(" ", indentSize))
		}
		level--
		node = node.parent
	}
	return strings.Join(links, "")
}
// isLast reports whether n is the last child of its parent.
// The parent must be non-nil.
func isLast(n *node) bool {
	return n.parent.lastNode() == n
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package hive
import (
"context"
"fmt"
"log/slog"
"time"
"github.com/cilium/hive/cell"
"golang.org/x/sync/semaphore"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// Fence is a utility for blocking until registered wait functions have completed.
//
// The wait functions can only be Add()'d before Hive is started, e.g. from constructors
// or invoke functions. Conversely the Wait() method can only be called during/after Hive start.
type Fence interface {
	// Add a named wait function to the fence.
	// The provided function should block until ready or until the context has been
	// cancelled. It should return [context.Err] if the context is cancelled.
	//
	// This method will panic if called after Hive has started to ensure that when
	// Wait() is called all initializers have already been registered.
	Add(name string, waitFn WaitFunc)

	// Wait blocks until all registered initializers have completed or until
	// the context is cancelled.
	//
	// This method will panic if called before Hive is started.
	// Can be called any number of times.
	Wait(ctx context.Context) error
}
// NewFence constructs a new [Fence] and registers it with the lifecycle
// so it learns when the Hive has started.
func NewFence(lc cell.Lifecycle, log *slog.Logger) Fence {
	f := &fence{
		mu:        newContextMutex(),
		log:       log,
		waitFuncs: map[string]WaitFunc{},
	}
	lc.Append(f)
	return f
}
// WaitFunc is a function for waiting until some initialization has completed.
// If the context given to it is cancelled the function should stop and return ctx.Err().
type WaitFunc = func(context.Context) error

// nameLogField is the log field key under which a wait function's name is logged.
const nameLogField = "name"

// fence implements [Fence] and cell.HookInterface.
type fence struct {
	// mu serializes Add/Wait/Start; it is context-aware so Wait can be
	// aborted by its caller's context.
	mu contextMutex
	log *slog.Logger
	// started flips to true in Start; guards the Add/Wait ordering contract.
	started bool
	// waitFuncs maps registration name to wait function; entries are
	// removed as they complete.
	waitFuncs map[string]WaitFunc
}
// Add registers a named wait function. It panics when called after the
// Hive has started or when the name is already registered.
func (w *fence) Add(name string, waitFn WaitFunc) {
	// Registration happens sequentially while the Hive is being
	// populated, so a background context suffices here.
	w.mu.Lock(context.Background())
	defer w.mu.Unlock()
	if w.started {
		panic("Add() called after Hive had already started! Add() must be used from provide/invoke functions.")
	}
	_, dup := w.waitFuncs[name]
	if dup {
		panic(fmt.Sprintf("%s already registered", name))
	}
	w.waitFuncs[name] = waitFn
}
// Wait blocks until every registered wait function has completed, or
// returns the first error (wrapped with the function's name). Completed
// functions are removed, so repeated calls only re-run the remainder.
func (w *fence) Wait(ctx context.Context) error {
	if err := w.mu.Lock(ctx); err != nil {
		return err
	}
	defer w.mu.Unlock()
	if !w.started {
		panic("Wait() called before Hive had already started! Wait() must be called during start to ensure all Add() calls have happened.")
	}
	if len(w.waitFuncs) == 0 {
		return nil
	}
	remaining := len(w.waitFuncs)
	for name, fn := range w.waitFuncs {
		t0 := time.Now()
		log := w.log.With(
			nameLogField, name,
			logfields.Remaining, remaining,
		)
		log.Info("Fence waiting")
		if err := fn(ctx); err != nil {
			log.Info("Fence error",
				logfields.Error, err)
			return fmt.Errorf("%s: %w", name, err)
		}
		log.Info("Fence done", logfields.Duration, time.Since(t0))
		remaining--
		// Deleting during range is safe in Go; it prevents re-running
		// this function on a subsequent Wait() call.
		delete(w.waitFuncs, name)
	}
	return nil
}
// Start implements cell.HookInterface. It marks the fence as started,
// after which Add panics and Wait becomes usable.
func (w *fence) Start(ctx cell.HookContext) error {
	err := w.mu.Lock(ctx)
	if err != nil {
		return err
	}
	w.started = true
	w.mu.Unlock()
	return nil
}
// Stop implements cell.HookInterface. The fence holds no resources that
// need releasing.
func (w *fence) Stop(cell.HookContext) error {
	return nil
}

// Compile-time check that fence satisfies cell.HookInterface.
var _ cell.HookInterface = &fence{}
// contextMutex is a mutual-exclusion lock whose Lock can be aborted by
// a context. It is built on a weighted semaphore of capacity one.
type contextMutex struct {
	sem *semaphore.Weighted
}

func newContextMutex() contextMutex {
	return contextMutex{sem: semaphore.NewWeighted(1)}
}

// Lock acquires the mutex, returning the context's error if the context
// is cancelled while waiting.
func (c *contextMutex) Lock(ctx context.Context) error {
	return c.sem.Acquire(ctx, 1)
}

// Unlock releases the mutex. It must only be called after a successful Lock.
func (c *contextMutex) Unlock() {
	c.sem.Release(1)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package health
import (
"github.com/cilium/cilium/pkg/hive/health/types"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/hive/cell"
"github.com/cilium/statedb"
"github.com/cilium/statedb/index"
)
// Cell provides the modular health provider (V2): the health status
// StateDB table, the provider used to create per-module reporters,
// health metrics and the health script commands.
var Cell = cell.Module(
	"health",
	"Modular Health Provider V2",
	cell.ProvidePrivate(newTablesPrivate),
	cell.Provide(
		newHealthV2Provider,
		statedb.RWTable[types.Status].ToTable,
	),
	// Module health metrics.
	cell.Invoke(metricPublisher),
	metrics.Metric(newMetrics),
	cell.Provide(healthCommands),
)
var (
	// PrimaryIndex indexes health statuses uniquely by their full
	// identifier string (module path plus component path).
	PrimaryIndex = statedb.Index[types.Status, types.HealthID]{
		Name: "identifier",
		FromObject: func(s types.Status) index.KeySet {
			return index.NewKeySet([]byte(s.ID.String()))
		},
		FromKey:    index.Stringer[types.HealthID],
		FromString: index.FromString,
		Unique:     true,
	}

	// LevelIndex is a non-unique secondary index over the status level
	// (OK/Degraded/Stopped).
	LevelIndex = statedb.Index[types.Status, types.Level]{
		Name: "level",
		FromObject: func(s types.Status) index.KeySet {
			return index.NewKeySet(index.Stringer(s.Level))
		},
		FromKey:    index.Stringer[types.Level],
		FromString: index.FromString,
		Unique:     false,
	}
)
// newTablesPrivate creates and registers the StateDB table backing the
// health statuses, indexed by identifier and by level.
func newTablesPrivate(db *statedb.DB) (statedb.RWTable[types.Status], error) {
	return statedb.NewTable(db, TableName, PrimaryIndex, LevelIndex)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package health
import (
"fmt"
"os"
"slices"
"strings"
healthPkg "github.com/cilium/cilium/pkg/health/client"
"github.com/cilium/cilium/pkg/hive/health/types"
"github.com/cilium/hive"
"github.com/cilium/hive/script"
"github.com/cilium/statedb"
"github.com/spf13/pflag"
)
// healthCommands exposes the health-related hive script commands.
func healthCommands(db *statedb.DB, table statedb.Table[types.Status]) hive.ScriptCmdsOut {
	cmds := map[string]script.Cmd{
		"health":    healthTreeCommand(db, table),
		"health/ok": allOK(db, table),
	}
	return hive.NewScriptCmds(cmds)
}
// healthTreeCommand returns the "health" script command which prints
// the health reporter tree, optionally restricted by a reporter ID
// prefix, a substring match and a set of status levels, either to the
// script log or to a file.
func healthTreeCommand(db *statedb.DB, table statedb.Table[types.Status]) script.Cmd {
	return script.Command(
		script.CmdUsage{
			Summary: "Log health reporter tree",
			Args:    "[reporter-id-prefix]",
			Flags: func(fs *pflag.FlagSet) {
				fs.StringP("match", "m", "", "Output only health reports where the reporter ID path contains the substring")
				// Default to all three levels. The previous default listed
				// LevelDegraded twice and omitted LevelStopped, contradicting
				// the flag's own help text.
				fs.StringArrayP("levels", "s", []string{types.LevelOK, types.LevelDegraded, types.LevelStopped},
					"Output only health reports with the specified state (i.e. ok,degraded,stopped)")
				fs.StringP("output", "o", "", "File to write output to")
			},
			Detail: []string{
				"Prints out a health reporter tree",
				"If passed prefix is not-empty then only nodes of this subtree",
				"will be displayed",
			},
		},
		func(s *script.State, args ...string) (script.WaitFunc, error) {
			var prefix string
			if len(args) > 0 {
				prefix = args[0]
			}
			match, err := s.Flags.GetString("match")
			if err != nil {
				return nil, err
			}
			levels, err := s.Flags.GetStringArray("levels")
			if err != nil {
				return nil, err
			}
			file, err := s.Flags.GetString("output")
			if err != nil {
				return nil, err
			}
			// Level filtering is case-insensitive.
			for i := range levels {
				levels[i] = strings.ToLower(levels[i])
			}
			w := s.LogWriter()
			if file != "" {
				p := s.Path(file)
				fd, err := os.Create(p)
				if err != nil {
					return nil, err
				}
				// Close the file once the report has been written; the
				// descriptor was previously leaked.
				defer fd.Close()
				w = fd
			}
			ss := getHealth(db, table, prefix, match, levels)
			healthPkg.GetAndFormatModulesHealth(w, ss, true, "")
			return nil, nil
		},
	)
}
// getHealth returns health statuses filtered by an optional reporter ID
// prefix, an optional substring match on the reporter ID, and a set of
// lower-cased status levels.
//
// Previously the prefix branch ignored the match and levels filters
// entirely, so e.g. allOK with a prefix counted healthy reports as
// degraded. The filters now apply uniformly to both branches.
func getHealth(db *statedb.DB, table statedb.Table[types.Status], prefix, match string, levels []string) []types.Status {
	keep := func(status types.Status) bool {
		if match != "" && !strings.Contains(status.ID.String(), match) {
			return false
		}
		return slices.Contains(levels, strings.ToLower(status.Level.String()))
	}
	ss := []types.Status{}
	tx := db.ReadTxn()
	if prefix != "" {
		for status := range table.Prefix(tx, PrimaryIndex.Query(types.HealthID(prefix))) {
			if keep(status) {
				ss = append(ss, status)
			}
		}
	} else {
		for status := range table.All(tx) {
			if keep(status) {
				ss = append(ss, status)
			}
		}
	}
	return ss
}
// allOK returns the "health/ok" script command, which fails when any
// degraded health report exists (optionally restricted to a reporter ID
// prefix and substring match).
func allOK(db *statedb.DB, table statedb.Table[types.Status]) script.Cmd {
	return script.Command(
		script.CmdUsage{
			Summary: "Report and fail if there are degraded health reports",
			Args:    "[reporter-id-prefix]",
			Flags: func(fs *pflag.FlagSet) {
				fs.StringP("match", "m", "", "Output only health reports where the reporter ID path contains the substring")
			},
			Detail: []string{
				"Checks that all specified health reporters are healthy.\n",
				"If a non empty prefix is passed, only sub-trees of that .\n",
				"reporter will be checked\n",
			},
		},
		func(s *script.State, args ...string) (script.WaitFunc, error) {
			var prefix string
			if len(args) > 0 {
				prefix = args[0]
			}
			match, err := s.Flags.GetString("match")
			if err != nil {
				return nil, err
			}
			w := s.LogWriter()
			// Query only degraded reports; any hit means failure.
			ss := getHealth(db, table, prefix, match, []string{strings.ToLower(types.LevelDegraded)})
			healthPkg.GetAndFormatModulesHealth(w, ss, true, "")
			if len(ss) != 0 {
				return nil, fmt.Errorf("found %d degraded health reports", len(ss))
			}
			return nil, nil
		},
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package health
import (
"context"
"strings"
"github.com/cilium/hive/cell"
"github.com/cilium/hive/job"
"github.com/cilium/statedb"
"github.com/cilium/cilium/pkg/hive/health/types"
"github.com/cilium/cilium/pkg/metrics/metric"
"github.com/cilium/cilium/pkg/rate"
"github.com/cilium/cilium/pkg/time"
)
// Metrics holds the hive health metrics.
type Metrics struct {
	// HealthStatusGauge counts reports per status level (label: status).
	HealthStatusGauge metric.Vec[metric.Gauge]
	// DegradedHealthStatusGauge counts degraded reports per module
	// (label: module); deletable so healthy modules can be pruned.
	DegradedHealthStatusGauge metric.DeletableVec[metric.Gauge]
}
// newMetrics constructs the hive health metrics. The per-module
// degraded gauge is disabled by default.
func newMetrics() *Metrics {
	return &Metrics{
		HealthStatusGauge: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: "hive_health_status_levels",
			Namespace:  "cilium",
			Subsystem:  "hive",
			Name:       "status",
			Help:       "Counts of health status levels of Hive components",
		}, []string{"status"}),
		DegradedHealthStatusGauge: metric.NewGaugeVec(metric.GaugeOpts{
			Namespace: "cilium",
			Subsystem: "hive",
			Name:      "degraded_status",
			Help:      "Counts degraded health status levels of Hive components labeled by modules",
			Disabled:  true,
		}, []string{"module"}),
	}
}
// publishFunc writes per-level counts and per-module degraded counts
// into the metrics; extracted as a type to ease testing.
type publishFunc func(map[types.Level]uint64, map[string]uint64)

// metricPublisherParams are the dependencies of metricPublisher.
type metricPublisherParams struct {
	cell.In

	DB       *statedb.DB
	Table    statedb.Table[types.Status]
	JobGroup job.Group
	Metrics  *Metrics
}
// metricPublisher periodically publishes the hive module health metrics
// * cilium_hive_status
// * cilium_hive_degraded_status
func metricPublisher(p metricPublisherParams) {
	// Writes the computed counts into the metrics. Extracted to make testing easy.
	publish := func(levelCounts map[types.Level]uint64, degradedPerModule map[string]uint64) {
		for level, count := range levelCounts {
			p.Metrics.HealthStatusGauge.WithLabelValues(strings.ToLower(string(level))).Set(float64(count))
		}
		for module, count := range degradedPerModule {
			if count == 0 {
				// Module is healthy again: drop any metric associated with it.
				p.Metrics.DegradedHealthStatusGauge.DeleteLabelValues(module)
			} else {
				p.Metrics.DegradedHealthStatusGauge.WithLabelValues(module).Set(float64(count))
			}
		}
	}

	// Only run the publisher job when the metric is enabled.
	if !p.Metrics.HealthStatusGauge.IsEnabled() {
		return
	}
	p.JobGroup.Add(job.OneShot("module-status-metrics",
		func(ctx context.Context, health cell.Health) error {
			return publishJob(ctx, p, publish)
		}))
}
// publishJob loops forever, recomputing the per-level and per-module
// degraded counts whenever the status table changes (rate limited) and
// handing them to publish. Returns only on context cancellation.
func publishJob(ctx context.Context, p metricPublisherParams, publish publishFunc) error {
	// Limit rate of updates to the metric. The status table is updated often, the
	// watch channel is closed on every modification (since we're watching all) and
	// traversing the full table is somewhat expensive, so let's limit ourselves.
	limiter := rate.NewLimiter(15*time.Second, 3)
	defer limiter.Stop() // Avoids leaking a goroutine.

	// idToStatus tracks the degraded-report count per module; zeroed
	// entries signal to publish that the module's metric can be deleted.
	idToStatus := make(map[string]uint64)
	it, watch := p.Table.AllWatch(p.DB.ReadTxn())
	for {
		stats := make(map[types.Level]uint64)
		// Reset health ID status counts
		for k := range idToStatus {
			idToStatus[k] = 0
		}
		for obj := range it {
			stats[obj.Level]++
			_, ok := idToStatus[obj.ID.Module.String()]
			if obj.Level == types.LevelDegraded {
				idToStatus[obj.ID.Module.String()]++
			} else if !ok {
				// Record the module with a zero count so a previously
				// degraded module gets its metric deleted.
				idToStatus[obj.ID.Module.String()] = 0
			}
		}
		publish(stats, idToStatus)
		// Remove old IDs
		for k, v := range idToStatus {
			if v == 0 {
				delete(idToStatus, k)
			}
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-watch:
		}
		if err := limiter.Wait(ctx); err != nil {
			return err
		}
		it, watch = p.Table.AllWatch(p.DB.ReadTxn())
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package health
import (
"context"
"fmt"
"log/slog"
"sync/atomic"
"github.com/cilium/hive/cell"
"github.com/cilium/statedb"
"github.com/cilium/cilium/pkg/hive/health/types"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// providerParams are the dependencies of newHealthV2Provider.
type providerParams struct {
	cell.In

	DB          *statedb.DB
	Lifecycle   cell.Lifecycle
	StatusTable statedb.RWTable[types.Status]
	Logger      *slog.Logger
}
// provider implements types.Provider backed by the StateDB status table.
type provider struct {
	db *statedb.DB
	// stopped is set on lifecycle Stop; all reporter callbacks refuse
	// further updates once it is true.
	stopped     atomic.Bool
	statusTable statedb.RWTable[types.Status]
	logger      *slog.Logger
}

// TableName is the name of the health status StateDB table.
const TableName = "health"
// newHealthV2Provider constructs the health provider and hooks it into
// the lifecycle so that updates stop being accepted on shutdown.
func newHealthV2Provider(params providerParams) types.Provider {
	prov := &provider{
		db:          params.DB,
		statusTable: params.StatusTable,
		logger:      params.Logger,
	}
	params.Lifecycle.Append(prov)
	return prov
}
// Start implements cell.HookInterface; nothing to initialize.
func (p *provider) Start(ctx cell.HookContext) error {
	return nil
}

// Stop implements cell.HookInterface. It marks the provider stopped so
// that reporters created by ForModule reject further updates.
func (p *provider) Stop(ctx cell.HookContext) error {
	p.stopped.Store(true)
	return nil
}
// ForModule returns a health reporter scoped to the given module. The
// reporter's callbacks write to the status table inside write
// transactions; each refuses to act once the provider has stopped.
func (p *provider) ForModule(mid cell.FullModuleID) cell.Health {
	return &moduleReporter{
		logger: p.logger,
		id:     types.Identifier{Module: mid},
		// upsert inserts or updates a single status row, bumping its
		// count when the status is unchanged.
		upsert: func(s types.Status) error {
			if p.stopped.Load() {
				return fmt.Errorf("provider is stopped, no more updates will take place")
			}
			tx := p.db.WriteTxn(p.statusTable)
			// Abort is a no-op after Commit; this releases the txn on
			// any early return below.
			defer tx.Abort()
			old, _, found := p.statusTable.Get(tx, PrimaryIndex.QueryFromObject(s))
			if found && !old.Stopped.IsZero() {
				return fmt.Errorf("reporting for %q has been stopped", s.ID)
			}
			s.Count = 1
			// If a similar status already exists, increment count, otherwise start back
			// at zero.
			if found && old.Level == s.Level && old.Message == s.Message && old.Error == s.Error {
				s.Count = old.Count + 1
			}
			if _, _, err := p.statusTable.Insert(tx, s); err != nil {
				return fmt.Errorf("upsert status %s: %w", s, err)
			}
			// To avoid excess debug logs, only report upserts if it's a new status,
			// is not-OK or is a state change (ex. Degraded -> OK).
			if !found || s.Level != types.LevelOK || old.Level != s.Level {
				lastLevel := "none"
				if old.Level != "" {
					lastLevel = string(old.Level)
				}
				p.logger.Debug("upserting health status",
					logfields.LastLevel, lastLevel,
					logfields.ReporterID, s.ID,
					logfields.Status, s,
				)
			}
			tx.Commit()
			return nil
		},
		// deletePrefix removes every status row whose identifier starts
		// with the given identifier (a whole reporter sub-tree).
		deletePrefix: func(i types.Identifier) error {
			if p.stopped.Load() {
				return fmt.Errorf("provider is stopped, no more updates will take place")
			}
			tx := p.db.WriteTxn(p.statusTable)
			defer tx.Abort()
			q := PrimaryIndex.Query(types.HealthID(i.String()))
			iter := p.statusTable.Prefix(tx, q)
			var deleted int
			for o := range iter {
				if _, _, err := p.statusTable.Delete(tx, types.Status{
					ID: o.ID,
				}); err != nil {
					return fmt.Errorf("deleting prunable child %s: %w", i, err)
				}
				deleted++
			}
			p.logger.Debug("delete health sub-tree",
				logfields.Prefix, i,
				logfields.Deleted, deleted,
			)
			tx.Commit()
			return nil
		},
		// stop marks a single reporter's status as stopped, preserving
		// its last known state but blocking further updates.
		stop: func(i types.Identifier) error {
			if p.stopped.Load() {
				return fmt.Errorf("provider is stopped, no more updates will take place")
			}
			tx := p.db.WriteTxn(p.statusTable)
			defer tx.Abort()
			old, _, found := p.statusTable.Get(tx, PrimaryIndex.Query(i.HealthID()))
			if !found {
				// Nothing to do.
				return nil
			}
			if !old.Stopped.IsZero() {
				return fmt.Errorf("reporting for %q has been stopped", i)
			}
			old.Level = types.LevelStopped
			old.Stopped = time.Now()
			if _, _, err := p.statusTable.Insert(tx, old); err != nil {
				return fmt.Errorf("stopping reporter - upsert status %s: %w", old, err)
			}
			tx.Commit()
			p.logger.Debug("stopping health reporter",
				logfields.ReporterID, i,
			)
			return nil
		},
		providerStopped: p.stopped.Load,
	}
}
// moduleReporter implements cell.Health for one reporter identifier;
// the actual table writes are delegated to the provider's closures.
type moduleReporter struct {
	logger *slog.Logger
	// id is the fully qualified reporter identifier (module + components).
	id types.Identifier
	// stopped marks this reporter scope as stopped; reports after that
	// only produce a warning log.
	stopped atomic.Bool
	// providerStopped reports whether the owning provider has stopped.
	providerStopped func() bool
	upsert          func(types.Status) error
	stop            func(types.Identifier) error
	deletePrefix    func(types.Identifier) error
}
// newScope derives a child reporter whose identifier has name appended
// as a sub-component. All reporting callbacks are shared with r.
func (r *moduleReporter) newScope(name string) *moduleReporter {
	return &moduleReporter{
		logger:       r.logger,
		id:           r.id.WithSubComponent(name),
		upsert:       r.upsert,
		deletePrefix: r.deletePrefix,
		stop:         r.stop,
		// Propagate the provider-stopped check; previously it was
		// dropped, leaving providerStopped nil on derived scopes while
		// reporters created by ForModule had it set.
		providerStopped: r.providerStopped,
	}
}
// NewScope returns a child reporter scoped under the given sub-component name.
func (r *moduleReporter) NewScope(name string) cell.Health {
	return r.newScope(name)
}

// NewScopeWithContext returns a child reporter that is stopped and
// closed (its sub-tree removed) when ctx is cancelled. The spawned
// goroutine lives until ctx is done.
func (r *moduleReporter) NewScopeWithContext(ctx context.Context, name string) cell.Health {
	s := r.newScope(name)
	go func() {
		<-ctx.Done()
		s.stopped.Store(true)
		s.Close()
	}()
	return s
}
// OK reports an OK status with the given message for this reporter.
func (r *moduleReporter) OK(msg string) {
	if r.stopped.Load() {
		r.logger.Warn("report on stopped reporter", logfields.ReporterID, r.id)
	}
	now := time.Now()
	status := types.Status{
		ID:      r.id,
		Level:   types.LevelOK,
		Message: msg,
		LastOK:  now,
		Updated: now,
	}
	if err := r.upsert(status); err != nil {
		r.logger.Error("failed to upsert ok health status", logfields.Error, err)
	}
}
// Degraded reports a degraded status for this reporter, with msg
// describing the degradation and err (may be nil) its cause.
func (r *moduleReporter) Degraded(msg string, err error) {
	if r.stopped.Load() {
		r.logger.Warn("report on stopped reporter", logfields.ReporterID, r.id)
	}
	// Guard against a nil error: calling err.Error() directly would
	// panic, and a degraded-health report is the last place we want to
	// crash the caller.
	errStr := ""
	if err != nil {
		errStr = err.Error()
	}
	if err := r.upsert(types.Status{
		ID:      r.id,
		Level:   types.LevelDegraded,
		Message: msg,
		Error:   errStr,
		Updated: time.Now(),
	}); err != nil {
		r.logger.Error("failed to upsert degraded health status", logfields.Error, err)
	}
}
// Stopped declares a reporter scope stopped, and will block further updates to it while
// maintaining the last known status of the reporter.
// Note: msg is currently unused; the signature is fixed by cell.Health.
func (r *moduleReporter) Stopped(msg string) {
	r.stopped.Store(true)
	if err := r.stop(r.id); err != nil {
		// The previous message ("failed to delete reporter status tree")
		// was copy-pasted from Close and misdescribed this failure.
		r.logger.Error("failed to stop reporter", logfields.Error, err)
	}
}
// Close completely closes out a tree, it will remove all health statuses below
// this reporter scope.
func (r *moduleReporter) Close() {
	err := r.deletePrefix(r.id)
	if err != nil {
		r.logger.Error("failed to delete reporter status tree", logfields.Error, err)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/cilium/hive/cell"
)
// Provider has functionality to create health reporters, scoped to a
// module.
type Provider interface {
	// ForModule returns a health reporter rooted at the given module ID.
	ForModule(mid cell.FullModuleID) cell.Health
}
// pathIdent is the component-path portion of a health identifier.
type pathIdent []string

// String renders the path as dot-separated components, or "" when empty.
func (p pathIdent) String() string {
	if len(p) > 0 {
		return strings.Join(p, ".")
	}
	return ""
}
// HealthID is used as the key for the primary index for health status
// tables.
type HealthID string

// String returns the identifier as a plain string.
func (id HealthID) String() string {
	return string(id)
}
// Identifier is a fully qualified, path based identifier for health status
// which is made up of module ID and component ID parts.
type Identifier struct {
	// Module is the full module path this status belongs to.
	Module cell.FullModuleID
	// Component is the reporter path within the module.
	Component pathIdent
}
// WithSubComponent returns a copy of the identifier with an appended
// subcomponent.
//
// The component path is copied before appending: the previous
// append(i.Component, name) aliased the receiver's backing array, so
// two sibling scopes derived from the same parent could overwrite each
// other's final path element.
func (i Identifier) WithSubComponent(name string) Identifier {
	comp := make(pathIdent, len(i.Component), len(i.Component)+1)
	copy(comp, i.Component)
	return Identifier{
		Module:    i.Module,
		Component: append(comp, name),
	}
}
// String returns the fully qualified "module.component" identifier.
func (i Identifier) String() string {
	return i.Module.String() + "." + i.Component.String()
}

// HealthID returns the identifier in primary-index key form.
func (i Identifier) HealthID() HealthID {
	return HealthID(i.String())
}
// Status represents a current health status update.
type Status struct {
	// ID is the fully qualified reporter identifier (primary key).
	ID Identifier
	// Level is the current status level (OK/Degraded/Stopped).
	Level Level
	// Message is the human-readable status message.
	Message string
	// Error holds the rendered error for degraded statuses.
	Error string
	// LastOK is when the reporter last reported OK.
	LastOK time.Time
	// Updated is when this status was last written.
	Updated time.Time
	// Stopped is when reporting was stopped; zero while active.
	Stopped time.Time
	// Final is the final message set when a status is stopped.
	Final string
	// Count is how many consecutive times this same status was reported.
	Count uint64
}
// TableHeader returns the column names for tabular status output.
func (Status) TableHeader() []string {
	return []string{"Module", "Component", "Level", "Message", "Error", "LastOK", "UpdatedAt", "Count"}
}

// TableRow renders the status as one table row, matching TableHeader's
// column order.
func (s Status) TableRow() []string {
	return []string{
		s.ID.Module.String(),
		s.ID.Component.String(),
		string(s.Level),
		s.Message,
		s.Error,
		s.LastOK.Format(time.RFC3339),
		s.Updated.Format(time.RFC3339),
		strconv.FormatUint(s.Count, 10),
	}
}
// String renders the status as "<id>: [<level>] <message>", appending
// ": <error>" when an error is present.
func (s Status) String() string {
	base := fmt.Sprintf("%s: [%s] %s", s.ID.String(), s.Level, s.Message)
	if s.Error == "" {
		return base
	}
	return base + ": " + s.Error
}
// Level is a health status level.
type Level string

func (s Level) String() string {
	return string(s)
}

// The known status levels. These are untyped string constants so they
// can be used both as Level values and plain strings (e.g. flag defaults).
const (
	LevelOK = "OK"
	LevelDegraded = "Degraded"
	LevelStopped = "Stopped"
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package hive
import (
"log/slog"
"net/netip"
"reflect"
"runtime/pprof"
"slices"
"time"
upstream "github.com/cilium/hive"
"github.com/cilium/hive/cell"
"github.com/cilium/hive/job"
"github.com/cilium/statedb"
"github.com/cilium/cilium/pkg/hive/health"
"github.com/cilium/cilium/pkg/hive/health/types"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
)
// Re-exports of the upstream hive types so most Cilium code only needs
// this package.
type (
	Hive       = upstream.Hive
	Options    = upstream.Options
	Shutdowner = upstream.Shutdowner
)

var (
	// ShutdownWithError requests a hive shutdown carrying an error.
	ShutdownWithError = upstream.ShutdownWithError
)
// New wraps the hive.New to create a hive with defaults used by cilium-agent:
// job support, module health (V2), StateDB with metrics, a root logger and
// job group, per-module decorators and Cilium's option defaults.
func New(cells ...cell.Cell) *Hive {
	cells = append(
		// Clone so the caller's slice is not mutated by the appends below.
		slices.Clone(cells),
		job.Cell,

		// Module health
		cell.Group(
			health.Cell,
			cell.Provide(
				// Root (module-less) health reporter.
				func(provider types.Provider) cell.Health {
					return provider.ForModule(nil)
				},
			),
		),

		// StateDB and its metrics
		cell.Group(
			statedb.Cell,
			metrics.Metric(NewStateDBMetrics),
			metrics.Metric(NewStateDBReconcilerMetrics),
			cell.Provide(
				NewStateDBMetricsImpl,
				NewStateDBReconcilerMetricsImpl,
			),
		),

		// The root slog FieldLogger.
		cell.Provide(
			func() logging.FieldLogger {
				// slogloggercheck: its setup has been done before hive is Ran.
				return logging.DefaultSlogLogger
			},
			// Root job group. This is mostly provided for tests so that we don't need a cell.Module
			// wrapper to get a job.Group.
			func(reg job.Registry, h cell.Health, l *slog.Logger, lc cell.Lifecycle) job.Group {
				return reg.NewGroup(h, lc, job.WithLogger(l))
			},
		),
	)

	// Scope logging and health by module ID.
	moduleDecorators := []cell.ModuleDecorator{
		func(mid cell.ModuleID) logging.FieldLogger {
			// slogloggercheck: its setup has been done before hive is Ran.
			return logging.DefaultSlogLogger.With(logfields.LogSubsys, string(mid))
		},
		func(hp types.Provider, fmid cell.FullModuleID) cell.Health {
			return hp.ForModule(fmid)
		},
		// Per-module DB handle so table operations are attributed to the module.
		func(db *statedb.DB, mid cell.ModuleID) *statedb.DB {
			return db.NewHandle(string(mid))
		},
	}
	modulePrivateProviders := []cell.ModulePrivateProvider{
		jobGroupProvider,
	}
	return upstream.NewWithOptions(
		upstream.Options{
			EnvPrefix:              "CILIUM_",
			ModulePrivateProviders: modulePrivateProviders,
			ModuleDecorators:       moduleDecorators,
			DecodeHooks:            decodeHooks,
			StartTimeout:           5 * time.Minute,
			StopTimeout:            1 * time.Minute,
			LogThreshold:           100 * time.Millisecond,
		},
		cells...,
	)
}
// decodeHooks are the extra mapstructure decode hooks used when decoding
// configuration into cell config structs.
var decodeHooks = cell.DecodeHooks{
	// Decode netip.Prefix fields
	// TODO: move to github.com/cilium/hive/cell.decoderConfig default decode hooks once
	// https://github.com/go-viper/mapstructure/pull/85 is merged.
	func(from reflect.Type, to reflect.Type, data any) (any, error) {
		if from.Kind() != reflect.String {
			return data, nil
		}
		if to != reflect.TypeOf(netip.Prefix{}) {
			return data, nil
		}
		return netip.ParsePrefix(data.(string))
	},
}
// AddConfigOverride registers a function that overrides the parsed
// configuration of type Cfg before the hive is run. Thin wrapper over
// the upstream implementation.
func AddConfigOverride[Cfg cell.Flagger](h *Hive, override func(*Cfg)) {
	upstream.AddConfigOverride[Cfg](h, override)
}
// jobGroupProvider provides a (private) job group to modules, with scoped health reporting, logging and metrics.
// Goroutines spawned by the group carry a pprof "cell" label with the module ID.
func jobGroupProvider(reg job.Registry, h cell.Health, l *slog.Logger, lc cell.Lifecycle, mid cell.ModuleID) job.Group {
	return reg.NewGroup(h, lc,
		job.WithLogger(l),
		job.WithPprofLabels(pprof.Labels("cell", string(mid))))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package hive
import (
"context"
"fmt"
"log/slog"
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/lock"
)
// OnDemand provides access to a resource on-demand.
// On first call to Acquire() the resource is started (cell.Lifecycle.Start).
// If the starting of the resource fails Acquire() returns the error from Start().
// When all references are Release()'d the resource is stopped (cell.Lifecycle.Stop),
// and again failure from Stop() is returned.
type OnDemand[Resource any] interface {
	// Acquire a resource. On the first call to Acquire() the underlying
	// resource is started with the provided context that aborts the start
	// if the context is cancelled. On failure to start the resulting error
	// is returned.
	Acquire(context.Context) (Resource, error)

	// Release a resource. When the last acquired reference to the resource
	// is released the resource is stopped. If stopping the resource fails
	// the error is returned.
	Release(resource Resource) error
}
// onDemand is the reference-counted OnDemand implementation.
type onDemand[Resource any] struct {
	// mu guards refCount and the start/stop transitions.
	mu  lock.Mutex
	log *slog.Logger
	// refCount is the number of outstanding Acquire()s.
	refCount int
	resource Resource
	// lc is started on the first Acquire and stopped on the last Release.
	lc cell.Lifecycle
}
// NewOnDemand wraps a resource that will be started and stopped on-demand.
// The resource and the lifecycle hooks are provided separately, but can
// of course be the same thing. They're separate to support the use-case
// where the resource is a state object (e.g. StateDB table) and the hook is
// a job group that populates the object.
func NewOnDemand[Resource any](log *slog.Logger, resource Resource, lc cell.Lifecycle) OnDemand[Resource] {
	// refCount starts at its zero value; the resource is not running yet.
	return &onDemand[Resource]{
		log:      log,
		resource: resource,
		lc:       lc,
	}
}
// Acquire implements OnDemand. The first successful acquisition starts
// the resource; on start failure the refCount is not incremented, so a
// later Acquire will retry the start.
func (o *onDemand[Resource]) Acquire(ctx context.Context) (r Resource, err error) {
	o.mu.Lock()
	defer o.mu.Unlock()
	if o.refCount == 0 {
		// This is the first acquisition of the resource. Start it.
		if err = o.lc.Start(o.log, ctx); err != nil {
			return r, fmt.Errorf("failed to start resource %T: %w", r, err)
		}
	}
	o.refCount++
	return o.resource, nil
}
// Release implements OnDemand. Releasing the last reference stops the
// resource; a stop failure is returned but the refCount stays at zero.
func (o *onDemand[Resource]) Release(r Resource) error {
	o.mu.Lock()
	defer o.mu.Unlock()
	if o.refCount <= 0 {
		return fmt.Errorf("BUG: OnDemand.Release called with refCount <= 0")
	}
	o.refCount--
	if o.refCount == 0 {
		// Stopping is not tied to any caller context; use Background.
		if err := o.lc.Stop(o.log, context.Background()); err != nil {
			return fmt.Errorf("failed to stop resource %T: %w", r, err)
		}
	}
	return nil
}
// Compile-time interface check for the reference-counted implementation.
var _ OnDemand[cell.Hook] = &onDemand[cell.Hook]{}

// staticOnDemand wraps an always-running resource; acquire and release
// are no-ops.
type staticOnDemand[Resource any] struct {
	resource Resource
}

// Acquire implements OnDemand.
func (s *staticOnDemand[Resource]) Acquire(context.Context) (Resource, error) {
	return s.resource, nil
}

// Release implements OnDemand.
func (s *staticOnDemand[Resource]) Release(Resource) error {
	return nil
}

// Compile-time interface check for the static implementation.
var _ OnDemand[struct{}] = &staticOnDemand[struct{}]{}

// NewStaticOnDemand creates an on-demand resource that is "static",
// i.e. always running and not started or stopped.
func NewStaticOnDemand[Resource any](resource Resource) OnDemand[Resource] {
	return &staticOnDemand[Resource]{resource}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package hive
import (
"time"
"github.com/cilium/hive/cell"
"github.com/cilium/statedb/reconciler"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/metrics/metric"
)
// ReconcilerMetrics are the metrics emitted by StateDB reconcilers,
// labeled by module (and operation for durations).
type ReconcilerMetrics struct {
	// ReconciliationCount counts reconciliation rounds per module.
	ReconciliationCount metric.Vec[metric.Counter]
	// ReconciliationDuration observes per-operation durations.
	ReconciliationDuration metric.Vec[metric.Observer]
	// ReconciliationTotalErrors counts all errors seen over time.
	ReconciliationTotalErrors metric.Vec[metric.Counter]
	// ReconciliationCurrentErrors tracks objects currently failing.
	ReconciliationCurrentErrors metric.Vec[metric.Gauge]
	// PruneCount counts prune passes per module.
	PruneCount metric.Vec[metric.Counter]
	// PruneTotalErrors counts errors seen while pruning.
	PruneTotalErrors metric.Vec[metric.Counter]
	// PruneDuration observes prune durations.
	PruneDuration metric.Vec[metric.Observer]
}

// Label names used by the reconciler metrics.
const (
	labelModuleId = "module_id"
	labelOperation = "op"
)
// NewStateDBReconcilerMetrics constructs the reconciler metrics. All of
// them are disabled by default and must be enabled via configuration.
func NewStateDBReconcilerMetrics() ReconcilerMetrics {
	m := ReconcilerMetrics{
		ReconciliationCount: metric.NewCounterVec(metric.CounterOpts{
			Disabled:  true,
			Namespace: metrics.Namespace,
			Subsystem: "reconciler",
			Name:      "count",
			Help:      "Number of reconciliation rounds performed",
		}, []string{labelModuleId}),

		ReconciliationDuration: metric.NewHistogramVec(metric.HistogramOpts{
			Disabled:  true,
			Namespace: metrics.Namespace,
			Subsystem: "reconciler",
			Name:      "duration_seconds",
			Help:      "Histogram of per-operation duration during reconciliation",
			// Use buckets in the 0.5ms-1s range.
			Buckets: []float64{.0005, .001, .0025, .005, .01, .025, .05, 0.1, 0.25, 0.5, 1.0},
		}, []string{labelModuleId, labelOperation}),

		ReconciliationTotalErrors: metric.NewCounterVec(metric.CounterOpts{
			Disabled:  true,
			Namespace: metrics.Namespace,
			Subsystem: "reconciler",
			Name:      "errors_total",
			Help:      "Total number of errors encountered during reconciliation",
		}, []string{labelModuleId}),

		ReconciliationCurrentErrors: metric.NewGaugeVec(metric.GaugeOpts{
			Disabled:  true,
			Namespace: metrics.Namespace,
			Subsystem: "reconciler",
			Name:      "errors_current",
			Help:      "The number of objects currently failing to be reconciled",
		}, []string{labelModuleId}),

		PruneCount: metric.NewCounterVec(metric.CounterOpts{
			Disabled:  true,
			Namespace: metrics.Namespace,
			Subsystem: "reconciler",
			Name:      "prune_count",
			Help:      "Number of prunes performed",
		}, []string{labelModuleId}),

		PruneTotalErrors: metric.NewCounterVec(metric.CounterOpts{
			Disabled:  true,
			Namespace: metrics.Namespace,
			Subsystem: "reconciler",
			Name:      "prune_errors_total",
			Help:      "Total number of errors encountered during pruning",
		}, []string{labelModuleId}),

		PruneDuration: metric.NewHistogramVec(metric.HistogramOpts{
			Disabled:  true,
			Namespace: metrics.Namespace,
			Subsystem: "reconciler",
			Name:      "prune_duration_seconds",
			Help:      "Histogram of pruning duration",
		}, []string{labelModuleId}),
	}
	return m
}
// NewStateDBReconcilerMetricsImpl adapts ReconcilerMetrics to the
// reconciler.Metrics interface expected by StateDB reconcilers.
func NewStateDBReconcilerMetricsImpl(m ReconcilerMetrics) reconciler.Metrics {
	return &reconcilerMetricsImpl{m}
}

// reconcilerMetricsImpl implements reconciler.Metrics on top of
// ReconcilerMetrics.
type reconcilerMetricsImpl struct {
	m ReconcilerMetrics
}
// PruneDuration implements reconciler.Metrics. It records how long a
// prune pass took for the given module.
func (m *reconcilerMetricsImpl) PruneDuration(moduleID cell.FullModuleID, duration time.Duration) {
	m.m.PruneDuration.WithLabelValues(moduleID.String()).
		Observe(duration.Seconds())
}
// PruneError implements reconciler.Metrics. It counts each prune pass
// and, when err is non-nil, the prune failure as well.
func (m *reconcilerMetricsImpl) PruneError(moduleID cell.FullModuleID, err error) {
	m.m.PruneCount.WithLabelValues(moduleID.String()).Inc()
	if err != nil {
		m.m.PruneTotalErrors.WithLabelValues(moduleID.String()).Add(1)
	}
}
// ReconciliationDuration implements reconciler.Metrics. It bumps the total
// reconciliation count and records the per-operation duration.
func (m *reconcilerMetricsImpl) ReconciliationDuration(moduleID cell.FullModuleID, operation string, duration time.Duration) {
	module := moduleID.String()
	m.m.ReconciliationCount.WithLabelValues(module).Inc()
	m.m.ReconciliationDuration.WithLabelValues(module, operation).Observe(duration.Seconds())
}
// ReconciliationErrors implements reconciler.Metrics. 'new' is the number of
// errors encountered in this round, 'current' the number of objects currently
// in an error state.
func (m *reconcilerMetricsImpl) ReconciliationErrors(moduleID cell.FullModuleID, new, current int) {
	// BUGFIX: the original Add()ed the new errors to the *current*-errors
	// gauge immediately after Set(), clobbering it, and never incremented the
	// ReconciliationTotalErrors counter ("errors_total") at all. New errors
	// belong in the monotonic total; the gauge tracks the current count.
	m.m.ReconciliationTotalErrors.WithLabelValues(moduleID.String()).Add(float64(new))
	m.m.ReconciliationCurrentErrors.WithLabelValues(moduleID.String()).Set(float64(current))
}
// Compile-time assertion that reconcilerMetricsImpl satisfies reconciler.Metrics.
var _ reconciler.Metrics = &reconcilerMetricsImpl{}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package hive
import (
"time"
"github.com/cilium/statedb"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/metrics/metric"
)
// StateDBMetrics holds the metric vectors exported for StateDB internals.
// All vectors are labeled by table and/or handle (see labelTable/labelHandle).
type StateDBMetrics struct {
	// How long a write transaction was held.
	// (The original comment said "read transaction", contradicting the field
	// name and the "write_txn_duration_seconds" help text below.)
	WriteTxnDuration metric.Vec[metric.Observer]
	// How long writers were blocked while waiting to acquire a write transaction for a specific table.
	TableContention metric.Vec[metric.Observer]
	// The amount of objects in a given table.
	TableObjectCount metric.Vec[metric.Gauge]
	// The current revision of a given table.
	TableRevision metric.Vec[metric.Gauge]
	// The amount of delete trackers for a given table.
	TableDeleteTrackerCount metric.Vec[metric.Gauge]
	// The amount of objects in the graveyard for a given table.
	TableGraveyardObjectCount metric.Vec[metric.Gauge]
	// The lowest revision of a given table that has been processed by the graveyard garbage collector.
	TableGraveyardLowWatermark metric.Vec[metric.Gauge]
	// The time it took to clean the graveyard for a given table.
	TableGraveyardCleaningDuration metric.Vec[metric.Observer]
}
// Prometheus label names used by the StateDB metric vectors above.
const (
	// labelTable identifies the StateDB table a sample refers to.
	labelTable = "table"
	// labelHandle identifies the StateDB handle involved in the operation.
	labelHandle = "handle"
)
// stateDBMetricsImpl implements statedb.Metrics on top of the StateDBMetrics
// metric vectors.
type stateDBMetricsImpl struct {
	// m holds the underlying metric vectors updated by the interface methods.
	m StateDBMetrics
}
// DeleteTrackerCount implements statedb.Metrics. It publishes the number of
// delete trackers registered for the given table.
func (i stateDBMetricsImpl) DeleteTrackerCount(tableName string, numTrackers int) {
	gauge := i.m.TableDeleteTrackerCount.WithLabelValues(tableName)
	gauge.Set(float64(numTrackers))
}
// GraveyardCleaningDuration implements statedb.Metrics. It records how long
// cleaning the graveyard of the given table took.
func (i stateDBMetricsImpl) GraveyardCleaningDuration(tableName string, duration time.Duration) {
	// duration.Seconds() already returns float64; the original wrapped it in
	// a redundant float64() conversion.
	i.m.TableGraveyardCleaningDuration.WithLabelValues(tableName).Observe(duration.Seconds())
}
// GraveyardLowWatermark implements statedb.Metrics. It publishes the lowest
// revision processed by the graveyard garbage collector for the table.
func (i stateDBMetricsImpl) GraveyardLowWatermark(tableName string, lowWatermark uint64) {
	gauge := i.m.TableGraveyardLowWatermark.WithLabelValues(tableName)
	gauge.Set(float64(lowWatermark))
}
// GraveyardObjectCount implements statedb.Metrics. It publishes the number of
// deleted objects currently held in the table's graveyard.
func (i stateDBMetricsImpl) GraveyardObjectCount(tableName string, numDeletedObjects int) {
	gauge := i.m.TableGraveyardObjectCount.WithLabelValues(tableName)
	gauge.Set(float64(numDeletedObjects))
}
// ObjectCount implements statedb.Metrics. It publishes the current number of
// objects stored in the given table.
func (i stateDBMetricsImpl) ObjectCount(tableName string, numObjects int) {
	gauge := i.m.TableObjectCount.WithLabelValues(tableName)
	gauge.Set(float64(numObjects))
}
// Revision implements statedb.Metrics. It publishes the current revision of
// the given table.
func (i stateDBMetricsImpl) Revision(tableName string, revision uint64) {
	gauge := i.m.TableRevision.WithLabelValues(tableName)
	gauge.Set(float64(revision))
}
// WriteTxnDuration implements statedb.Metrics. It records how long the write
// transaction was held, labeled by the acquiring handle.
func (i stateDBMetricsImpl) WriteTxnDuration(handle string, tables []string, acquire time.Duration) {
	// 'tables' is deliberately unused: the handle label carries enough detail.
	observer := i.m.WriteTxnDuration.WithLabelValues(handle)
	observer.Observe(acquire.Seconds())
}
// WriteTxnTableAcquisition implements statedb.Metrics. It records how long the
// writer was blocked acquiring the write lock on the given table.
func (i stateDBMetricsImpl) WriteTxnTableAcquisition(handle string, tableName string, acquire time.Duration) {
	// BUGFIX: the original only fetched the labeled observer and never called
	// Observe, so the table-contention histogram stayed permanently empty.
	i.m.TableContention.WithLabelValues(handle, tableName).Observe(acquire.Seconds())
}
// WriteTxnTotalAcquisition implements statedb.Metrics. It is deliberately a
// no-op.
func (i stateDBMetricsImpl) WriteTxnTotalAcquisition(handle string, tables []string, acquire time.Duration) {
	// Not gathering this metric as it's covered well by the per-table acquisition duration.
}
// Compile-time assertion that stateDBMetricsImpl satisfies statedb.Metrics.
var _ statedb.Metrics = stateDBMetricsImpl{}
// NewStateDBMetrics constructs the StateDB metric vectors. All metrics live
// under the "statedb" subsystem and are created disabled by default.
func NewStateDBMetrics() StateDBMetrics {
	// Shared histogram buckets covering the 0.5ms-1.0s range. HistogramOpts
	// only reads the slice, so sharing one instance is safe.
	durationBuckets := []float64{.0005, .001, .0025, .005, .01, .025, .05, 0.1, 0.25, 0.5, 1.0}
	return StateDBMetrics{
		WriteTxnDuration: metric.NewHistogramVec(metric.HistogramOpts{
			Namespace: metrics.Namespace,
			Subsystem: "statedb",
			Name:      "write_txn_duration_seconds",
			Help:      "How long a write transaction was held.",
			Disabled:  true,
			Buckets:   durationBuckets,
		}, []string{labelHandle}),
		TableContention: metric.NewHistogramVec(metric.HistogramOpts{
			Namespace: metrics.Namespace,
			Subsystem: "statedb",
			Name:      "table_contention_seconds",
			Help:      "How long writers were blocked while waiting to acquire a write transaction for a specific table.",
			Buckets:   durationBuckets,
			Disabled:  true,
		}, []string{labelHandle, labelTable}),
		TableObjectCount: metric.NewGaugeVec(metric.GaugeOpts{
			Namespace: metrics.Namespace,
			Subsystem: "statedb",
			Name:      "table_objects",
			Help:      "The amount of objects in a given table.",
			Disabled:  true,
		}, []string{labelTable}),
		TableRevision: metric.NewGaugeVec(metric.GaugeOpts{
			Namespace: metrics.Namespace,
			Subsystem: "statedb",
			Name:      "table_revision",
			Help:      "The current revision of a given table.",
			Disabled:  true,
		}, []string{labelTable}),
		TableDeleteTrackerCount: metric.NewGaugeVec(metric.GaugeOpts{
			Namespace: metrics.Namespace,
			Subsystem: "statedb",
			Name:      "table_delete_trackers",
			Help:      "The amount of delete trackers for a given table.",
			Disabled:  true,
		}, []string{labelTable}),
		TableGraveyardObjectCount: metric.NewGaugeVec(metric.GaugeOpts{
			Namespace: metrics.Namespace,
			Subsystem: "statedb",
			Name:      "table_graveyard_objects",
			Help:      "The amount of objects in the graveyard for a given table.",
			Disabled:  true,
		}, []string{labelTable}),
		TableGraveyardLowWatermark: metric.NewGaugeVec(metric.GaugeOpts{
			Namespace: metrics.Namespace,
			Subsystem: "statedb",
			Name:      "table_graveyard_low_watermark",
			Help:      "The lowest revision of a given table that has been processed by the graveyard garbage collector.",
			Disabled:  true,
		}, []string{labelTable}),
		TableGraveyardCleaningDuration: metric.NewHistogramVec(metric.HistogramOpts{
			Namespace: metrics.Namespace,
			Subsystem: "statedb",
			Name:      "table_graveyard_cleaning_duration_seconds",
			Help:      "The time it took to clean the graveyard for a given table.",
			Buckets:   durationBuckets,
			Disabled:  true,
		}, []string{labelTable}),
	}
}
// NewStateDBMetricsImpl wraps the StateDBMetrics definitions into the
// statedb.Metrics implementation.
func NewStateDBMetricsImpl(m StateDBMetrics) statedb.Metrics {
	impl := stateDBMetricsImpl{m: m}
	return impl
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package v1
import (
pb "github.com/cilium/cilium/api/v1/flow"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
)
// FlowProtocol returns the protocol best describing the flow. If available,
// this is the L7 protocol name, then the L4 protocol name.
func FlowProtocol(flow *pb.Flow) string {
	switch flow.GetEventType().GetType() {
	case monitorAPI.MessageTypeAccessLog:
		// L7 events: report the application protocol when one is set.
		if l7 := flow.GetL7(); l7 != nil {
			if l7.GetDns() != nil {
				return "DNS"
			}
			if l7.GetHttp() != nil {
				return "HTTP"
			}
			if l7.GetKafka() != nil {
				return "Kafka"
			}
		}
		return "Unknown L7"
	case monitorAPI.MessageTypeDrop, monitorAPI.MessageTypeTrace,
		monitorAPI.MessageTypePolicyVerdict, monitorAPI.MessageTypeCapture:
		// Datapath events: report the transport (or ICMP) protocol.
		if l4 := flow.GetL4(); l4 != nil {
			if l4.GetTCP() != nil {
				return "TCP"
			}
			if l4.GetUDP() != nil {
				return "UDP"
			}
			if l4.GetICMPv4() != nil {
				return "ICMPv4"
			}
			if l4.GetICMPv6() != nil {
				return "ICMPv6"
			}
			if l4.GetSCTP() != nil {
				return "SCTP"
			}
		}
		return "Unknown L4"
	}
	return "Unknown flow"
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package v1
import (
"google.golang.org/protobuf/types/known/timestamppb"
pb "github.com/cilium/cilium/api/v1/flow"
)
// Event represents a single event observed and stored by Hubble
type Event struct {
	// Timestamp when event was observed in Hubble
	Timestamp *timestamppb.Timestamp
	// Event contains the actual event. It holds one of *pb.Flow,
	// *pb.AgentEvent, *pb.DebugEvent or *pb.LostEvent; use the Get* accessors
	// to retrieve the concrete type.
	Event any
}
// GetFlow returns the decoded flow, or nil if the event is nil or not a flow
func (ev *Event) GetFlow() *pb.Flow {
	if ev == nil {
		return nil
	}
	// A failed type assertion (including a nil ev.Event) yields the nil zero
	// value, which matches the documented behavior.
	flow, _ := ev.Event.(*pb.Flow)
	return flow
}
// GetAgentEvent returns the decoded agent event, or nil if the event is nil
// or not an agent event
func (ev *Event) GetAgentEvent() *pb.AgentEvent {
	if ev == nil {
		return nil
	}
	// A failed type assertion (including a nil ev.Event) yields nil.
	agentEvent, _ := ev.Event.(*pb.AgentEvent)
	return agentEvent
}
// GetDebugEvent returns the decoded debug event, or nil if the event is nil
// or not a debug event
func (ev *Event) GetDebugEvent() *pb.DebugEvent {
	if ev == nil {
		return nil
	}
	// A failed type assertion (including a nil ev.Event) yields nil.
	debugEvent, _ := ev.Event.(*pb.DebugEvent)
	return debugEvent
}
// GetLostEvent returns the decoded lost event, or nil if the event is nil
// or not a lost event
func (ev *Event) GetLostEvent() *pb.LostEvent {
	if ev == nil {
		return nil
	}
	// A failed type assertion (including a nil ev.Event) yields nil.
	lostEvent, _ := ev.Event.(*pb.LostEvent)
	return lostEvent
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package agent
import (
"encoding/json"
"fmt"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
flowpb "github.com/cilium/cilium/api/v1/flow"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/time"
)
// notifyTimeNotificationToProto converts a TimeNotification into an agent
// event of the given type. Parsing the timestamp is best-effort: on failure
// the Time field is simply left unset (nil).
func notifyTimeNotificationToProto(typ flowpb.AgentEventType, n monitorAPI.TimeNotification) *flowpb.AgentEvent {
	var ts *timestamppb.Timestamp
	goTime, err := time.Parse(time.RFC3339Nano, n.Time)
	if err == nil {
		ts = timestamppb.New(goTime)
	}
	notification := &flowpb.AgentEvent_AgentStart{
		AgentStart: &flowpb.TimeNotification{Time: ts},
	}
	return &flowpb.AgentEvent{Type: typ, Notification: notification}
}
// notifyPolicyNotificationToProto converts a PolicyUpdateNotification into an
// agent event of the given type.
func notifyPolicyNotificationToProto(typ flowpb.AgentEventType, n monitorAPI.PolicyUpdateNotification) *flowpb.AgentEvent {
	update := &flowpb.PolicyUpdateNotification{
		RuleCount: int64(n.RuleCount),
		Labels:    n.Labels,
		Revision:  n.Revision,
	}
	return &flowpb.AgentEvent{
		Type:         typ,
		Notification: &flowpb.AgentEvent_PolicyUpdate{PolicyUpdate: update},
	}
}
// notifyEndpointRegenNotificationToProto converts an EndpointRegenNotification
// into an agent event of the given type.
func notifyEndpointRegenNotificationToProto(typ flowpb.AgentEventType, n monitorAPI.EndpointRegenNotification) *flowpb.AgentEvent {
	regen := &flowpb.EndpointRegenNotification{
		Id:     n.ID,
		Labels: n.Labels,
		Error:  n.Error,
	}
	return &flowpb.AgentEvent{
		Type:         typ,
		Notification: &flowpb.AgentEvent_EndpointRegenerate{EndpointRegenerate: regen},
	}
}
// notifyEndpointUpdateNotificationToProto converts an EndpointNotification
// into an agent event of the given type.
func notifyEndpointUpdateNotificationToProto(typ flowpb.AgentEventType, n monitorAPI.EndpointNotification) *flowpb.AgentEvent {
	update := &flowpb.EndpointUpdateNotification{
		Id:        n.ID,
		Labels:    n.Labels,
		Error:     n.Error,
		PodName:   n.PodName,
		Namespace: n.Namespace,
	}
	return &flowpb.AgentEvent{
		Type:         typ,
		Notification: &flowpb.AgentEvent_EndpointUpdate{EndpointUpdate: update},
	}
}
// notifyIPCacheNotificationToProto converts an IPCacheNotification into an
// agent event of the given type.
func notifyIPCacheNotificationToProto(typ flowpb.AgentEventType, n monitorAPI.IPCacheNotification) *flowpb.AgentEvent {
	update := &flowpb.IPCacheNotification{
		Cidr:       n.CIDR,
		Identity:   n.Identity,
		EncryptKey: uint32(n.EncryptKey),
		Namespace:  n.Namespace,
		PodName:    n.PodName,
	}
	// The old identity and the host IPs are optional; translate them only
	// when present, leaving the proto fields at their zero values otherwise.
	if n.OldIdentity != nil {
		update.OldIdentity = &wrapperspb.UInt32Value{Value: *n.OldIdentity}
	}
	if n.HostIP != nil {
		update.HostIp = n.HostIP.String()
	}
	if n.OldHostIP != nil {
		update.OldHostIp = n.OldHostIP.String()
	}
	return &flowpb.AgentEvent{
		Type:         typ,
		Notification: &flowpb.AgentEvent_IpcacheUpdate{IpcacheUpdate: update},
	}
}
// notifyUnknownToProto encodes a notification that has no dedicated proto
// representation. The payload is JSON-marshalled best-effort: on failure the
// Notification field is simply the empty string.
func notifyUnknownToProto(typ flowpb.AgentEventType, msg monitorAPI.AgentNotifyMessage) *flowpb.AgentEvent {
	raw, _ := json.Marshal(msg.Notification)
	unknown := &flowpb.AgentEventUnknown{
		Type:         fmt.Sprintf("%d", msg.Type),
		Notification: string(raw),
	}
	return &flowpb.AgentEvent{
		Type:         typ,
		Notification: &flowpb.AgentEvent_Unknown{Unknown: unknown},
	}
}
// NotifyMessageToProto converts a monitor agent notification into its
// protobuf representation. Unrecognized type/notification combinations are
// encoded as an AGENT_EVENT_UNKNOWN event carrying the JSON payload.
func NotifyMessageToProto(msg monitorAPI.AgentNotifyMessage) *flowpb.AgentEvent {
	switch n := msg.Notification.(type) {
	case monitorAPI.TimeNotification:
		if msg.Type == monitorAPI.AgentNotifyStart {
			return notifyTimeNotificationToProto(flowpb.AgentEventType_AGENT_STARTED, n)
		}
	case monitorAPI.PolicyUpdateNotification:
		switch msg.Type {
		case monitorAPI.AgentNotifyPolicyUpdated:
			return notifyPolicyNotificationToProto(flowpb.AgentEventType_POLICY_UPDATED, n)
		case monitorAPI.AgentNotifyPolicyDeleted:
			return notifyPolicyNotificationToProto(flowpb.AgentEventType_POLICY_DELETED, n)
		}
	case monitorAPI.EndpointRegenNotification:
		switch msg.Type {
		case monitorAPI.AgentNotifyEndpointRegenerateSuccess:
			return notifyEndpointRegenNotificationToProto(flowpb.AgentEventType_ENDPOINT_REGENERATE_SUCCESS, n)
		case monitorAPI.AgentNotifyEndpointRegenerateFail:
			return notifyEndpointRegenNotificationToProto(flowpb.AgentEventType_ENDPOINT_REGENERATE_FAILURE, n)
		}
	case monitorAPI.EndpointNotification:
		switch msg.Type {
		case monitorAPI.AgentNotifyEndpointCreated:
			return notifyEndpointUpdateNotificationToProto(flowpb.AgentEventType_ENDPOINT_CREATED, n)
		case monitorAPI.AgentNotifyEndpointDeleted:
			return notifyEndpointUpdateNotificationToProto(flowpb.AgentEventType_ENDPOINT_DELETED, n)
		}
	case monitorAPI.IPCacheNotification:
		switch msg.Type {
		case monitorAPI.AgentNotifyIPCacheUpserted:
			return notifyIPCacheNotificationToProto(flowpb.AgentEventType_IPCACHE_UPSERTED, n)
		case monitorAPI.AgentNotifyIPCacheDeleted:
			return notifyIPCacheNotificationToProto(flowpb.AgentEventType_IPCACHE_DELETED, n)
		}
	}
	return notifyUnknownToProto(flowpb.AgentEventType_AGENT_EVENT_UNKNOWN, msg)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package common
import (
"log/slog"
"net/netip"
pb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/identity"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/k8s/utils"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// DatapathContext carries the source/destination addresses and security label
// IDs as observed by the datapath for a single event, together with the trace
// observation point. ResolveEndpoint uses it to decide whether a discrepancy
// between the datapath and user-space identity is a known benign case.
type DatapathContext struct {
	SrcIP                 netip.Addr
	SrcLabelID            uint32
	DstIP                 netip.Addr
	DstLabelID            uint32
	TraceObservationPoint pb.TraceObservationPoint
}
// EndpointResolver translates IP addresses and security identities into
// hubble endpoint metadata using the configured getters.
type EndpointResolver struct {
	log *slog.Logger
	// logLimiter rate-limits the stale-identity debug logging.
	logLimiter     logging.Limiter
	endpointGetter getters.EndpointGetter
	identityGetter getters.IdentityGetter
	ipGetter       getters.IPGetter
}
// NewEndpointResolver returns an EndpointResolver backed by the given getters.
// Any getter may be nil, in which case the corresponding lookup is skipped.
func NewEndpointResolver(
	log *slog.Logger,
	endpointGetter getters.EndpointGetter,
	identityGetter getters.IdentityGetter,
	ipGetter getters.IPGetter,
) *EndpointResolver {
	r := &EndpointResolver{
		log: log,
		// Rate-limit stale-identity logging to at most one message per 30s.
		logLimiter:     logging.NewLimiter(30*time.Second, 1),
		endpointGetter: endpointGetter,
		identityGetter: identityGetter,
		ipGetter:       ipGetter,
	}
	return r
}
// ResolveEndpoint resolves the given IP address to a hubble endpoint,
// preferring local endpoint information and falling back to the IP cache and
// identity getters for remote endpoints. datapathSecurityIdentity is the
// numeric identity the datapath observed for the event (0 = unknown).
func (r *EndpointResolver) ResolveEndpoint(ip netip.Addr, datapathSecurityIdentity uint32, context DatapathContext) *pb.Endpoint {
	// The datapathSecurityIdentity parameter is the numeric security identity
	// obtained from the datapath.
	// The numeric identity from the datapath can differ from the one we obtain
	// from user-space (e.g. the endpoint manager or the IP cache), because
	// the identity could have changed between the time the datapath event was
	// created and the time the event reaches the Hubble parser.
	// To aid in troubleshooting, we want to preserve what the datapath observed
	// when it made the policy decision.
	resolveIdentityConflict := func(userspaceID identity.NumericIdentity, isLocalEndpoint bool) uint32 {
		// if the datapath did not provide an identity (e.g. FROM_LXC trace
		// points), use what we have in the user-space cache
		datapathID := identity.NumericIdentity(datapathSecurityIdentity)
		if datapathID == identity.IdentityUnknown {
			return userspaceID.Uint32()
		}
		// Log any identity discrepancies, unless this is a known case where
		// Hubble does not have the full picture (see inline comments below each case)
		// or we've hit the log rate limit
		if datapathID != userspaceID {
			if context.TraceObservationPoint == pb.TraceObservationPoint_TO_OVERLAY &&
				ip == context.SrcIP && datapathID.Uint32() == context.SrcLabelID &&
				datapathID == identity.ReservedIdentityRemoteNode &&
				userspaceID == identity.ReservedIdentityHost {
				// Ignore
				//
				// When encapsulating a packet for sending via the overlay network, if the source
				// seclabel = HOST_ID, then we reassign seclabel with LOCAL_NODE_ID and then send
				// a trace notify.
			} else if context.TraceObservationPoint == pb.TraceObservationPoint_TO_OVERLAY &&
				ip == context.SrcIP && datapathID.Uint32() == context.SrcLabelID &&
				!datapathID.IsReservedIdentity() && userspaceID == identity.ReservedIdentityHost {
				// Ignore
				//
				// An IPSec encrypted packet will have the local cilium_host IP as the source
				// address, but the datapath seclabel will be the one of the source pod.
			} else if context.TraceObservationPoint == pb.TraceObservationPoint_FROM_ENDPOINT &&
				ip == context.SrcIP && datapathID.Uint32() == context.SrcLabelID &&
				(datapathID == identity.ReservedIdentityHealth || !datapathID.IsReservedIdentity()) &&
				userspaceID.IsWorld() {
				// Ignore
				//
				// Sometimes packets from endpoint link-local addresses are intercepted by
				// cil_from_container. Because link-local addresses are not stored in the IP cache,
				// Hubble assigns them WORLD_ID.
			} else if context.TraceObservationPoint == pb.TraceObservationPoint_FROM_HOST &&
				ip == context.SrcIP && datapathID.Uint32() == context.SrcLabelID &&
				datapathID.IsWorld() && userspaceID == identity.ReservedIdentityKubeAPIServer {
				// Ignore
				//
				// When a pod sends a packet to the Kubernetes API, its IP is masqueraded and then
				// when it receives a response and the masquerade is reversed, cil_from_host
				// determines that the source ID is WORLD_ID because there is no packet mark.
			} else if (context.TraceObservationPoint == pb.TraceObservationPoint_FROM_HOST ||
				context.TraceObservationPoint == pb.TraceObservationPoint_TO_OVERLAY) &&
				ip == context.SrcIP && datapathID.Uint32() == context.SrcLabelID &&
				isLocalEndpoint && userspaceID == identity.ReservedIdentityHost {
				// Ignore
				//
				// When proxied packets (via Cilium DNS proxy) are sent from the host their source
				// IP is that of the host, yet their security identity is retained from the
				// original source pod.
			} else if context.TraceObservationPoint == pb.TraceObservationPoint_TO_ENDPOINT &&
				ip == context.SrcIP && datapathID.Uint32() == context.SrcLabelID &&
				!datapathID.IsReservedIdentity() &&
				(userspaceID == identity.ReservedIdentityHost || userspaceID == identity.ReservedIdentityRemoteNode) {
				// Ignore
				//
				// When proxied packets (via Cilium DNS proxy) are received by the destination
				// host their source IP is that of the proxy, yet their security identity is
				// retained from the original source pod. This is a similar case to #4, but on the
				// receiving side.
			} else if r.logLimiter.Allow() {
				r.log.Debug(
					"stale identity observed",
					logfields.DatapathIdentity, datapathID,
					logfields.UserspaceIdentity, userspaceID,
					logfields.Context, context,
					logfields.IPAddr, ip,
				)
			}
		}
		// Prefer the identity the datapath observed when making its decision.
		return datapathID.Uint32()
	}
	// for local endpoints, use the available endpoint information
	if r.endpointGetter != nil {
		if ep, ok := r.endpointGetter.GetEndpointInfo(ip); ok {
			epIdentity := resolveIdentityConflict(ep.GetIdentity(), true)
			labels := ep.GetLabels()
			e := &pb.Endpoint{
				ID:          uint32(ep.GetID()),
				Identity:    epIdentity,
				ClusterName: (labels[k8sConst.PolicyLabelCluster]).Value,
				Namespace:   ep.GetK8sNamespace(),
				Labels:      SortAndFilterLabels(r.log, labels.GetModel(), identity.NumericIdentity(epIdentity)),
				PodName:     ep.GetK8sPodName(),
			}
			// Attach workload (owner) metadata when the pod is known.
			if pod := ep.GetPod(); pod != nil {
				workload, workloadTypeMeta, ok := utils.GetWorkloadMetaFromPod(pod)
				if ok {
					e.Workloads = []*pb.Workload{{Kind: workloadTypeMeta.Kind, Name: workload.Name}}
				}
			}
			return e
		}
	}
	// for remote endpoints, assemble the information via ip and identity
	numericIdentity := datapathSecurityIdentity
	var namespace, podName string
	if r.ipGetter != nil {
		if ipIdentity, ok := r.ipGetter.LookupSecIDByIP(ip); ok {
			numericIdentity = resolveIdentityConflict(ipIdentity.ID, false)
		}
		if meta := r.ipGetter.GetK8sMetadata(ip); meta != nil {
			namespace, podName = meta.Namespace, meta.PodName
		}
	}
	var labels []string
	var clusterName string
	if r.identityGetter != nil {
		if id, err := r.identityGetter.GetIdentity(numericIdentity); err != nil {
			// Best-effort: on lookup failure the endpoint is returned without
			// labels or cluster name.
			r.log.Debug(
				"failed to resolve identity",
				logfields.Error, err,
				logfields.Identity, numericIdentity,
			)
		} else {
			labels = SortAndFilterLabels(r.log, id.Labels.GetModel(), identity.NumericIdentity(numericIdentity))
			clusterName = (id.Labels[k8sConst.PolicyLabelCluster]).Value
		}
	}
	return &pb.Endpoint{
		Identity:    numericIdentity,
		ClusterName: clusterName,
		Namespace:   namespace,
		Labels:      labels,
		PodName:     podName,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package common
import (
"log/slog"
"net"
"slices"
"strings"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// FilterCIDRLabels returns labels with all "cidr:" labels removed except the
// most specific one (longest prefix). Non-CIDR labels pass through unchanged;
// invalid CIDR labels are logged and dropped (best effort).
func FilterCIDRLabels(log *slog.Logger, labels []string) []string {
	// Cilium might return a bunch of cidr labels with different prefix length. Filter out all
	// but the longest prefix cidr label, which can be useful for troubleshooting. This also
	// relies on the fact that when a Cilium security identity has multiple CIDR labels, longer
	// prefix is always a subset of shorter prefix.
	const cidrPrefix = "cidr:"
	var filteredLabels []string
	var (
		found   bool
		maxSize int
		maxStr  string
	)
	for _, label := range labels {
		if !strings.HasPrefix(label, cidrPrefix) {
			filteredLabels = append(filteredLabels, label)
			continue
		}
		currLabel := strings.TrimPrefix(label, cidrPrefix)
		// labels for IPv6 addresses are represented with - instead of : as
		// : cannot be used in labels; make sure to convert it to a valid
		// IPv6 representation
		currLabel = strings.ReplaceAll(currLabel, "-", ":")
		_, curr, err := net.ParseCIDR(currLabel)
		if err != nil {
			log.Warn(
				"got an invalid cidr label",
				logfields.Label, label,
			)
			continue
		}
		// BUGFIX: track the longest prefix with an explicit 'found' flag. The
		// original used 'maxSize != 0' as sentinel, which silently dropped a
		// lone /0 CIDR label (mask size 0, e.g. "cidr:0.0.0.0/0").
		if currMask, _ := curr.Mask.Size(); !found || currMask > maxSize {
			found = true
			maxSize, maxStr = currMask, label
		}
	}
	if found {
		filteredLabels = append(filteredLabels, maxStr)
	}
	return filteredLabels
}
// SortAndFilterLabels returns the labels in sorted order. For identities with
// local scope the CIDR labels are first reduced to the most specific one via
// FilterCIDRLabels.
func SortAndFilterLabels(log *slog.Logger, labels []string, securityIdentity identity.NumericIdentity) []string {
	result := labels
	if securityIdentity.HasLocalScope() {
		result = FilterCIDRLabels(log, result)
	}
	slices.Sort(result)
	return result
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package debug
import (
"fmt"
"log/slog"
"google.golang.org/protobuf/types/known/wrapperspb"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/hubble/parser/common"
"github.com/cilium/cilium/pkg/hubble/parser/errors"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/monitor"
"github.com/cilium/cilium/pkg/monitor/api"
)
// Parser is a parser for debug payloads
type Parser struct {
	log            *slog.Logger
	endpointGetter getters.EndpointGetter
	// linkMonitor is passed to DebugMsg.Message when formatting the event.
	// NOTE(review): it is never assigned by New in this file, so Message()
	// receives a nil getter unless it is set elsewhere — confirm.
	linkMonitor getters.LinkGetter
}
// New creates a new parser for debug payloads.
func New(log *slog.Logger, endpointGetter getters.EndpointGetter) (*Parser, error) {
	p := &Parser{
		log:            log,
		endpointGetter: endpointGetter,
	}
	return p, nil
}
// Decode takes a debug event payload obtained from the perf event ring
// buffer and decodes it into a flowpb.DebugEvent.
func (p *Parser) Decode(data []byte, cpu int) (*flowpb.DebugEvent, error) {
	if len(data) == 0 {
		return nil, errors.ErrEmptyData
	}
	if t := data[0]; t != api.MessageTypeDebug {
		return nil, errors.NewErrInvalidType(t)
	}
	var dbg monitor.DebugMsg
	if err := dbg.Decode(data); err != nil {
		return nil, fmt.Errorf("failed to parse debug event: %w", err)
	}
	return &flowpb.DebugEvent{
		Type:    flowpb.DebugEventType(dbg.SubType),
		Source:  p.decodeEndpoint(dbg.Source),
		Hash:    wrapperspb.UInt32(dbg.Hash),
		Arg1:    wrapperspb.UInt32(dbg.Arg1),
		Arg2:    wrapperspb.UInt32(dbg.Arg2),
		Arg3:    wrapperspb.UInt32(dbg.Arg3),
		Cpu:     wrapperspb.Int32(int32(cpu)),
		Message: dbg.Message(p.linkMonitor),
	}, nil
}
// decodeEndpoint resolves a datapath endpoint ID to a flowpb.Endpoint,
// enriching it with endpoint metadata when an endpoint getter is available.
// An ID of 0 means "no endpoint" and yields nil.
func (p *Parser) decodeEndpoint(id uint16) *flowpb.Endpoint {
	if id == 0 {
		return nil
	}
	epID := uint32(id)
	if p.endpointGetter == nil {
		return &flowpb.Endpoint{ID: epID}
	}
	ep, ok := p.endpointGetter.GetEndpointInfoByID(id)
	if !ok {
		// Unknown endpoint: report the bare ID.
		return &flowpb.Endpoint{ID: epID}
	}
	labels := ep.GetLabels()
	return &flowpb.Endpoint{
		ID:          epID,
		Identity:    uint32(ep.GetIdentity()),
		ClusterName: (labels[k8sConst.PolicyLabelCluster]).Value,
		Namespace:   ep.GetK8sNamespace(),
		Labels:      common.SortAndFilterLabels(p.log, labels.GetModel(), ep.GetIdentity()),
		PodName:     ep.GetK8sPodName(),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package errors
import (
"errors"
"fmt"
)
var (
	// ErrEmptyData is returned when the monitoring payload contained no data
	ErrEmptyData = errors.New("empty data")
	// ErrUnknownEventType is returned if the monitor event is an unknown type
	ErrUnknownEventType = errors.New("unknown event type")
	// ErrInvalidAgentMessageType is returned if an agent message is of invalid type
	ErrInvalidAgentMessageType = errors.New("invalid agent message type")
	// ErrEventSkipped is returned when an event was skipped (e.g. due to configuration
	// or incomplete data)
	ErrEventSkipped = errors.New("event was skipped")
)
// ErrInvalidType specifies when it was given a packet type that was not
// possible to be decoded by the decoder.
type ErrInvalidType struct {
	invalidType byte
}

// NewErrInvalidType returns a new ErrInvalidType
func NewErrInvalidType(invalidType byte) error {
	return ErrInvalidType{invalidType: invalidType}
}

// Error implements the error interface.
func (e ErrInvalidType) Error() string {
	return fmt.Sprintf("can't decode following payload type: %v", e.invalidType)
}

// IsErrInvalidType returns true if the given error is type of ErrInvalidType
func IsErrInvalidType(err error) bool {
	var target ErrInvalidType
	return errors.As(err, &target)
}
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package parser
import (
observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types"
fuzz "github.com/AdaLogics/go-fuzz-headers"
)
var (
	// payloads maps a fuzzer-chosen index onto the name of the monitor
	// payload type to generate; see FuzzParserDecode.
	payloads = map[int]string{
		0: "PerfEvent",
		1: "AgentEvent",
		2: "LostEvent",
	}
)
// FuzzParserDecode feeds Parser.Decode with one of the three supported
// monitor payload kinds, generated from the fuzzer-provided bytes. The return
// value follows go-fuzz conventions (0 = input not interesting).
func FuzzParserDecode(data []byte) int {
	parser, err := New(nil, nil, nil, nil, nil, nil, nil, nil)
	if err != nil {
		return 0
	}
	consumer := fuzz.NewConsumer(data)
	choice, err := consumer.GetInt()
	if err != nil {
		return 0
	}
	event := &observerTypes.MonitorEvent{}
	// GenerateStruct errors are ignored on purpose: a partially filled
	// payload is still a valid fuzz input.
	switch payloads[choice%len(payloads)] {
	case "PerfEvent":
		payload := &observerTypes.PerfEvent{}
		consumer.GenerateStruct(payload)
		event.Payload = payload
	case "AgentEvent":
		payload := &observerTypes.AgentEvent{}
		consumer.GenerateStruct(payload)
		event.Payload = payload
	case "LostEvent":
		payload := &observerTypes.LostEvent{}
		consumer.GenerateStruct(payload)
		event.Payload = payload
	}
	_, _ = parser.Decode(event)
	return 0
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package options
import "strings"
// Option is used to configure parsers
type Option func(*Options)

// Options contains all parser options
type Options struct {
	// CacheSize is the number of L7 requests kept for latency calculation.
	CacheSize int
	// HubbleRedactSettings controls which flow data gets redacted.
	HubbleRedactSettings HubbleRedactSettings
	// EnableNetworkPolicyCorrelation toggles network policy correlation.
	EnableNetworkPolicyCorrelation bool
	// SkipUnknownCGroupIDs toggles skipping events with unknown CGroup IDs.
	SkipUnknownCGroupIDs bool
}

// HubbleRedactSettings contains all hubble redact related options
type HubbleRedactSettings struct {
	Enabled            bool
	RedactHTTPQuery    bool
	RedactHTTPUserInfo bool
	RedactKafkaAPIKey  bool
	RedactHttpHeaders  HttpHeadersList
}

// HttpHeadersList contains the allow/deny list of headers
type HttpHeadersList struct {
	Allow map[string]struct{}
	Deny  map[string]struct{}
}

// CacheSize configures the amount of L7 requests cached for latency calculation
func CacheSize(size int) Option {
	return func(opt *Options) { opt.CacheSize = size }
}

// WithRedact configures which data Hubble will redact.
func WithRedact(httpQuery, httpUserInfo, kafkaApiKey bool, allowHeaders, denyHeaders []string) Option {
	return func(opt *Options) {
		// Assigning the whole struct sets every field, so this is equivalent
		// to updating each field individually.
		opt.HubbleRedactSettings = HubbleRedactSettings{
			Enabled:            true,
			RedactHTTPQuery:    httpQuery,
			RedactHTTPUserInfo: httpUserInfo,
			RedactKafkaAPIKey:  kafkaApiKey,
			RedactHttpHeaders: HttpHeadersList{
				Allow: headerSliceToMap(allowHeaders),
				Deny:  headerSliceToMap(denyHeaders),
			},
		}
	}
}

// WithNetworkPolicyCorrelation configures the Network Policy correlation of Hubble Flows.
func WithNetworkPolicyCorrelation(enabled bool) Option {
	return func(opt *Options) { opt.EnableNetworkPolicyCorrelation = enabled }
}

// WithSkipUnknownCGroupIDs configures whether Hubble will skip events with unknown CGroup IDs.
func WithSkipUnknownCGroupIDs(enabled bool) Option {
	return func(opt *Options) { opt.SkipUnknownCGroupIDs = enabled }
}

// headerSliceToMap builds a lowercase membership set from the header names.
func headerSliceToMap(headerList []string) map[string]struct{} {
	headers := make(map[string]struct{}, len(headerList))
	for _, name := range headerList {
		headers[strings.ToLower(name)] = struct{}{}
	}
	return headers
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Copyright Authors of Cilium
package parser
import (
"log/slog"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
pb "github.com/cilium/cilium/api/v1/flow"
v1 "github.com/cilium/cilium/pkg/hubble/api/v1"
observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types"
"github.com/cilium/cilium/pkg/hubble/parser/agent"
"github.com/cilium/cilium/pkg/hubble/parser/debug"
"github.com/cilium/cilium/pkg/hubble/parser/errors"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/hubble/parser/options"
"github.com/cilium/cilium/pkg/hubble/parser/seven"
"github.com/cilium/cilium/pkg/hubble/parser/sock"
"github.com/cilium/cilium/pkg/hubble/parser/threefour"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/proxy/accesslog"
)
// Decoder is an interface for the parser.
// It decodes a monitor event into a hubble event. Parser (below) is the
// canonical implementation.
type Decoder interface {
	// Decode transforms a monitor event into a hubble event.
	Decode(monitorEvent *observerTypes.MonitorEvent) (*v1.Event, error)
}
// Parser for all flows
type Parser struct {
	l34  *threefour.Parser // layer 3/4 (packet-based) events
	l7   *seven.Parser     // layer 7 (access log) events
	dbg  *debug.Parser     // datapath debug events
	sock *sock.Parser      // socket trace events
}
// New creates a new parser, wiring up the sub-parsers for L3/L4, L7, debug
// and socket trace events. The first sub-parser construction error aborts.
func New(
	log *slog.Logger,
	endpointGetter getters.EndpointGetter,
	identityGetter getters.IdentityGetter,
	dnsGetter getters.DNSGetter,
	ipGetter getters.IPGetter,
	serviceGetter getters.ServiceGetter,
	linkGetter getters.LinkGetter,
	cgroupGetter getters.PodMetadataGetter,
	opts ...options.Option,
) (*Parser, error) {
	var (
		p   Parser
		err error
	)
	if p.l34, err = threefour.New(log, endpointGetter, identityGetter, dnsGetter, ipGetter, serviceGetter, linkGetter, opts...); err != nil {
		return nil, err
	}
	if p.l7, err = seven.New(log, dnsGetter, ipGetter, serviceGetter, endpointGetter, opts...); err != nil {
		return nil, err
	}
	if p.dbg, err = debug.New(log, endpointGetter); err != nil {
		return nil, err
	}
	if p.sock, err = sock.New(log, endpointGetter, identityGetter, dnsGetter, ipGetter, serviceGetter, cgroupGetter, opts...); err != nil {
		return nil, err
	}
	return &p, nil
}
// lostEventSourceToProto maps an internal lost-event source identifier to
// its protobuf representation, falling back to UNKNOWN for anything else.
func lostEventSourceToProto(source int) pb.LostEventSource {
	switch source {
	case observerTypes.LostEventSourceEventsQueue:
		return pb.LostEventSource_OBSERVER_EVENTS_QUEUE
	case observerTypes.LostEventSourcePerfRingBuffer:
		return pb.LostEventSource_PERF_EVENT_RING_BUFFER
	case observerTypes.LostEventSourceHubbleRingBuffer:
		return pb.LostEventSource_HUBBLE_RING_BUFFER
	}
	return pb.LostEventSource_UNKNOWN_LOST_EVENT_SOURCE
}
// Decode decodes a cilium monitor 'payload' and returns a v1.Event with
// the Event field populated.
//
// It dispatches on the payload type: perf ring buffer events are routed to
// the debug, socket trace, or L3/L4 sub-parsers; agent events to the L7
// access-log decoder or the agent notification converter; lost-event
// notifications are converted directly. A nil monitorEvent or nil payload
// yields errors.ErrEmptyData, an unrecognized payload type yields
// errors.ErrUnknownEventType.
func (p *Parser) Decode(monitorEvent *observerTypes.MonitorEvent) (*v1.Event, error) {
	if monitorEvent == nil {
		return nil, errors.ErrEmptyData
	}
	// TODO: Pool decoded flows instead of allocating new objects each time.
	ts := timestamppb.New(monitorEvent.Timestamp)
	ev := &v1.Event{
		Timestamp: ts,
	}
	switch payload := monitorEvent.Payload.(type) {
	case *observerTypes.PerfEvent:
		return p.decodePerfEvent(monitorEvent, payload, ts, ev)
	case *observerTypes.AgentEvent:
		return p.decodeAgentEvent(monitorEvent, payload, ts, ev)
	case *observerTypes.LostEvent:
		ev.Event = &pb.LostEvent{
			Source:        lostEventSourceToProto(payload.Source),
			NumEventsLost: payload.NumLostEvents,
			Cpu: &wrapperspb.Int32Value{
				Value: int32(payload.CPU),
			},
		}
		return ev, nil
	case nil:
		// Keep the partially populated event so callers still see the
		// timestamp alongside the error.
		return ev, errors.ErrEmptyData
	default:
		return nil, errors.ErrUnknownEventType
	}
}

// decodePerfEvent decodes a perf ring buffer event (datapath debug, socket
// trace, or L3/L4 flow) into ev.
func (p *Parser) decodePerfEvent(monitorEvent *observerTypes.MonitorEvent, payload *observerTypes.PerfEvent, ts *timestamppb.Timestamp, ev *v1.Event) (*v1.Event, error) {
	if len(payload.Data) == 0 {
		return nil, errors.ErrEmptyData
	}
	flow := &pb.Flow{}
	switch payload.Data[0] {
	case monitorAPI.MessageTypeDebug:
		// Debug and TraceSock are both perf ring buffer events without any
		// associated captured network packet header, so we treat them
		// separately
		dbg, err := p.dbg.Decode(payload.Data, payload.CPU)
		if err != nil {
			return nil, err
		}
		ev.Event = dbg
		return ev, nil
	case monitorAPI.MessageTypeTraceSock:
		if err := p.sock.Decode(payload.Data, flow); err != nil {
			return nil, err
		}
	default:
		if err := p.l34.Decode(payload.Data, flow); err != nil {
			return nil, err
		}
	}
	flow.Uuid = monitorEvent.UUID.String()
	// FIXME: Time and NodeName are now part of GetFlowsResponse. We
	// populate these fields for compatibility with old clients.
	flow.Time = ts
	flow.NodeName = monitorEvent.NodeName
	ev.Event = flow
	return ev, nil
}

// decodeAgentEvent decodes an agent event (L7 proxy access-log record or
// agent notification) into ev.
func (p *Parser) decodeAgentEvent(monitorEvent *observerTypes.MonitorEvent, payload *observerTypes.AgentEvent, ts *timestamppb.Timestamp, ev *v1.Event) (*v1.Event, error) {
	switch payload.Type {
	case monitorAPI.MessageTypeAccessLog:
		flow := &pb.Flow{}
		logrecord, ok := payload.Message.(accesslog.LogRecord)
		if !ok {
			return nil, errors.ErrInvalidAgentMessageType
		}
		if err := p.l7.Decode(&logrecord, flow); err != nil {
			return nil, err
		}
		flow.Uuid = monitorEvent.UUID.String()
		// FIXME: Time and NodeName are now part of GetFlowsResponse. We
		// populate these fields for compatibility with old clients.
		flow.Time = ts
		flow.NodeName = monitorEvent.NodeName
		ev.Event = flow
		return ev, nil
	case monitorAPI.MessageTypeAgent:
		agentNotifyMessage, ok := payload.Message.(monitorAPI.AgentNotifyMessage)
		if !ok {
			return nil, errors.ErrInvalidAgentMessageType
		}
		ev.Event = agent.NotifyMessageToProto(agentNotifyMessage)
		return ev, nil
	default:
		return nil, errors.ErrUnknownEventType
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package seven
import (
"fmt"
"strings"
"github.com/gopacket/gopacket/layers"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/proxy/accesslog"
)
// decodeDNS converts an access-log DNS record into its flow protobuf
// representation. Requests carry only the query, its types, and the
// observation source; responses additionally carry the answer data
// (IPs, TTL, CNAMEs, response code, and answer record types).
func decodeDNS(flowType accesslog.FlowType, dns *accesslog.LogRecordDNS) *flowpb.Layer7_Dns {
	queryTypes := make([]string, 0, len(dns.QTypes))
	for _, qt := range dns.QTypes {
		queryTypes = append(queryTypes, layers.DNSType(qt).String())
	}
	msg := &flowpb.DNS{
		Query:             dns.Query,
		ObservationSource: string(dns.ObservationSource),
		Qtypes:            queryTypes,
	}
	if flowType != accesslog.TypeRequest {
		answerIPs := make([]string, 0, len(dns.IPs))
		for _, addr := range dns.IPs {
			answerIPs = append(answerIPs, addr.String())
		}
		answerTypes := make([]string, 0, len(dns.AnswerTypes))
		for _, at := range dns.AnswerTypes {
			answerTypes = append(answerTypes, layers.DNSType(at).String())
		}
		msg.Ips = answerIPs
		msg.Ttl = dns.TTL
		msg.Cnames = dns.CNAMEs
		msg.Rcode = uint32(dns.RCode)
		msg.Rrtypes = answerTypes
	}
	return &flowpb.Layer7_Dns{Dns: msg}
}
// dnsSummary renders a short human-readable description of a DNS request or
// response record; it returns "" for any other flow type.
func dnsSummary(flowType accesslog.FlowType, dns *accesslog.LogRecordDNS) string {
	qtypeNames := make([]string, 0, len(dns.QTypes))
	for _, qt := range dns.QTypes {
		qtypeNames = append(qtypeNames, layers.DNSType(qt).String())
	}
	joinedQTypes := strings.Join(qtypeNames, ",")
	switch flowType {
	case accesslog.TypeRequest:
		return fmt.Sprintf("DNS Query %s %s", dns.Query, joinedQTypes)
	case accesslog.TypeResponse:
		var answer string
		if rcode := layers.DNSResponseCode(dns.RCode); rcode != layers.DNSResponseCodeNoErr {
			// Error responses are summarized by their response code only.
			answer = fmt.Sprintf("RCode: %s", rcode)
		} else {
			var parts []string
			if len(dns.IPs) > 0 {
				addrs := make([]string, 0, len(dns.IPs))
				for _, addr := range dns.IPs {
					addrs = append(addrs, addr.String())
				}
				parts = append(parts, fmt.Sprintf("%q", strings.Join(addrs, ",")))
			}
			if len(dns.CNAMEs) > 0 {
				parts = append(parts, fmt.Sprintf("CNAMEs: %q", strings.Join(dns.CNAMEs, ",")))
			}
			answer = strings.Join(parts, " ")
		}
		sourceType := "Query"
		if dns.ObservationSource == accesslog.DNSSourceProxy {
			sourceType = "Proxy"
		}
		return fmt.Sprintf("DNS Answer %s TTL: %d (%s %s %s)", answer, dns.TTL, sourceType, dns.Query, joinedQTypes)
	}
	return ""
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package seven
import (
"fmt"
"maps"
"net/url"
"slices"
"strings"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/hubble/defaults"
"github.com/cilium/cilium/pkg/hubble/parser/options"
"github.com/cilium/cilium/pkg/proxy/accesslog"
"github.com/cilium/cilium/pkg/time"
)
// decodeHTTP converts an access-log HTTP record into its flow protobuf
// representation. Header values and the URL are filtered according to the
// configured Hubble redaction settings; headers are emitted in sorted key
// order for deterministic output. Only responses carry a status code.
func decodeHTTP(flowType accesslog.FlowType, http *accesslog.LogRecordHTTP, opts *options.Options) *flowpb.Layer7_Http {
	var headers []*flowpb.HTTPHeader
	for _, name := range slices.Sorted(maps.Keys(http.Headers)) {
		for _, raw := range http.Headers[name] {
			headers = append(headers, &flowpb.HTTPHeader{
				Key:   name,
				Value: filterHeader(name, raw, opts.HubbleRedactSettings),
			})
		}
	}
	msg := &flowpb.HTTP{
		Method:   http.Method,
		Protocol: http.Protocol,
		Url:      filteredURL(http.URL, opts.HubbleRedactSettings).String(),
		Headers:  headers,
	}
	if flowType != accesslog.TypeRequest {
		msg.Code = uint32(http.Code)
	}
	return &flowpb.Layer7_Http{Http: msg}
}
// httpSummary renders a short human-readable description of an HTTP request
// or response; it returns "" for any other flow type. The URL is filtered
// through the configured redaction settings before display.
func (p *Parser) httpSummary(flowType accesslog.FlowType, http *accesslog.LogRecordHTTP, flow *flowpb.Flow) string {
	uri := filteredURL(http.URL, p.opts.HubbleRedactSettings)
	request := http.Method + " " + uri.String()
	switch flowType {
	case accesslog.TypeRequest:
		return fmt.Sprintf("%s %s", http.Protocol, request)
	case accesslog.TypeResponse:
		latencyMs := uint64(time.Duration(flow.GetL7().LatencyNs) / time.Millisecond)
		return fmt.Sprintf("%s %d %dms (%s)", http.Protocol, http.Code, latencyMs, request)
	}
	return ""
}
// filterHeader decides whether the value of a single HTTP header may be
// exposed or must be replaced with the redaction placeholder
// "HUBBLE_REDACTED", based on the allow/deny lists of the given
// HubbleRedactSettings:
//  1. Redaction disabled: the value is returned untouched.
//  2. Redaction enabled with empty allow and deny lists: every value is
//     redacted by default.
//  3. A key present in the allow list is never redacted; a key present in
//     the deny list always is.
//  4. Otherwise, a non-empty allow list redacts unlisted keys, while a
//     deny-list-only configuration passes them through.
func filterHeader(key string, value string, redactSettings options.HubbleRedactSettings) string {
	if !redactSettings.Enabled {
		return value
	}
	allow := redactSettings.RedactHttpHeaders.Allow
	deny := redactSettings.RedactHttpHeaders.Deny
	if len(allow) == 0 && len(deny) == 0 {
		// hubble.redact is enabled but no header lists were configured:
		// redact everything by default.
		return defaults.SensitiveValueRedacted
	}
	lowerKey := strings.ToLower(key)
	if _, allowed := allow[lowerKey]; allowed {
		return value
	}
	if _, denied := deny[lowerKey]; denied {
		return defaults.SensitiveValueRedacted
	}
	if len(allow) > 0 {
		return defaults.SensitiveValueRedacted
	}
	return value
}
// filteredURL returns a copy of the given URL, potentially mutated depending
// on Hubble redact settings: the password portion of any user info is
// replaced with the redaction placeholder, and/or the query and fragment are
// stripped. A nil input yields an empty (non-nil) URL so that callers can
// always call String() on the result.
func filteredURL(uri *url.URL, redactSettings options.HubbleRedactSettings) *url.URL {
	if uri == nil {
		return &url.URL{}
	}
	out := cloneURL(uri)
	if redactSettings.RedactHTTPUserInfo && out.User != nil {
		if _, hasPassword := out.User.Password(); hasPassword {
			out.User = url.UserPassword(out.User.Username(), defaults.SensitiveValueRedacted)
		}
	}
	if redactSettings.RedactHTTPQuery {
		out.RawQuery = ""
		out.Fragment = ""
	}
	return out
}
// cloneURL return a copy of the given URL. Copied from src/net/http/clone.go.
func cloneURL(u *url.URL) *url.URL {
if u == nil {
return nil
}
u2 := new(url.URL)
*u2 = *u
if u.User != nil {
u2.User = new(url.Userinfo)
*u2.User = *u.User
}
return u2
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package seven
import (
"fmt"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/hubble/defaults"
"github.com/cilium/cilium/pkg/hubble/parser/options"
"github.com/cilium/cilium/pkg/proxy/accesslog"
)
// decodeKafka converts an access-log Kafka record into its flow protobuf
// representation. The API key is replaced by the redaction placeholder when
// configured; only responses carry an error code.
func decodeKafka(flowType accesslog.FlowType, kafka *accesslog.LogRecordKafka, opts *options.Options) *flowpb.Layer7_Kafka {
	apiKey := kafka.APIKey
	if opts.HubbleRedactSettings.RedactKafkaAPIKey {
		// Conditionally exclude the API key from the flow.
		apiKey = defaults.SensitiveValueRedacted
	}
	msg := &flowpb.Kafka{
		ApiVersion:    int32(kafka.APIVersion),
		ApiKey:        apiKey,
		CorrelationId: kafka.CorrelationID,
		Topic:         kafka.Topic.Topic,
	}
	if flowType != accesslog.TypeRequest {
		msg.ErrorCode = int32(kafka.ErrorCode)
	}
	return &flowpb.Layer7_Kafka{Kafka: msg}
}
// kafkaSummary renders a short human-readable description of a Kafka
// request or response flow; it returns "" when the flow carries no Kafka
// record.
func kafkaSummary(flow *flowpb.Flow) string {
	kafka := flow.GetL7().GetKafka()
	if kafka == nil {
		return ""
	}
	if flow.GetL7().Type != flowpb.L7FlowType_REQUEST {
		// Anything that is not a request is summarized as a response.
		return fmt.Sprintf("Kafka response %s correlation id %d topic '%s' return code %d",
			kafka.ApiKey,
			kafka.CorrelationId,
			kafka.Topic,
			kafka.ErrorCode)
	}
	return fmt.Sprintf("Kafka request %s correlation id %d topic '%s'",
		kafka.ApiKey,
		kafka.CorrelationId,
		kafka.Topic)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package seven
import (
"fmt"
"log/slog"
"net/netip"
"slices"
lru "github.com/hashicorp/golang-lru/v2"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/hubble/parser/errors"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/hubble/parser/options"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/k8s/utils"
"github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/proxy/accesslog"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
"github.com/cilium/cilium/pkg/u8proto"
)
// Parser is a parser for L7 payloads
type Parser struct {
	log *slog.Logger
	// timestampCache maps request IDs to request timestamps so the matching
	// response can compute its latency.
	timestampCache *lru.Cache[string, time.Time]
	// traceContextCache maps request IDs to trace contexts so the matching
	// response can be attributed to the same trace.
	traceContextCache *lru.Cache[string, *flowpb.TraceContext]
	dnsGetter         getters.DNSGetter
	ipGetter          getters.IPGetter
	serviceGetter     getters.ServiceGetter
	endpointGetter    getters.EndpointGetter
	// opts holds the effective parser options (cache size, redaction).
	opts *options.Options
}
// New returns a new L7 parser.
// Defaults (cache size 10000, redaction disabled except HTTP user info) may
// be overridden via options. Construction fails only if an LRU cache cannot
// be created.
func New(
	log *slog.Logger,
	dnsGetter getters.DNSGetter,
	ipGetter getters.IPGetter,
	serviceGetter getters.ServiceGetter,
	endpointGetter getters.EndpointGetter,
	opts ...options.Option,
) (*Parser, error) {
	cfg := &options.Options{
		CacheSize: 10000,
		HubbleRedactSettings: options.HubbleRedactSettings{
			Enabled:            false,
			RedactHTTPUserInfo: true,
			RedactHTTPQuery:    false,
			RedactKafkaAPIKey:  false,
			RedactHttpHeaders: options.HttpHeadersList{
				Allow: map[string]struct{}{},
				Deny:  map[string]struct{}{},
			},
		},
	}
	for _, applyOpt := range opts {
		applyOpt(cfg)
	}
	tsCache, err := lru.New[string, time.Time](cfg.CacheSize)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize cache: %w", err)
	}
	tcCache, err := lru.New[string, *flowpb.TraceContext](cfg.CacheSize)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize cache: %w", err)
	}
	p := &Parser{
		log:               log,
		timestampCache:    tsCache,
		traceContextCache: tcCache,
		dnsGetter:         dnsGetter,
		ipGetter:          ipGetter,
		serviceGetter:     serviceGetter,
		endpointGetter:    endpointGetter,
		opts:              cfg,
	}
	return p, nil
}
// Decode decodes the data from 'payload' into 'decoded'.
//
// It translates an accesslog.LogRecord (an L7 proxy event) into a
// flowpb.Flow: timestamp, L3/L4 info, endpoints (including workload and
// service lookups), DNS names, the L7 payload itself, trace context, and a
// human-readable summary. It returns errors.ErrEmptyData when r is nil, or
// an error if the record's timestamp cannot be parsed.
func (p *Parser) Decode(r *accesslog.LogRecord, decoded *flowpb.Flow) error {
	// Safety: This function and all the helpers it invokes are not allowed to
	// mutate r in any way. We only have read access to the LogRecord, as it
	// may be shared with other consumers
	if r == nil {
		return errors.ErrEmptyData
	}
	timestamp, pbTimestamp, err := decodeTime(r.Timestamp)
	if err != nil {
		return err
	}
	// decodeIP returns nil for an unknown IP version, so use the nil-safe
	// generated getters below instead of dereferencing the struct directly.
	ip := decodeIP(r.IPVersion, r.SourceEndpoint, r.DestinationEndpoint)
	// Ignore IP parsing errors as IPs can be empty. Getters will handle invalid values.
	// Flows with empty IPs have been observed in practice, but it was not clear what kind of flows
	// those are - errors handling here should be revisited once it's clear.
	sourceIP, _ := netip.ParseAddr(ip.GetSource())
	destinationIP, _ := netip.ParseAddr(ip.GetDestination())
	var sourceNames, destinationNames []string
	var sourceNamespace, sourcePod, destinationNamespace, destinationPod string
	if p.dnsGetter != nil {
		// Names for each IP are resolved from the peer endpoint's
		// perspective (destination endpoint for the source IP and vice
		// versa).
		sourceNames = p.dnsGetter.GetNamesOf(uint32(r.DestinationEndpoint.ID), sourceIP)
		destinationNames = p.dnsGetter.GetNamesOf(uint32(r.SourceEndpoint.ID), destinationIP)
	}
	if p.ipGetter != nil {
		if meta := p.ipGetter.GetK8sMetadata(sourceIP); meta != nil {
			sourceNamespace, sourcePod = meta.Namespace, meta.PodName
		}
		if meta := p.ipGetter.GetK8sMetadata(destinationIP); meta != nil {
			destinationNamespace, destinationPod = meta.Namespace, meta.PodName
		}
	}
	srcEndpoint := decodeEndpoint(r.SourceEndpoint, sourceNamespace, sourcePod)
	dstEndpoint := decodeEndpoint(r.DestinationEndpoint, destinationNamespace, destinationPod)
	if p.endpointGetter != nil {
		p.updateEndpointWorkloads(sourceIP, srcEndpoint)
		p.updateEndpointWorkloads(destinationIP, dstEndpoint)
	}
	l4, sourcePort, destinationPort := decodeLayer4(r.TransportProtocol, r.SourceEndpoint, r.DestinationEndpoint)
	var sourceService, destinationService *flowpb.Service
	if p.serviceGetter != nil {
		sourceService = p.serviceGetter.GetServiceByAddr(sourceIP, sourcePort)
		destinationService = p.serviceGetter.GetServiceByAddr(destinationIP, destinationPort)
	}
	decoded.Time = pbTimestamp
	decoded.Verdict = decodeVerdict(r.Verdict)
	decoded.DropReason = 0
	decoded.DropReasonDesc = flowpb.DropReason_DROP_REASON_UNKNOWN
	decoded.IP = ip
	decoded.L4 = l4
	decoded.Source = srcEndpoint
	decoded.Destination = dstEndpoint
	decoded.Type = flowpb.FlowType_L7
	decoded.SourceNames = sourceNames
	decoded.DestinationNames = destinationNames
	// decodeLayer7 always returns a non-nil Layer7, so setting LatencyNs on
	// it afterwards is safe.
	decoded.L7 = decodeLayer7(r, p.opts)
	decoded.L7.LatencyNs = p.computeResponseTime(r, timestamp)
	decoded.IsReply = decodeIsReply(r.Type)
	decoded.Reply = decoded.GetIsReply().GetValue()
	decoded.EventType = decodeCiliumEventType(api.MessageTypeAccessLog)
	decoded.SourceService = sourceService
	decoded.DestinationService = destinationService
	decoded.TrafficDirection = decodeTrafficDirection(r.ObservationPoint)
	decoded.PolicyMatchType = 0
	decoded.TraceContext = p.getTraceContext(r)
	decoded.Summary = p.getSummary(r, decoded)
	return nil
}
// extractRequestID returns the value of the X-Request-Id HTTP header of the
// record, or "" when the record carries no HTTP data.
func extractRequestID(r *accesslog.LogRecord) string {
	if r.HTTP == nil {
		return ""
	}
	return r.HTTP.Headers.Get("X-Request-Id")
}
// getTraceContext returns the trace context associated with the record.
// For requests the context is extracted from the record itself and cached
// under the request ID; for responses it is looked up (and evicted) from
// that cache. Returns nil when no context is available.
func (p *Parser) getTraceContext(r *accesslog.LogRecord) *flowpb.TraceContext {
	requestID := extractRequestID(r)
	switch r.Type {
	case accesslog.TypeRequest:
		tc := extractTraceContext(r)
		if tc == nil {
			return nil
		}
		// Envoy should add a requestID to all requests it's managing, but if
		// it's missing for some reason, don't add to the cache without a
		// requestID.
		if requestID != "" {
			p.traceContextCache.Add(requestID, tc)
		}
		return tc
	case accesslog.TypeResponse:
		if requestID == "" {
			return nil
		}
		tc, hit := p.traceContextCache.Get(requestID)
		if !hit {
			return nil
		}
		p.traceContextCache.Remove(requestID)
		return tc
	}
	return nil
}
// computeResponseTime returns the request/response latency in nanoseconds.
// Request timestamps are cached by request ID; the matching response
// subtracts the cached timestamp (and evicts it). It returns 0 when there is
// no request ID, no cached request, or a negative latency.
func (p *Parser) computeResponseTime(r *accesslog.LogRecord, timestamp time.Time) uint64 {
	requestID := extractRequestID(r)
	if requestID == "" {
		return 0
	}
	switch r.Type {
	case accesslog.TypeRequest:
		// Remember when the request was seen so the matching response can
		// compute its latency.
		p.timestampCache.Add(requestID, timestamp)
	case accesslog.TypeResponse:
		requestTime, hit := p.timestampCache.Get(requestID)
		if !hit {
			return 0
		}
		p.timestampCache.Remove(requestID)
		if latency := timestamp.Sub(requestTime).Nanoseconds(); latency >= 0 {
			return uint64(latency)
		}
	}
	return 0
}
// updateEndpointWorkloads fills in the endpoint's Workloads field from the
// pod metadata of the endpoint at the given IP, if it can be resolved.
func (p *Parser) updateEndpointWorkloads(ip netip.Addr, endpoint *flowpb.Endpoint) {
	ep, ok := p.endpointGetter.GetEndpointInfo(ip)
	if !ok {
		return
	}
	pod := ep.GetPod()
	if pod == nil {
		return
	}
	if workload, typeMeta, ok := utils.GetWorkloadMetaFromPod(pod); ok {
		endpoint.Workloads = []*flowpb.Workload{{Kind: typeMeta.Kind, Name: workload.Name}}
	}
}
// decodeTime parses an RFC3339Nano timestamp string into both a Go time and
// its protobuf representation. On parse failure, the zero time and a nil
// protobuf timestamp are returned along with the error.
func decodeTime(timestamp string) (goTime time.Time, pbTime *timestamppb.Timestamp, err error) {
	goTime, err = time.Parse(time.RFC3339Nano, timestamp)
	if err != nil {
		return time.Time{}, nil, err
	}
	pbTime = timestamppb.New(goTime)
	return goTime, pbTime, pbTime.CheckValid()
}
// decodeVerdict maps an access-log verdict to the flow protobuf verdict,
// defaulting to VERDICT_UNKNOWN.
func decodeVerdict(verdict accesslog.FlowVerdict) flowpb.Verdict {
	switch verdict {
	case accesslog.VerdictForwarded:
		return flowpb.Verdict_FORWARDED
	case accesslog.VerdictDenied:
		return flowpb.Verdict_DROPPED
	case accesslog.VerdictRedirected:
		return flowpb.Verdict_REDIRECTED
	case accesslog.VerdictError:
		return flowpb.Verdict_ERROR
	}
	return flowpb.Verdict_VERDICT_UNKNOWN
}
// decodeTrafficDirection maps an access-log observation point to the flow
// protobuf traffic direction, defaulting to TRAFFIC_DIRECTION_UNKNOWN.
func decodeTrafficDirection(direction accesslog.ObservationPoint) flowpb.TrafficDirection {
	if direction == accesslog.Ingress {
		return flowpb.TrafficDirection_INGRESS
	}
	if direction == accesslog.Egress {
		return flowpb.TrafficDirection_EGRESS
	}
	return flowpb.TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN
}
// decodeIP builds the flow IP message from the endpoints' addresses for the
// given IP version. It returns nil for an unknown version.
func decodeIP(version accesslog.IPVersion, source, destination accesslog.EndpointInfo) *flowpb.IP {
	switch version {
	case accesslog.VersionIPv4:
		return &flowpb.IP{
			Source:      source.IPv4,
			Destination: destination.IPv4,
			IpVersion:   flowpb.IPVersion_IPv4,
		}
	case accesslog.VersionIPV6:
		return &flowpb.IP{
			Source:      source.IPv6,
			Destination: destination.IPv6,
			IpVersion:   flowpb.IPVersion_IPv6,
		}
	}
	return nil
}
// decodeLayer4 builds the flow L4 message for TCP, UDP, or SCTP and also
// returns the source and destination ports. Unknown protocols yield
// (nil, 0, 0).
func decodeLayer4(protocol accesslog.TransportProtocol, source, destination accesslog.EndpointInfo) (l4 *flowpb.Layer4, srcPort, dstPort uint16) {
	src, dst := uint32(source.Port), uint32(destination.Port)
	switch u8proto.U8proto(protocol) {
	case u8proto.TCP:
		l4 = &flowpb.Layer4{
			Protocol: &flowpb.Layer4_TCP{
				TCP: &flowpb.TCP{SourcePort: src, DestinationPort: dst},
			},
		}
	case u8proto.UDP:
		l4 = &flowpb.Layer4{
			Protocol: &flowpb.Layer4_UDP{
				UDP: &flowpb.UDP{SourcePort: src, DestinationPort: dst},
			},
		}
	case u8proto.SCTP:
		l4 = &flowpb.Layer4{
			Protocol: &flowpb.Layer4_SCTP{
				SCTP: &flowpb.SCTP{SourcePort: src, DestinationPort: dst},
			},
		}
	default:
		return nil, 0, 0
	}
	return l4, uint16(source.Port), uint16(destination.Port)
}
// decodeEndpoint builds the flow endpoint message from an access-log
// endpoint, attaching its sorted labels and the given namespace/pod name.
// The cluster name is taken from the Kubernetes cluster policy label.
func decodeEndpoint(endpoint accesslog.EndpointInfo, namespace, podName string) *flowpb.Endpoint {
	labels := endpoint.Labels.GetModel()
	slices.Sort(labels)
	clusterLabelKey := string(source.Kubernetes) + "." + k8sConst.PolicyLabelCluster
	return &flowpb.Endpoint{
		ID:          uint32(endpoint.ID),
		Identity:    uint32(endpoint.Identity),
		ClusterName: endpoint.Labels.Get(clusterLabelKey),
		Namespace:   namespace,
		Labels:      labels,
		PodName:     podName,
	}
}
// decodeLayer7 builds the flow L7 message from the record, selecting the
// DNS, HTTP, or Kafka decoder depending on which payload is present. The
// flow type defaults to the protobuf zero value when the record type is
// none of request/response/sample; the record stays nil when no L7 payload
// is present.
func decodeLayer7(r *accesslog.LogRecord, opts *options.Options) *flowpb.Layer7 {
	l7 := &flowpb.Layer7{}
	switch r.Type {
	case accesslog.TypeRequest:
		l7.Type = flowpb.L7FlowType_REQUEST
	case accesslog.TypeResponse:
		l7.Type = flowpb.L7FlowType_RESPONSE
	case accesslog.TypeSample:
		l7.Type = flowpb.L7FlowType_SAMPLE
	}
	switch {
	case r.DNS != nil:
		l7.Record = decodeDNS(r.Type, r.DNS)
	case r.HTTP != nil:
		l7.Record = decodeHTTP(r.Type, r.HTTP, opts)
	case r.Kafka != nil:
		l7.Record = decodeKafka(r.Type, r.Kafka, opts)
	}
	return l7
}
// decodeIsReply wraps "is this a response?" into a protobuf BoolValue.
func decodeIsReply(t accesslog.FlowType) *wrapperspb.BoolValue {
	return wrapperspb.Bool(t == accesslog.TypeResponse)
}
// decodeCiliumEventType wraps a monitor message type into the flow protobuf
// CiliumEventType (subtype left at its zero value).
func decodeCiliumEventType(eventType uint8) *flowpb.CiliumEventType {
	return &flowpb.CiliumEventType{
		Type: int32(eventType),
	}
}
// genericSummary renders a fallback human-readable description for L7
// records that are neither DNS, HTTP, nor Kafka.
func genericSummary(l7 *accesslog.LogRecordL7) string {
	return fmt.Sprintf("%s Fields: %s", l7.Proto, l7.Fields)
}
// getSummary renders a human-readable description of the record, choosing
// the HTTP, Kafka, DNS, or generic L7 summarizer in that precedence order.
// It returns "" for a nil record or when no L7 payload is present.
func (p *Parser) getSummary(logRecord *accesslog.LogRecord, flow *flowpb.Flow) string {
	if logRecord == nil {
		return ""
	}
	switch {
	case logRecord.HTTP != nil:
		return p.httpSummary(logRecord.Type, logRecord.HTTP, flow)
	case logRecord.Kafka != nil:
		return kafkaSummary(flow)
	case logRecord.DNS != nil:
		return dnsSummary(logRecord.Type, logRecord.DNS)
	case logRecord.L7 != nil:
		return genericSummary(logRecord.L7)
	}
	return ""
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package seven
import (
"context"
"net/http"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/proxy/accesslog"
)
// traceparentHeader is an HTTP header defined in the W3C Trace Context
// specification: https://www.w3.org/TR/trace-context/
// It identifies the incoming request in a tracing system and contains, among
// other things, the trace ID.
const traceparentHeader = "traceparent"
// extractTraceContext returns the trace context carried by the record's HTTP
// headers, or nil when the record is nil, carries no HTTP data, or has no
// trace ID.
func extractTraceContext(record *accesslog.LogRecord) *flowpb.TraceContext {
	if record == nil {
		return nil
	}
	// TODO: Kafka records do not carry a trace context yet.
	if record.HTTP == nil {
		return nil
	}
	traceID := traceIDFromHTTPHeader(record.HTTP.Headers)
	if traceID == "" {
		return nil
	}
	return &flowpb.TraceContext{
		Parent: &flowpb.TraceParent{
			TraceId: traceID,
		},
	}
}
// traceIDFromHTTPHeader extracts the W3C trace ID from the traceparent
// header, or returns "" when the header is absent or carries no valid
// trace ID.
func traceIDFromHTTPHeader(h http.Header) string {
	if h.Get(traceparentHeader) == "" {
		// Bail out early to avoid unnecessary processing and memory
		// allocation when no traceparent header is present.
		return ""
	}
	var tc propagation.TraceContext
	ctx := tc.Extract(context.Background(), propagation.HeaderCarrier(h))
	span := trace.SpanContextFromContext(ctx)
	if !span.HasTraceID() {
		return ""
	}
	return span.TraceID().String()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package sock
import (
"fmt"
"log/slog"
"net/netip"
"strings"
"go4.org/netipx"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/hubble/parser/common"
"github.com/cilium/cilium/pkg/hubble/parser/errors"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/hubble/parser/options"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/monitor"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
)
// Parser is a parser for SockTraceNotify payloads
type Parser struct {
	log            *slog.Logger
	endpointGetter getters.EndpointGetter
	identityGetter getters.IdentityGetter
	dnsGetter      getters.DNSGetter
	ipGetter       getters.IPGetter
	serviceGetter  getters.ServiceGetter
	// cgroupGetter resolves the pod owning a socket's cgroup id, which is
	// how the local endpoint IP of a socket event is determined.
	cgroupGetter getters.PodMetadataGetter
	epResolver   *common.EndpointResolver
	// skipUnknownCGroupIDs controls whether events whose cgroup id cannot
	// be mapped to an endpoint IP are skipped.
	skipUnknownCGroupIDs bool
}
// New creates a new parser.
// By default, events whose cgroup id cannot be resolved to an endpoint IP
// are skipped; this may be overridden via options.
func New(log *slog.Logger,
	endpointGetter getters.EndpointGetter,
	identityGetter getters.IdentityGetter,
	dnsGetter getters.DNSGetter,
	ipGetter getters.IPGetter,
	serviceGetter getters.ServiceGetter,
	cgroupGetter getters.PodMetadataGetter,
	opts ...options.Option,
) (*Parser, error) {
	cfg := &options.Options{
		SkipUnknownCGroupIDs: true,
	}
	for _, applyOpt := range opts {
		applyOpt(cfg)
	}
	p := &Parser{
		log:                  log,
		endpointGetter:       endpointGetter,
		identityGetter:       identityGetter,
		dnsGetter:            dnsGetter,
		ipGetter:             ipGetter,
		serviceGetter:        serviceGetter,
		cgroupGetter:         cgroupGetter,
		epResolver:           common.NewEndpointResolver(log, endpointGetter, identityGetter, ipGetter),
		skipUnknownCGroupIDs: cfg.SkipUnknownCGroupIDs,
	}
	return p, nil
}
// Decode takes a raw trace sock event payload obtained from the perf event ring
// buffer and decodes it into a flow
func (p *Parser) Decode(data []byte, decoded *flowpb.Flow) error {
	if len(data) == 0 {
		return errors.ErrEmptyData
	}
	eventType := data[0]
	if eventType != monitorAPI.MessageTypeTraceSock {
		return errors.NewErrInvalidType(eventType)
	}
	sock := &monitor.TraceSockNotify{}
	if err := sock.Decode(data); err != nil {
		return fmt.Errorf("failed to parse sock trace event: %w", err)
	}
	ipVersion := decodeIPVersion(sock.Flags)
	// The local endpoint IP is derived from the cgroup id of the socket's
	// owner, not from the packet itself.
	srcIP := p.decodeEndpointIP(sock.CgroupId, ipVersion)
	if !srcIP.IsValid() && p.skipUnknownCGroupIDs {
		// Skip events for which we cannot determine the endpoint ip based on
		// the numeric cgroup id, since those events do not provide much value
		// to end users.
		return errors.ErrEventSkipped
	}
	srcPort := uint16(0) // source port is not known for TraceSock events
	// Ignore invalid IPs - getters will handle invalid values.
	// IPs can be empty for Ethernet-only packets.
	dstIP, _ := netipx.FromStdIP(sock.IP())
	dstPort := sock.DstPort
	datapathContext := common.DatapathContext{
		SrcIP:      srcIP,
		SrcLabelID: 0,
		DstIP:      dstIP,
		DstLabelID: 0,
	}
	// Resolve both endpoints before any swap so each resolution uses the
	// original (pre-reverse-NAT) addresses.
	srcEndpoint := p.epResolver.ResolveEndpoint(srcIP, 0, datapathContext)
	dstEndpoint := p.epResolver.ResolveEndpoint(dstIP, 0, datapathContext)
	// On the reverse path, source and destination IP of the packet are reversed
	isRevNat := decodeRevNat(sock.XlatePoint)
	if isRevNat {
		srcIP, dstIP = dstIP, srcIP
		srcPort, dstPort = dstPort, srcPort
		srcEndpoint, dstEndpoint = dstEndpoint, srcEndpoint
	}
	decoded.Verdict = decodeVerdict(sock.XlatePoint)
	decoded.IP = decodeL3(srcIP, dstIP, ipVersion)
	decoded.L4 = decodeL4(sock.L4Proto, srcPort, dstPort)
	decoded.Source = srcEndpoint
	// Names for each IP are resolved from the peer endpoint's perspective
	// (destination endpoint ID for the source IP and vice versa),
	// presumably because DNS state is tracked per requesting endpoint —
	// matches the L7 parser's lookup pattern.
	decoded.SourceNames = p.resolveNames(dstEndpoint.GetID(), srcIP)
	decoded.SourceService = p.decodeService(srcIP, srcPort)
	decoded.Destination = dstEndpoint
	decoded.DestinationService = p.decodeService(dstIP, dstPort)
	decoded.DestinationNames = p.resolveNames(srcEndpoint.GetID(), dstIP)
	decoded.Type = flowpb.FlowType_SOCK
	decoded.EventType = decodeCiliumEventType(sock.Type, sock.XlatePoint)
	decoded.SockXlatePoint = flowpb.SocketTranslationPoint(sock.XlatePoint)
	decoded.SocketCookie = sock.SockCookie
	decoded.CgroupId = sock.CgroupId
	decoded.Summary = decodeSummary(sock)
	return nil
}
// decodeIPVersion reads the IPv6 flag bit of a TraceSockNotify event;
// anything without it is treated as IPv4.
func decodeIPVersion(flags uint8) flowpb.IPVersion {
	if flags&monitor.TraceSockNotifyFlagIPv6 == 0 {
		return flowpb.IPVersion_IPv4
	}
	return flowpb.IPVersion_IPv6
}
// decodeEndpointIP resolves the pod that owns the given cgroup id and
// returns its first IP matching the requested IP version. It returns the
// zero Addr when no cgroup getter is configured, the cgroup id is unknown,
// a matching pod IP fails to parse, or no IP of the requested version
// exists.
func (p *Parser) decodeEndpointIP(cgroupId uint64, ipVersion flowpb.IPVersion) netip.Addr {
	if p.cgroupGetter == nil {
		return netip.Addr{}
	}
	meta := p.cgroupGetter.GetPodMetadataForContainer(cgroupId)
	if meta == nil {
		return netip.Addr{}
	}
	for _, podIP := range meta.IPs {
		isIPv6 := strings.Contains(podIP, ":")
		matches := (isIPv6 && ipVersion == flowpb.IPVersion_IPv6) ||
			(!isIPv6 && ipVersion == flowpb.IPVersion_IPv4)
		if !matches {
			continue
		}
		ip, err := netip.ParseAddr(podIP)
		if err != nil {
			// An unparseable matching IP aborts the lookup entirely.
			p.log.Debug(
				"failed to parse pod IP",
				logfields.Error, err,
				logfields.CGroupID, cgroupId,
				logfields.K8sPodName, meta.Name,
				logfields.K8sNamespace, meta.Namespace,
				logfields.IPAddr, podIP,
			)
			return netip.Addr{}
		}
		return ip
	}
	p.log.Debug(
		"no matching IP for pod",
		logfields.CGroupID, cgroupId,
		logfields.K8sPodName, meta.Name,
		logfields.K8sNamespace, meta.Namespace,
	)
	return netip.Addr{}
}
// decodeL3 builds the flow IP message from the given addresses; invalid
// (zero) addresses are rendered as empty strings.
func decodeL3(srcIP, dstIP netip.Addr, ipVersion flowpb.IPVersion) *flowpb.IP {
	ip := &flowpb.IP{IpVersion: ipVersion}
	if srcIP.IsValid() {
		ip.Source = srcIP.String()
	}
	if dstIP.IsValid() {
		ip.Destination = dstIP.String()
	}
	return ip
}
// decodeL4 builds the flow L4 message for TCP or UDP socket events; other
// protocols yield nil.
func decodeL4(proto uint8, srcPort, dstPort uint16) *flowpb.Layer4 {
	src, dst := uint32(srcPort), uint32(dstPort)
	switch proto {
	case monitor.L4ProtocolTCP:
		return &flowpb.Layer4{
			Protocol: &flowpb.Layer4_TCP{
				TCP: &flowpb.TCP{SourcePort: src, DestinationPort: dst},
			},
		}
	case monitor.L4ProtocolUDP:
		return &flowpb.Layer4{
			Protocol: &flowpb.Layer4_UDP{
				UDP: &flowpb.UDP{SourcePort: src, DestinationPort: dst},
			},
		}
	}
	return nil
}
// resolveNames returns the DNS names known for ip from the perspective of
// the endpoint with the given ID, or nil when no DNS getter is configured.
func (p *Parser) resolveNames(epID uint32, ip netip.Addr) (names []string) {
	if p.dnsGetter == nil {
		return nil
	}
	return p.dnsGetter.GetNamesOf(epID, ip)
}
// decodeService looks up the service behind the given address and port, or
// returns nil when no service getter is configured.
func (p *Parser) decodeService(ip netip.Addr, port uint16) *flowpb.Service {
	if p.serviceGetter == nil {
		return nil
	}
	return p.serviceGetter.GetServiceByAddr(ip, port)
}
// decodeVerdict maps the socket translation point to a flow verdict:
// pre-translation points are TRACED, post-translation points are
// TRANSLATED, anything else is VERDICT_UNKNOWN.
func decodeVerdict(xlatePoint uint8) flowpb.Verdict {
	switch xlatePoint {
	case monitor.XlatePointPreDirectionFwd, monitor.XlatePointPreDirectionRev:
		return flowpb.Verdict_TRACED
	case monitor.XlatePointPostDirectionFwd, monitor.XlatePointPostDirectionRev:
		return flowpb.Verdict_TRANSLATED
	default:
		return flowpb.Verdict_VERDICT_UNKNOWN
	}
}
// decodeRevNat reports whether the translation point is on the reverse
// (reply) path.
func decodeRevNat(xlatePoint uint8) bool {
	return xlatePoint == monitor.XlatePointPreDirectionRev ||
		xlatePoint == monitor.XlatePointPostDirectionRev
}
// decodeCiliumEventType wraps the monitor message type and its subtype into
// the flow protobuf CiliumEventType.
func decodeCiliumEventType(eventType, subtype uint8) *flowpb.CiliumEventType {
	return &flowpb.CiliumEventType{
		SubType: int32(subtype),
		Type:    int32(eventType),
	}
}
// decodeSummary renders the one-word protocol summary of a socket trace
// event ("TCP", "UDP", or "Unknown").
func decodeSummary(sock *monitor.TraceSockNotify) string {
	if sock.L4Proto == monitor.L4ProtocolTCP {
		return "TCP"
	}
	if sock.L4Proto == monitor.L4ProtocolUDP {
		return "UDP"
	}
	return "Unknown"
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package threefour
import (
"fmt"
"log/slog"
"net/netip"
"strings"
"github.com/gopacket/gopacket"
"github.com/gopacket/gopacket/layers"
"go4.org/netipx"
"google.golang.org/protobuf/types/known/wrapperspb"
pb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/hubble/parser/common"
"github.com/cilium/cilium/pkg/hubble/parser/errors"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/hubble/parser/options"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/monitor"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/policy/correlation"
)
// Parser is a parser for L3/L4 payloads
type Parser struct {
	log            *slog.Logger
	endpointGetter getters.EndpointGetter
	identityGetter getters.IdentityGetter
	dnsGetter      getters.DNSGetter
	ipGetter       getters.IPGetter
	serviceGetter  getters.ServiceGetter
	linkGetter     getters.LinkGetter
	epResolver     *common.EndpointResolver
	// correlateL3L4Policy toggles network-policy correlation for flows.
	correlateL3L4Policy bool
	// TODO: consider using a pool of these
	// packet is the shared, mutex-guarded scratch space for gopacket decoding.
	packet *packet
}
// re-usable packet to avoid reallocating gopacket datastructures
type packet struct {
	lock.Mutex // guards concurrent use of the parsers and the layer storage below
	// decLayerL2Dev parses packets that start with an Ethernet header.
	decLayerL2Dev *gopacket.DecodingLayerParser
	// decLayerL3Dev parses packets captured on L3 devices (no Ethernet
	// header), one parser per starting IP version.
	decLayerL3Dev struct {
		IPv4 *gopacket.DecodingLayerParser
		IPv6 *gopacket.DecodingLayerParser
	}
	// decLayerOverlay parses encapsulated payloads, one parser per
	// supported overlay protocol.
	decLayerOverlay struct {
		VXLAN  *gopacket.DecodingLayerParser
		Geneve *gopacket.DecodingLayerParser
	}
	// Layers receives the sequence of layer types found by the last decode.
	Layers []gopacket.LayerType
	// Embedded layer structs serve as reusable decoding targets.
	layers.Ethernet
	layers.IPv4
	layers.IPv6
	layers.ICMPv4
	layers.ICMPv6
	layers.TCP
	layers.UDP
	layers.SCTP
	// overlay holds the equivalent decoding targets for the inner
	// (encapsulated) packet.
	overlay struct {
		Layers []gopacket.LayerType
		layers.VXLAN
		layers.Geneve
		layers.Ethernet
		layers.IPv4
		layers.IPv6
		layers.ICMPv4
		layers.ICMPv6
		layers.TCP
		layers.UDP
		layers.SCTP
	}
}
// New returns a new L3/L4 parser
func New(
	log *slog.Logger,
	endpointGetter getters.EndpointGetter,
	identityGetter getters.IdentityGetter,
	dnsGetter getters.DNSGetter,
	ipGetter getters.IPGetter,
	serviceGetter getters.ServiceGetter,
	linkGetter getters.LinkGetter,
	opts ...options.Option,
) (*Parser, error) {
	pkt := &packet{}
	// Decoders for the outer packet share the packet's pre-allocated layer
	// structs to avoid per-event allocations.
	outerDecoders := []gopacket.DecodingLayer{
		&pkt.Ethernet,
		&pkt.IPv4, &pkt.IPv6,
		&pkt.ICMPv4, &pkt.ICMPv6,
		&pkt.TCP, &pkt.UDP, &pkt.SCTP,
	}
	pkt.decLayerL2Dev = gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, outerDecoders...)
	pkt.decLayerL3Dev.IPv4 = gopacket.NewDecodingLayerParser(layers.LayerTypeIPv4, outerDecoders...)
	pkt.decLayerL3Dev.IPv6 = gopacket.NewDecodingLayerParser(layers.LayerTypeIPv6, outerDecoders...)
	// Separate decoders (and layer structs) for the encapsulated packet.
	innerDecoders := []gopacket.DecodingLayer{
		&pkt.overlay.VXLAN, &pkt.overlay.Geneve,
		&pkt.overlay.Ethernet,
		&pkt.overlay.IPv4, &pkt.overlay.IPv6,
		&pkt.overlay.ICMPv4, &pkt.overlay.ICMPv6,
		&pkt.overlay.TCP, &pkt.overlay.UDP, &pkt.overlay.SCTP,
	}
	pkt.decLayerOverlay.VXLAN = gopacket.NewDecodingLayerParser(layers.LayerTypeVXLAN, innerDecoders...)
	pkt.decLayerOverlay.Geneve = gopacket.NewDecodingLayerParser(layers.LayerTypeGeneve, innerDecoders...)
	// Let DecodeLayers return a nil error when it encounters a layer it has
	// no parser for, instead of an UnsupportedLayerType error.
	for _, parser := range []*gopacket.DecodingLayerParser{
		pkt.decLayerL2Dev,
		pkt.decLayerL3Dev.IPv4, pkt.decLayerL3Dev.IPv6,
		pkt.decLayerOverlay.VXLAN, pkt.decLayerOverlay.Geneve,
	} {
		parser.IgnoreUnsupported = true
	}
	cfg := &options.Options{
		EnableNetworkPolicyCorrelation: true,
	}
	for _, opt := range opts {
		opt(cfg)
	}
	return &Parser{
		log:                 log,
		dnsGetter:           dnsGetter,
		endpointGetter:      endpointGetter,
		identityGetter:      identityGetter,
		ipGetter:            ipGetter,
		serviceGetter:       serviceGetter,
		linkGetter:          linkGetter,
		epResolver:          common.NewEndpointResolver(log, endpointGetter, identityGetter, ipGetter),
		packet:              pkt,
		correlateL3L4Policy: cfg.EnableNetworkPolicyCorrelation,
	}, nil
}
// Decode decodes the data from 'data' into 'decoded'.
//
// The first byte of 'data' selects the monitor event type (drop, trace,
// policy verdict or debug capture); the packet payload that follows is then
// decoded and the resulting L3/L4 information, endpoints, services and
// verdict are written into 'decoded'.
func (p *Parser) Decode(data []byte, decoded *pb.Flow) error {
	if len(data) == 0 {
		return errors.ErrEmptyData
	}
	eventType := data[0]
	var packetOffset int
	var dn *monitor.DropNotify
	var tn *monitor.TraceNotify
	var pvn *monitor.PolicyVerdictNotify
	var dbg *monitor.DebugCapture
	var eventSubType uint8
	var authType pb.AuthType
	switch eventType {
	case monitorAPI.MessageTypeDrop:
		dn = &monitor.DropNotify{}
		if err := dn.Decode(data); err != nil {
			return fmt.Errorf("failed to parse drop: %w", err)
		}
		eventSubType = dn.SubType
		packetOffset = (int)(dn.DataOffset())
	case monitorAPI.MessageTypeTrace:
		tn = &monitor.TraceNotify{}
		if err := tn.Decode(data); err != nil {
			return fmt.Errorf("failed to parse trace: %w", err)
		}
		eventSubType = tn.ObsPoint
		if tn.ObsPoint != 0 {
			decoded.TraceObservationPoint = pb.TraceObservationPoint(tn.ObsPoint)
		} else {
			// specifically handle the zero value in the observation enum so the json
			// export and the API don't carry extra meaning with the zero value
			decoded.TraceObservationPoint = pb.TraceObservationPoint_TO_ENDPOINT
		}
		packetOffset = (int)(tn.DataOffset())
	case monitorAPI.MessageTypePolicyVerdict:
		pvn = &monitor.PolicyVerdictNotify{}
		if err := pvn.Decode(data); err != nil {
			return fmt.Errorf("failed to parse policy verdict: %w", err)
		}
		eventSubType = pvn.SubType
		packetOffset = monitor.PolicyVerdictNotifyLen
		authType = pb.AuthType(pvn.GetAuthType())
	case monitorAPI.MessageTypeCapture:
		dbg = &monitor.DebugCapture{}
		if err := dbg.Decode(data); err != nil {
			return fmt.Errorf("failed to parse debug capture: %w", err)
		}
		eventSubType = dbg.SubType
		packetOffset = monitor.DebugCaptureLen
	default:
		return errors.NewErrInvalidType(eventType)
	}
	if len(data) < packetOffset {
		// BUGFIX: previously this used "%d" on the raw byte slice, dumping the
		// whole payload as decimal byte values; report the sizes instead.
		return fmt.Errorf("not enough bytes to decode: have %d, need %d", len(data), packetOffset)
	}
	isL3Device := tn != nil && tn.IsL3Device() || dn != nil && dn.IsL3Device()
	isIPv6 := tn != nil && tn.IsIPv6() || dn != nil && dn.IsIPv6()
	isVXLAN := tn != nil && tn.IsVXLAN() || dn != nil && dn.IsVXLAN()
	isGeneve := tn != nil && tn.IsGeneve() || dn != nil && dn.IsGeneve()
	ether, ip, l4, tunnel, srcIP, dstIP, srcPort, dstPort, summary, err := decodeLayers(data[packetOffset:], p.packet, isL3Device, isIPv6, isVXLAN, isGeneve)
	if err != nil {
		return err
	}
	if tn != nil && ip != nil {
		if !tn.OriginalIP().IsUnspecified() {
			// Ignore invalid IP - getters will handle invalid value.
			srcIP, _ = netipx.FromStdIP(tn.OriginalIP())
			// On SNAT the trace notification has OrigIP set to the pre
			// translation IP and the source IP parsed from the header is the
			// post translation IP. The check is here because sometimes we get
			// trace notifications with OrigIP set to the header's IP
			// (pre-translation events?)
			if ip.GetSource() != srcIP.String() {
				ip.SourceXlated = ip.GetSource()
				ip.Source = srcIP.String()
			}
		}
		ip.Encrypted = tn.IsEncrypted()
	}
	srcLabelID, dstLabelID := decodeSecurityIdentities(dn, tn, pvn)
	datapathContext := common.DatapathContext{
		SrcIP:                 srcIP,
		SrcLabelID:            srcLabelID,
		DstIP:                 dstIP,
		DstLabelID:            dstLabelID,
		TraceObservationPoint: decoded.TraceObservationPoint,
	}
	srcEndpoint := p.epResolver.ResolveEndpoint(srcIP, srcLabelID, datapathContext)
	dstEndpoint := p.epResolver.ResolveEndpoint(dstIP, dstLabelID, datapathContext)
	var sourceService, destinationService *pb.Service
	if p.serviceGetter != nil {
		sourceService = p.serviceGetter.GetServiceByAddr(srcIP, srcPort)
		destinationService = p.serviceGetter.GetServiceByAddr(dstIP, dstPort)
	}
	decoded.Verdict = decodeVerdict(dn, tn, pvn)
	decoded.AuthType = authType
	decoded.DropReason = decodeDropReason(dn, pvn)
	decoded.DropReasonDesc = pb.DropReason(decoded.DropReason)
	decoded.File = decodeFileInfo(dn)
	decoded.Ethernet = ether
	decoded.IP = ip
	decoded.L4 = l4
	decoded.Tunnel = tunnel
	decoded.Source = srcEndpoint
	decoded.Destination = dstEndpoint
	decoded.Type = pb.FlowType_L3_L4
	// DNS names for an IP are resolved from the perspective of the peer
	// endpoint (the one that performed the lookup), hence the swapped IDs.
	decoded.SourceNames = p.resolveNames(dstEndpoint.ID, srcIP)
	decoded.DestinationNames = p.resolveNames(srcEndpoint.ID, dstIP)
	decoded.L7 = nil
	decoded.IsReply = decodeIsReply(tn, pvn)
	decoded.Reply = decoded.GetIsReply().GetValue() // false if GetIsReply() is nil
	decoded.TrafficDirection = decodeTrafficDirection(srcEndpoint.ID, dn, tn, pvn)
	decoded.EventType = decodeCiliumEventType(eventType, eventSubType)
	decoded.TraceReason = decodeTraceReason(tn)
	decoded.SourceService = sourceService
	decoded.DestinationService = destinationService
	decoded.PolicyMatchType = decodePolicyMatchType(pvn)
	decoded.DebugCapturePoint = decodeDebugCapturePoint(dbg)
	decoded.Interface = p.decodeNetworkInterface(tn, dbg)
	decoded.ProxyPort = decodeProxyPort(dbg, tn)
	decoded.Summary = summary
	if p.correlateL3L4Policy && p.endpointGetter != nil {
		correlation.CorrelatePolicy(p.log, p.endpointGetter, decoded)
	}
	return nil
}
// resolveNames returns the DNS names known for ip from the perspective of
// the endpoint epID, or nil when no DNS getter is configured.
func (p *Parser) resolveNames(epID uint32, ip netip.Addr) (names []string) {
	if p.dnsGetter == nil {
		return nil
	}
	return p.dnsGetter.GetNamesOf(epID, ip)
}
// decodeLayers decodes 'payload' using the reusable gopacket state in
// 'packet' and translates the result into Hubble protobuf types. The first
// decoded header depends on the capture device (L2 vs. L3, IPv4 vs. IPv6);
// when the packet is VXLAN/Geneve encapsulated, the inner packet's values
// replace the outer ones and 'tunnel' describes the encapsulation.
// Named results are intentionally left at their zero values on early return.
func decodeLayers(payload []byte, packet *packet, isL3Device, isIPv6, isVXLAN, isGeneve bool) (
	ethernet *pb.Ethernet,
	ip *pb.IP,
	l4 *pb.Layer4,
	tunnel *pb.Tunnel,
	sourceIP, destinationIP netip.Addr,
	sourcePort, destinationPort uint16,
	summary string,
	err error,
) {
	// The packet's layer structs are shared; serialize access for the whole
	// decode.
	packet.Lock()
	defer packet.Unlock()
	// Since v1.1.18, DecodeLayers returns a non-nil error for an empty packet, see
	// https://github.com/google/gopacket/issues/846
	// TODO: reconsider this check if the issue is fixed upstream
	if len(payload) == 0 {
		// Truncate layers to avoid accidental re-use.
		packet.Layers = packet.Layers[:0]
		packet.overlay.Layers = packet.overlay.Layers[:0]
		return
	}
	// Select the decoder matching the outermost header of the capture.
	switch {
	case !isL3Device:
		err = packet.decLayerL2Dev.DecodeLayers(payload, &packet.Layers)
	case isIPv6:
		err = packet.decLayerL3Dev.IPv6.DecodeLayers(payload, &packet.Layers)
	default:
		err = packet.decLayerL3Dev.IPv4.DecodeLayers(payload, &packet.Layers)
	}
	if err != nil {
		return
	}
	// Translate each decoded outer layer; 'summary' ends up describing the
	// innermost decoded layer.
	for _, typ := range packet.Layers {
		summary = typ.String()
		switch typ {
		case layers.LayerTypeEthernet:
			ethernet = decodeEthernet(&packet.Ethernet)
		case layers.LayerTypeIPv4:
			ip, sourceIP, destinationIP = decodeIPv4(&packet.IPv4)
		case layers.LayerTypeIPv6:
			ip, sourceIP, destinationIP = decodeIPv6(&packet.IPv6)
		case layers.LayerTypeTCP:
			l4, sourcePort, destinationPort = decodeTCP(&packet.TCP)
			summary = "TCP Flags: " + getTCPFlags(packet.TCP)
		case layers.LayerTypeUDP:
			l4, sourcePort, destinationPort = decodeUDP(&packet.UDP)
		case layers.LayerTypeSCTP:
			l4, sourcePort, destinationPort = decodeSCTP(&packet.SCTP)
		case layers.LayerTypeICMPv4:
			l4 = decodeICMPv4(&packet.ICMPv4)
			summary = "ICMPv4 " + packet.ICMPv4.TypeCode.String()
		case layers.LayerTypeICMPv6:
			l4 = decodeICMPv6(&packet.ICMPv6)
			summary = "ICMPv6 " + packet.ICMPv6.TypeCode.String()
		}
	}
	// If the event flags indicate encapsulation, decode the inner packet
	// from the outer UDP payload.
	switch {
	case isVXLAN:
		err = packet.decLayerOverlay.VXLAN.DecodeLayers(packet.UDP.Payload, &packet.overlay.Layers)
	case isGeneve:
		err = packet.decLayerOverlay.Geneve.DecodeLayers(packet.UDP.Payload, &packet.overlay.Layers)
	default:
		// Truncate layers to avoid accidental re-use.
		packet.overlay.Layers = packet.overlay.Layers[:0]
		return
	}
	if err != nil {
		err = fmt.Errorf("overlay: %w", err)
		return
	}
	// Return in case we have not decoded any overlay layer.
	if len(packet.overlay.Layers) == 0 {
		return
	}
	// Expect VXLAN/Geneve overlay as first overlay layer, if not we bail out.
	switch packet.overlay.Layers[0] {
	case layers.LayerTypeVXLAN:
		tunnel = &pb.Tunnel{Protocol: pb.Tunnel_VXLAN, IP: ip, L4: l4}
	case layers.LayerTypeGeneve:
		tunnel = &pb.Tunnel{Protocol: pb.Tunnel_GENEVE, IP: ip, L4: l4}
	default:
		return
	}
	// Reset return values. This ensures the resulting flow does not misrepresent
	// what is happening (e.g. same IP addresses for overlay and underlay).
	ethernet, ip, l4 = nil, nil, nil
	sourceIP, destinationIP = netip.Addr{}, netip.Addr{}
	sourcePort, destinationPort = 0, 0
	summary = ""
	// Parse the rest of the overlay layers as we would do for a non-encapsulated packet.
	// It is possible we're not parsing any layer here. This is because the overlay
	// decoders failed (e.g., not enough data). We would still return empty values
	// for the inner packet (ethernet, ip, l4, basically the re-init variables)
	// while returning the non-empty `tunnel` field.
	for _, typ := range packet.overlay.Layers[1:] {
		summary = typ.String()
		switch typ {
		case layers.LayerTypeEthernet:
			ethernet = decodeEthernet(&packet.overlay.Ethernet)
		case layers.LayerTypeIPv4:
			ip, sourceIP, destinationIP = decodeIPv4(&packet.overlay.IPv4)
		case layers.LayerTypeIPv6:
			ip, sourceIP, destinationIP = decodeIPv6(&packet.overlay.IPv6)
		case layers.LayerTypeTCP:
			l4, sourcePort, destinationPort = decodeTCP(&packet.overlay.TCP)
			summary = "TCP Flags: " + getTCPFlags(packet.overlay.TCP)
		case layers.LayerTypeUDP:
			l4, sourcePort, destinationPort = decodeUDP(&packet.overlay.UDP)
		case layers.LayerTypeSCTP:
			l4, sourcePort, destinationPort = decodeSCTP(&packet.overlay.SCTP)
		case layers.LayerTypeICMPv4:
			l4 = decodeICMPv4(&packet.overlay.ICMPv4)
			summary = "ICMPv4 " + packet.overlay.ICMPv4.TypeCode.String()
		case layers.LayerTypeICMPv6:
			l4 = decodeICMPv6(&packet.overlay.ICMPv6)
			summary = "ICMPv6 " + packet.overlay.ICMPv6.TypeCode.String()
		}
	}
	return
}
// decodeVerdict derives the flow verdict from whichever notification is
// present: drops always mean DROPPED, traces always mean FORWARDED, and
// policy verdict events encode the outcome in their Verdict field.
func decodeVerdict(dn *monitor.DropNotify, tn *monitor.TraceNotify, pvn *monitor.PolicyVerdictNotify) pb.Verdict {
	if dn != nil {
		return pb.Verdict_DROPPED
	}
	if tn != nil {
		return pb.Verdict_FORWARDED
	}
	if pvn == nil {
		return pb.Verdict_VERDICT_UNKNOWN
	}
	switch {
	case pvn.Verdict < 0:
		return pb.Verdict_DROPPED
	case pvn.Verdict > 0:
		return pb.Verdict_REDIRECTED
	case pvn.IsTrafficAudited():
		return pb.Verdict_AUDIT
	default:
		return pb.Verdict_FORWARDED
	}
}
// decodeDropReason extracts the drop reason from a drop notification's
// subtype, or from a negative policy verdict; 0 when neither applies.
func decodeDropReason(dn *monitor.DropNotify, pvn *monitor.PolicyVerdictNotify) uint32 {
	if dn != nil {
		return uint32(dn.SubType)
	}
	if pvn != nil && pvn.Verdict < 0 {
		// if the flow was dropped, verdict equals the negative of the drop reason
		return uint32(-pvn.Verdict)
	}
	return 0
}
// decodeFileInfo reports the BPF source file and line that emitted a drop
// notification, or nil when there is no drop notification.
func decodeFileInfo(dn *monitor.DropNotify) *pb.FileInfo {
	if dn == nil {
		return nil
	}
	return &pb.FileInfo{
		Name: monitorAPI.BPFFileName(dn.File),
		Line: uint32(dn.Line),
	}
}
// decodePolicyMatchType extracts the policy match type bits from a policy
// verdict notification's flags, or 0 when none is present.
func decodePolicyMatchType(pvn *monitor.PolicyVerdictNotify) uint32 {
	if pvn == nil {
		return 0
	}
	matchBits := pvn.Flags & monitor.PolicyVerdictNotifyFlagMatchType
	return uint32(matchBits >> monitor.PolicyVerdictNotifyFlagMatchTypeBitOffset)
}
// decodeEthernet translates a gopacket Ethernet layer into its protobuf form.
func decodeEthernet(eth *layers.Ethernet) *pb.Ethernet {
	out := &pb.Ethernet{}
	out.Source = eth.SrcMAC.String()
	out.Destination = eth.DstMAC.String()
	return out
}
// decodeIPv4 translates a gopacket IPv4 layer into its protobuf form and
// returns the parsed source/destination addresses.
func decodeIPv4(ipv4 *layers.IPv4) (ip *pb.IP, src, dst netip.Addr) {
	// Invalid/empty IPs are tolerated: getters handle the zero value, and
	// addresses can be absent for Ethernet-only packets.
	src, _ = netipx.FromStdIP(ipv4.SrcIP)
	dst, _ = netipx.FromStdIP(ipv4.DstIP)
	ip = &pb.IP{
		IpVersion:   pb.IPVersion_IPv4,
		Source:      ipv4.SrcIP.String(),
		Destination: ipv4.DstIP.String(),
	}
	return ip, src, dst
}
// decodeIPv6 translates a gopacket IPv6 layer into its protobuf form and
// returns the parsed source/destination addresses.
func decodeIPv6(ipv6 *layers.IPv6) (ip *pb.IP, src, dst netip.Addr) {
	// Invalid/empty IPs are tolerated: getters handle the zero value, and
	// addresses can be absent for Ethernet-only packets.
	src, _ = netipx.FromStdIP(ipv6.SrcIP)
	dst, _ = netipx.FromStdIP(ipv6.DstIP)
	ip = &pb.IP{
		IpVersion:   pb.IPVersion_IPv6,
		Source:      ipv6.SrcIP.String(),
		Destination: ipv6.DstIP.String(),
	}
	return ip, src, dst
}
// decodeTCP translates a gopacket TCP layer into its protobuf form and
// returns the source and destination ports.
func decodeTCP(tcp *layers.TCP) (l4 *pb.Layer4, src, dst uint16) {
	flags := &pb.TCPFlags{
		FIN: tcp.FIN, SYN: tcp.SYN, RST: tcp.RST,
		PSH: tcp.PSH, ACK: tcp.ACK, URG: tcp.URG,
		ECE: tcp.ECE, CWR: tcp.CWR, NS: tcp.NS,
	}
	proto := &pb.Layer4_TCP{TCP: &pb.TCP{
		SourcePort:      uint32(tcp.SrcPort),
		DestinationPort: uint32(tcp.DstPort),
		Flags:           flags,
	}}
	return &pb.Layer4{Protocol: proto}, uint16(tcp.SrcPort), uint16(tcp.DstPort)
}
// decodeSCTP translates a gopacket SCTP layer into its protobuf form and
// returns the source and destination ports.
func decodeSCTP(sctp *layers.SCTP) (l4 *pb.Layer4, src, dst uint16) {
	proto := &pb.Layer4_SCTP{SCTP: &pb.SCTP{
		SourcePort:      uint32(sctp.SrcPort),
		DestinationPort: uint32(sctp.DstPort),
	}}
	return &pb.Layer4{Protocol: proto}, uint16(sctp.SrcPort), uint16(sctp.DstPort)
}
// decodeUDP translates a gopacket UDP layer into its protobuf form and
// returns the source and destination ports.
func decodeUDP(udp *layers.UDP) (l4 *pb.Layer4, src, dst uint16) {
	proto := &pb.Layer4_UDP{UDP: &pb.UDP{
		SourcePort:      uint32(udp.SrcPort),
		DestinationPort: uint32(udp.DstPort),
	}}
	return &pb.Layer4{Protocol: proto}, uint16(udp.SrcPort), uint16(udp.DstPort)
}
// decodeICMPv4 translates a gopacket ICMPv4 layer into its protobuf form.
func decodeICMPv4(icmp *layers.ICMPv4) *pb.Layer4 {
	msg := &pb.ICMPv4{
		Type: uint32(icmp.TypeCode.Type()),
		Code: uint32(icmp.TypeCode.Code()),
	}
	return &pb.Layer4{Protocol: &pb.Layer4_ICMPv4{ICMPv4: msg}}
}
// decodeICMPv6 translates a gopacket ICMPv6 layer into its protobuf form.
func decodeICMPv6(icmp *layers.ICMPv6) *pb.Layer4 {
	msg := &pb.ICMPv6{
		Type: uint32(icmp.TypeCode.Type()),
		Code: uint32(icmp.TypeCode.Code()),
	}
	return &pb.Layer4{Protocol: &pb.Layer4_ICMPv6{ICMPv6: msg}}
}
// decodeIsReply reports whether the flow is a reply packet, or nil when the
// event does not allow that determination.
func decodeIsReply(tn *monitor.TraceNotify, pvn *monitor.PolicyVerdictNotify) *wrapperspb.BoolValue {
	if tn != nil && tn.TraceReasonIsKnown() {
		// Encap/decap traces carry no usable reply information.
		if tn.TraceReasonIsEncap() || tn.TraceReasonIsDecap() {
			return nil
		}
		// Reason was specified by the datapath, just reuse it.
		return &wrapperspb.BoolValue{Value: tn.TraceReasonIsReply()}
	}
	if pvn != nil && pvn.Verdict >= 0 {
		// Forwarded PolicyVerdictEvents are emitted for the first packet of a
		// connection, so they are statically assumed not to be replies.
		return &wrapperspb.BoolValue{Value: false}
	}
	// For other events, such as drops, we simply do not know if they were
	// replies or not.
	return nil
}
// decodeCiliumEventType wraps a monitor event type and subtype into the
// protobuf CiliumEventType message.
func decodeCiliumEventType(eventType, eventSubType uint8) *pb.CiliumEventType {
	ev := &pb.CiliumEventType{}
	ev.Type = int32(eventType)
	ev.SubType = int32(eventSubType)
	return ev
}
// decodeTraceReason maps a datapath trace reason onto the Hubble protobuf
// TraceReason enum; returns TRACE_REASON_UNKNOWN when there is no trace
// notification.
func decodeTraceReason(tn *monitor.TraceNotify) pb.TraceReason {
	if tn == nil {
		return pb.TraceReason_TRACE_REASON_UNKNOWN
	}
	// The Hubble protobuf enum values aren't 1:1 mapped with Cilium's datapath
	// because we want pb.TraceReason_TRACE_REASON_UNKNOWN = 0 while in
	// datapath monitor.TraceReasonUnknown = 5. The mapping works as follow:
	switch {
	// monitor.TraceReasonUnknown is mapped to pb.TraceReason_TRACE_REASON_UNKNOWN
	case tn.TraceReason() == monitor.TraceReasonUnknown:
		return pb.TraceReason_TRACE_REASON_UNKNOWN
	// values before monitor.TraceReasonUnknown are "offset by one", e.g.
	// TraceReasonCtEstablished = 1 → TraceReason_ESTABLISHED = 2 to make room
	// for the zero value.
	case tn.TraceReason() < monitor.TraceReasonUnknown:
		return pb.TraceReason(tn.TraceReason()) + 1
	// all values greater than monitor.TraceReasonUnknown are mapped 1:1 with
	// the datapath values.
	default:
		return pb.TraceReason(tn.TraceReason())
	}
}
// decodeSecurityIdentities extracts the source and destination security
// identities from whichever notification is present. Policy verdict events
// only carry the remote identity; which side it belongs to depends on the
// traffic direction.
//
// Fix: the named result "sourceSecurityIdentiy" was misspelled; it is an
// internal name, so correcting it is caller-transparent.
func decodeSecurityIdentities(dn *monitor.DropNotify, tn *monitor.TraceNotify, pvn *monitor.PolicyVerdictNotify) (
	sourceSecurityIdentity, destinationSecurityIdentity uint32,
) {
	switch {
	case dn != nil:
		sourceSecurityIdentity = uint32(dn.SrcLabel)
		destinationSecurityIdentity = uint32(dn.DstLabel)
	case tn != nil:
		sourceSecurityIdentity = uint32(tn.SrcLabel)
		destinationSecurityIdentity = uint32(tn.DstLabel)
	case pvn != nil:
		if pvn.IsTrafficIngress() {
			sourceSecurityIdentity = uint32(pvn.RemoteLabel)
		} else {
			destinationSecurityIdentity = uint32(pvn.RemoteLabel)
		}
	}
	return
}
// decodeTrafficDirection derives the flow direction (ingress/egress) relative
// to the local endpoint srcEP, using drop, trace or policy verdict context.
func decodeTrafficDirection(srcEP uint32, dn *monitor.DropNotify, tn *monitor.TraceNotify, pvn *monitor.PolicyVerdictNotify) pb.TrafficDirection {
	if dn != nil && dn.Source != 0 {
		// If the local endpoint at which the drop occurred is the same as the
		// source of the dropped packet, we assume it was an egress flow. This
		// implies that we also assume that dropped packets are not dropped
		// reply packets of an ongoing connection.
		if dn.Source == uint16(srcEP) {
			return pb.TrafficDirection_EGRESS
		}
		return pb.TrafficDirection_INGRESS
	}
	if tn != nil && tn.Source != 0 {
		// For trace events, we assume that packets may be reply packets of an
		// ongoing connection. Therefore, we want to access the connection
		// tracking result from the `Reason` field to invert the direction for
		// reply packets. The datapath currently populates the `Reason` field
		// with CT information for some observation points.
		if tn.TraceReasonIsKnown() {
			// true if the traffic source is the local endpoint, i.e. egress
			isSourceEP := tn.Source == uint16(srcEP)
			// when OrigIP is set, then the packet was SNATed
			isSNATed := !tn.OriginalIP().IsUnspecified()
			// true if the packet is a reply, i.e. reverse direction
			isReply := tn.TraceReasonIsReply()
			switch {
			// Although technically the corresponding packet is ingressing the
			// stack (TraceReasonEncryptOverlay traces are TraceToStack), it is
			// ultimately originating from the local node and destinated to a
			// remote node, so egress make more sense to expose at a high
			// level.
			case tn.TraceReason() == monitor.TraceReasonEncryptOverlay:
				return pb.TrafficDirection_EGRESS
			// isSourceEP != isReply ==
			// (isSourceEP && !isReply) || (!isSourceEP && isReply)
			case isSourceEP != isReply:
				return pb.TrafficDirection_EGRESS
			case isSNATed:
				return pb.TrafficDirection_EGRESS
			}
			return pb.TrafficDirection_INGRESS
		}
	}
	if pvn != nil {
		if pvn.IsTrafficIngress() {
			return pb.TrafficDirection_INGRESS
		}
		return pb.TrafficDirection_EGRESS
	}
	return pb.TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN
}
// getTCPFlags renders the set TCP flags as a comma-separated string in the
// conventional SYN, ACK, RST, FIN, PSH, URG, ECE, CWR, NS order.
func getTCPFlags(tcp layers.TCP) string {
	type tcpFlag struct {
		set  bool
		name string
	}
	all := [...]tcpFlag{
		{tcp.SYN, "SYN"},
		{tcp.ACK, "ACK"},
		{tcp.RST, "RST"},
		{tcp.FIN, "FIN"},
		{tcp.PSH, "PSH"},
		{tcp.URG, "URG"},
		{tcp.ECE, "ECE"},
		{tcp.CWR, "CWR"},
		{tcp.NS, "NS"},
	}
	set := make([]string, 0, len(all))
	for _, f := range all {
		if f.set {
			set = append(set, f.name)
		}
	}
	return strings.Join(set, ", ")
}
// decodeDebugCapturePoint maps a debug capture subtype onto the protobuf
// DebugCapturePoint enum; UNKNOWN when no debug capture is present.
func decodeDebugCapturePoint(dbg *monitor.DebugCapture) pb.DebugCapturePoint {
	if dbg != nil {
		return pb.DebugCapturePoint(dbg.SubType)
	}
	return pb.DebugCapturePoint_DBG_CAPTURE_POINT_UNKNOWN
}
// decodeNetworkInterface resolves the network interface referenced by a
// trace notification or debug capture event, or nil when none applies.
func (p *Parser) decodeNetworkInterface(tn *monitor.TraceNotify, dbg *monitor.DebugCapture) *pb.NetworkInterface {
	var ifIndex uint32
	switch {
	case tn != nil:
		ifIndex = tn.Ifindex
	case dbg != nil:
		// Only these capture subtypes carry an interface index in Arg1.
		switch dbg.SubType {
		case monitor.DbgCaptureDelivery,
			monitor.DbgCaptureFromLb,
			monitor.DbgCaptureAfterV46,
			monitor.DbgCaptureAfterV64,
			monitor.DbgCaptureSnatPre,
			monitor.DbgCaptureSnatPost:
			ifIndex = dbg.Arg1
		}
	}
	if ifIndex == 0 {
		return nil
	}
	var name string
	if p.linkGetter != nil {
		// An unknown interface yields an empty name, which is omitted in the
		// protobuf message.
		name, _ = p.linkGetter.GetIfNameCached(int(ifIndex))
	}
	return &pb.NetworkInterface{Index: ifIndex, Name: name}
}
// decodeProxyPort extracts the proxy port from a to-proxy trace notification
// or a proxy pre/post debug capture; 0 when neither applies.
func decodeProxyPort(dbg *monitor.DebugCapture, tn *monitor.TraceNotify) uint32 {
	if tn != nil && tn.ObsPoint == monitorAPI.TraceToProxy {
		return uint32(tn.DstID)
	}
	if dbg == nil {
		return 0
	}
	switch dbg.SubType {
	case monitor.DbgCaptureProxyPre, monitor.DbgCaptureProxyPost:
		// Arg1 carries the port in network byte order.
		return byteorder.NetworkToHost32(dbg.Arg1)
	}
	return 0
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"crypto/tls"
"io"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
peerpb "github.com/cilium/cilium/api/v1/peer"
"github.com/cilium/cilium/pkg/crypto/certloader"
hubbleopts "github.com/cilium/cilium/pkg/hubble/server/serveroption"
)
// Client defines an interface that Peer service client should implement.
// It combines the generated gRPC PeerClient with io.Closer so that the
// underlying connection can be released by the caller.
type Client interface {
	peerpb.PeerClient
	io.Closer
}

// ClientBuilder creates a new Client.
type ClientBuilder interface {
	// Client builds a new Client connected to the given target.
	Client(target string) (Client, error)
}
// client is the concrete Client implementation; it embeds the generated
// PeerClient and keeps the gRPC connection so Close can tear it down.
type client struct {
	conn *grpc.ClientConn
	peerpb.PeerClient
}
// Close implements io.Closer; it closes the underlying gRPC connection when
// one was established and is a no-op otherwise.
func (c *client) Close() error {
	if c.conn != nil {
		return c.conn.Close()
	}
	return nil
}
// Compile-time check that LocalClientBuilder satisfies ClientBuilder.
var _ ClientBuilder = (*LocalClientBuilder)(nil)

// LocalClientBuilder is a ClientBuilder that is suitable when the gRPC
// connection to the Peer service is local (typically a Unix Domain Socket).
type LocalClientBuilder struct{}
// Client implements ClientBuilder.Client.
func (b LocalClientBuilder) Client(target string) (Client, error) {
	// The connection is local (e.g. a Unix Domain Socket), so using an
	// insecure transport is assumed to be safe in this context.
	creds := grpc.WithTransportCredentials(insecure.NewCredentials())
	conn, err := grpc.NewClient(target, creds)
	if err != nil {
		return nil, err
	}
	return &client{conn: conn, PeerClient: peerpb.NewPeerClient(conn)}, nil
}
// Compile-time check that RemoteClientBuilder satisfies ClientBuilder.
var _ ClientBuilder = (*RemoteClientBuilder)(nil)

// RemoteClientBuilder is a ClientBuilder that is suitable when the gRPC
// connection to the Peer service is remote (typically a K8s Service).
type RemoteClientBuilder struct {
	// TLSConfig provides the client TLS configuration; when nil, the
	// connection falls back to insecure transport.
	TLSConfig certloader.ClientConfigBuilder
	// TLSServerName is the name the server certificate is verified against.
	TLSServerName string
}
// Client implements the ClientBuilder interface.
func (b RemoteClientBuilder) Client(target string) (Client, error) {
	var cred grpc.DialOption
	if b.TLSConfig != nil {
		// NOTE: gosec is unable to resolve the constant and warns about "TLS
		// MinVersion too low".
		tlsConfig := b.TLSConfig.ClientConfig(&tls.Config{ //nolint:gosec
			ServerName: b.TLSServerName,
			MinVersion: hubbleopts.MinTLSVersion,
		})
		cred = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
	} else {
		cred = grpc.WithTransportCredentials(insecure.NewCredentials())
	}
	conn, err := grpc.NewClient(target, cred)
	if err != nil {
		return nil, err
	}
	return &client{conn: conn, PeerClient: peerpb.NewPeerClient(conn)}, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"net"
"strconv"
"strings"
peerpb "github.com/cilium/cilium/api/v1/peer"
"github.com/cilium/cilium/pkg/hubble/defaults"
)
// Peer represents a hubble peer.
type Peer struct {
	// Name is the name of the peer, typically the hostname. The name includes
	// the cluster name if a value other than default has been specified.
	// This value can be used to uniquely identify the host.
	// When the cluster name is not the default, the cluster name is prepended
	// to the peer name and a forward slash is added.
	//
	// Examples:
	// - runtime1
	// - testcluster/runtime1
	Name string
	// Address is the address of the peer's gRPC service. May be nil when the
	// peer advertised no (or an unparsable) address.
	Address net.Addr
	// TLSEnabled indicates whether the service offered by the peer has TLS
	// enabled.
	TLSEnabled bool
	// TLSServerName is the name the TLS certificate should be matched to.
	TLSServerName string
}
// FromChangeNotification creates a new Peer from a ChangeNotification.
// The advertised address is interpreted, in order, as a Unix socket path,
// host:port, or a bare IP (using the default server port); if none parses,
// the resulting Peer has a nil Address.
func FromChangeNotification(cn *peerpb.ChangeNotification) *Peer {
	if cn == nil {
		return (*Peer)(nil)
	}
	var err error
	var addr net.Addr
	switch a := cn.GetAddress(); {
	// Unix socket: explicit unix:// scheme, or an absolute *.sock path.
	case strings.HasPrefix(a, "unix://"), strings.HasPrefix(a, "/") && strings.HasSuffix(a, ".sock"):
		addr, err = net.ResolveUnixAddr("unix", a)
	case a == "":
		// no address specified, leave it nil
	default:
		var host, port string
		if host, port, err = net.SplitHostPort(a); err == nil {
			if ip := net.ParseIP(host); ip != nil {
				var p int
				if p, err = strconv.Atoi(port); err == nil {
					addr = &net.TCPAddr{
						IP:   ip,
						Port: p,
					}
				} else {
					// Unparsable port: fall back to the default server port.
					err = nil
					addr = &net.TCPAddr{
						IP:   ip,
						Port: defaults.ServerPort,
					}
				}
			} else {
				// resolve then
				addr, err = net.ResolveTCPAddr("tcp", a)
			}
		} else if ip := net.ParseIP(a); ip != nil {
			// Bare IP without port: use the default server port.
			err = nil
			addr = &net.TCPAddr{
				IP:   ip,
				Port: defaults.ServerPort,
			}
		}
	}
	// Any remaining parse/resolve error results in a nil address.
	if err != nil {
		addr = (net.Addr)(nil)
	}
	var tlsEnabled bool
	var tlsServerName string
	if tls := cn.GetTls(); tls != nil {
		tlsEnabled = true
		tlsServerName = tls.GetServerName()
	}
	return &Peer{
		Name:          cn.GetName(),
		Address:       addr,
		TLSEnabled:    tlsEnabled,
		TLSServerName: tlsServerName,
	}
}
// String implements fmt's Stringer interface; it returns the peer's name.
func (p Peer) String() string {
	return p.Name
}
// Equal reports whether the Peer is equal to the provided Peer.
// Addresses compare equal when both are nil, or when both are non-nil with
// the same string form and network.
//
// Fix: the previous implementation dereferenced p.Address/o.Address whenever
// they were not both nil, panicking when exactly one of them was nil.
func (p Peer) Equal(o Peer) bool {
	if p.Name != o.Name ||
		p.TLSEnabled != o.TLSEnabled ||
		p.TLSServerName != o.TLSServerName {
		return false
	}
	if p.Address == nil || o.Address == nil {
		return p.Address == nil && o.Address == nil
	}
	return p.Address.String() == o.Address.String() &&
		p.Address.Network() == o.Address.Network()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Copyright Authors of Cilium
package serveroption
import (
"crypto/tls"
"fmt"
"net"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
observerpb "github.com/cilium/cilium/api/v1/observer"
peerpb "github.com/cilium/cilium/api/v1/peer"
"github.com/cilium/cilium/pkg/crypto/certloader"
v1 "github.com/cilium/cilium/pkg/hubble/api/v1"
)
// MinTLSVersion defines the minimum TLS version clients are expected to
// support in order to establish a connection to the hubble server.
// It is a variable (not a const) so it can be referenced as a uint16 by TLS
// config builders.
var MinTLSVersion uint16 = tls.VersionTLS13
// Options stores all the configuration values for the hubble server.
type Options struct {
	// Listener is the transport the server accepts connections on; exactly
	// one listener may be configured.
	Listener net.Listener
	// HealthService, ObserverService and PeerService are the gRPC services
	// exposed by the server; nil services are not registered.
	HealthService   healthpb.HealthServer
	ObserverService observerpb.ObserverServer
	PeerService     peerpb.PeerServer
	// ServerTLSConfig provides TLS credentials; required unless Insecure.
	ServerTLSConfig certloader.ServerConfigBuilder
	// Insecure disables transport security.
	Insecure bool
	// GRPCMetrics, when set, instruments the server with prometheus metrics.
	GRPCMetrics *grpc_prometheus.ServerMetrics
	// Additional gRPC interceptors installed on the server.
	GRPCUnaryInterceptors  []grpc.UnaryServerInterceptor
	GRPCStreamInterceptors []grpc.StreamServerInterceptor
}
// Option customizes the hubble server's configuration. An Option may return
// an error, which aborts server construction.
type Option func(o *Options) error
// WithTCPListener configures a TCP listener with the address.
func WithTCPListener(address string) Option {
	return func(opts *Options) error {
		ln, err := net.Listen("tcp", address)
		if err != nil {
			return err
		}
		if opts.Listener != nil {
			// Another listener was already set up; close ours to avoid a leak.
			ln.Close()
			return fmt.Errorf("listener already configured: %s", address)
		}
		opts.Listener = ln
		return nil
	}
}
// WithHealthService configures the server to expose the gRPC health service.
func WithHealthService() Option {
	return func(opts *Options) error {
		srv := health.NewServer()
		// Advertise the observer service as serving.
		srv.SetServingStatus(v1.ObserverServiceName, healthpb.HealthCheckResponse_SERVING)
		opts.HealthService = srv
		return nil
	}
}
// WithObserverService configures the server to expose the given observer server service.
func WithObserverService(svc observerpb.ObserverServer) Option {
	return func(opts *Options) error {
		opts.ObserverService = svc
		return nil
	}
}
// WithPeerService configures the server to expose the given peer server service.
func WithPeerService(svc peerpb.PeerServer) Option {
	return func(opts *Options) error {
		opts.PeerService = svc
		return nil
	}
}
// WithInsecure disables transport security. Transport security is required
// unless WithInsecure is set. Use WithTLS to set transport credentials for
// transport security.
func WithInsecure() Option {
	return func(opts *Options) error {
		opts.Insecure = true
		return nil
	}
}
// WithServerTLS sets the transport credentials for the server based on TLS.
func WithServerTLS(cfg certloader.ServerConfigBuilder) Option {
	return func(opts *Options) error {
		opts.ServerTLSConfig = cfg
		return nil
	}
}
// WithGRPCMetrics configures the server with the specified prometheus gRPC
// ServerMetrics.
func WithGRPCMetrics(grpcMetrics *grpc_prometheus.ServerMetrics) Option {
	return func(opts *Options) error {
		opts.GRPCMetrics = grpcMetrics
		return nil
	}
}
// WithGRPCStreamInterceptor configures the server with the given gRPC server stream interceptors
func WithGRPCStreamInterceptor(interceptors ...grpc.StreamServerInterceptor) Option {
	return func(opts *Options) error {
		opts.GRPCStreamInterceptors = append(opts.GRPCStreamInterceptors, interceptors...)
		return nil
	}
}
// WithGRPCUnaryInterceptor configures the server with the given gRPC server
// unary interceptors.
// (The previous comment incorrectly said "stream interceptors".)
func WithGRPCUnaryInterceptor(interceptors ...grpc.UnaryServerInterceptor) Option {
	return func(o *Options) error {
		o.GRPCUnaryInterceptors = append(o.GRPCUnaryInterceptors, interceptors...)
		return nil
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Copyright Authors of Cilium
package serveroption
import (
"fmt"
"log/slog"
"net"
"os"
"strings"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/api"
)
// WithUnixSocketListener configures a unix domain socket listener with the
// given file path. When the process runs in privileged mode, the file group
// owner is set to socketGroup.
func WithUnixSocketListener(scopedLog *slog.Logger, path string) Option {
	return func(opts *Options) error {
		if opts.Listener != nil {
			return fmt.Errorf("listener already configured")
		}
		sockPath := strings.TrimPrefix(path, "unix://")
		// Best effort: remove a stale socket file left by a previous run.
		unix.Unlink(sockPath)
		ln, err := net.Listen("unix", sockPath)
		if err != nil {
			return err
		}
		// When running as root, adjust the socket's permissions/ownership.
		if os.Getuid() == 0 {
			if err := api.SetDefaultPermissions(scopedLog.Debug, sockPath); err != nil {
				ln.Close()
				return err
			}
		}
		opts.Listener = ln
		return nil
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Copyright Authors of Cilium
package testutils
import (
"context"
"net"
"net/netip"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/api/v1/models"
observerpb "github.com/cilium/cilium/api/v1/observer"
peerpb "github.com/cilium/cilium/api/v1/peer"
cgroupManager "github.com/cilium/cilium/pkg/cgroups/manager"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
peerTypes "github.com/cilium/cilium/pkg/hubble/peer/types"
poolTypes "github.com/cilium/cilium/pkg/hubble/relay/pool/types"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/ipcache"
slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
"github.com/cilium/cilium/pkg/labels"
policyTypes "github.com/cilium/cilium/pkg/policy/types"
)
// FakeGetFlowsServer is used for unit tests and implements the
// observerpb.Observer_GetFlowsServer interface.
type FakeGetFlowsServer struct {
	// OnSend is invoked by Send; leaving it nil makes Send panic.
	OnSend func(response *observerpb.GetFlowsResponse) error
	*FakeGRPCServerStream
}
// Send implements observerpb.Observer_GetFlowsServer.Send.
// It panics when no OnSend callback was provided.
func (s *FakeGetFlowsServer) Send(response *observerpb.GetFlowsResponse) error {
	if s.OnSend == nil {
		panic("OnSend not set")
	}
	// TODO: completely convert this into using flowpb.Flow
	return s.OnSend(response)
}
// FakeGetAgentEventsServer is used for unit tests and implements the
// observerpb.Observer_GetAgentEventsServer interface.
type FakeGetAgentEventsServer struct {
	// OnSend is invoked by Send; leaving it nil makes Send panic.
	OnSend func(response *observerpb.GetAgentEventsResponse) error
	*FakeGRPCServerStream
}
// Send implements observerpb.Observer_GetAgentEventsServer.Send.
func (s *FakeGetAgentEventsServer) Send(response *observerpb.GetAgentEventsResponse) error {
if s.OnSend != nil {
return s.OnSend(response)
}
panic("OnSend not set")
}
// FakeObserverClient is used for unit tests and implements the
// observerpb.ObserverClient interface. Every method delegates to the
// corresponding OnXxx callback and panics when the callback is unset.
type FakeObserverClient struct {
	OnGetFlows       func(ctx context.Context, in *observerpb.GetFlowsRequest, opts ...grpc.CallOption) (observerpb.Observer_GetFlowsClient, error)
	OnGetAgentEvents func(ctx context.Context, in *observerpb.GetAgentEventsRequest, opts ...grpc.CallOption) (observerpb.Observer_GetAgentEventsClient, error)
	OnGetDebugEvents func(ctx context.Context, in *observerpb.GetDebugEventsRequest, opts ...grpc.CallOption) (observerpb.Observer_GetDebugEventsClient, error)
	OnGetNodes       func(ctx context.Context, in *observerpb.GetNodesRequest, opts ...grpc.CallOption) (*observerpb.GetNodesResponse, error)
	OnGetNamespaces  func(ctx context.Context, in *observerpb.GetNamespacesRequest, opts ...grpc.CallOption) (*observerpb.GetNamespacesResponse, error)
	OnServerStatus   func(ctx context.Context, in *observerpb.ServerStatusRequest, opts ...grpc.CallOption) (*observerpb.ServerStatusResponse, error)
}

// GetFlows implements observerpb.ObserverClient.GetFlows.
func (c *FakeObserverClient) GetFlows(ctx context.Context, in *observerpb.GetFlowsRequest, opts ...grpc.CallOption) (observerpb.Observer_GetFlowsClient, error) {
	if c.OnGetFlows == nil {
		panic("OnGetFlows not set")
	}
	return c.OnGetFlows(ctx, in, opts...)
}

// GetAgentEvents implements observerpb.ObserverClient.GetAgentEvents.
func (c *FakeObserverClient) GetAgentEvents(ctx context.Context, in *observerpb.GetAgentEventsRequest, opts ...grpc.CallOption) (observerpb.Observer_GetAgentEventsClient, error) {
	if c.OnGetAgentEvents == nil {
		panic("OnGetAgentEvents not set")
	}
	return c.OnGetAgentEvents(ctx, in, opts...)
}

// GetDebugEvents implements observerpb.ObserverClient.GetDebugEvents.
func (c *FakeObserverClient) GetDebugEvents(ctx context.Context, in *observerpb.GetDebugEventsRequest, opts ...grpc.CallOption) (observerpb.Observer_GetDebugEventsClient, error) {
	if c.OnGetDebugEvents == nil {
		panic("OnGetDebugEvents not set")
	}
	return c.OnGetDebugEvents(ctx, in, opts...)
}

// GetNodes implements observerpb.ObserverClient.GetNodes.
func (c *FakeObserverClient) GetNodes(ctx context.Context, in *observerpb.GetNodesRequest, opts ...grpc.CallOption) (*observerpb.GetNodesResponse, error) {
	if c.OnGetNodes == nil {
		panic("OnGetNodes not set")
	}
	return c.OnGetNodes(ctx, in, opts...)
}

// GetNamespaces implements observerpb.ObserverClient.GetNamespaces.
func (c *FakeObserverClient) GetNamespaces(ctx context.Context, in *observerpb.GetNamespacesRequest, opts ...grpc.CallOption) (*observerpb.GetNamespacesResponse, error) {
	if c.OnGetNamespaces == nil {
		panic("OnGetNamespaces not set")
	}
	return c.OnGetNamespaces(ctx, in, opts...)
}

// ServerStatus implements observerpb.ObserverClient.ServerStatus.
func (c *FakeObserverClient) ServerStatus(ctx context.Context, in *observerpb.ServerStatusRequest, opts ...grpc.CallOption) (*observerpb.ServerStatusResponse, error) {
	if c.OnServerStatus == nil {
		panic("OnServerStatus not set")
	}
	return c.OnServerStatus(ctx, in, opts...)
}
// FakeGetFlowsClient is used for unit tests and implements the
// observerpb.Observer_GetFlowsClient interface.
type FakeGetFlowsClient struct {
	OnRecv func() (*observerpb.GetFlowsResponse, error)
	*FakeGRPCClientStream
}

// Recv implements observerpb.Observer_GetFlowsClient.Recv. It delegates to
// the OnRecv callback and panics if the callback was not provided.
func (c *FakeGetFlowsClient) Recv() (*observerpb.GetFlowsResponse, error) {
	if c.OnRecv == nil {
		panic("OnRecv not set")
	}
	return c.OnRecv()
}
// FakePeerNotifyServer is used for unit tests and implements the
// peerpb.Peer_NotifyServer interface.
type FakePeerNotifyServer struct {
	OnSend func(response *peerpb.ChangeNotification) error
	*FakeGRPCServerStream
}

// Send implements peerpb.Peer_NotifyServer.Send. It delegates to the OnSend
// callback and panics if the callback was not provided.
func (s *FakePeerNotifyServer) Send(response *peerpb.ChangeNotification) error {
	if s.OnSend == nil {
		panic("OnSend not set")
	}
	return s.OnSend(response)
}
// FakePeerNotifyClient is used for unit tests and implements the
// peerpb.Peer_NotifyClient interface.
type FakePeerNotifyClient struct {
	OnRecv func() (*peerpb.ChangeNotification, error)
	*FakeGRPCClientStream
}

// Recv implements peerpb.Peer_NotifyClient.Recv. It delegates to the OnRecv
// callback and panics if the callback was not provided.
func (c *FakePeerNotifyClient) Recv() (*peerpb.ChangeNotification, error) {
	if c.OnRecv == nil {
		panic("OnRecv not set")
	}
	return c.OnRecv()
}
// FakePeerClient is used for unit tests and implements the peerTypes.Client
// interface.
type FakePeerClient struct {
	OnNotify func(ctx context.Context, in *peerpb.NotifyRequest, opts ...grpc.CallOption) (peerpb.Peer_NotifyClient, error)
	OnClose  func() error
}

// Notify implements peerTypes.Client.Notify. It delegates to the OnNotify
// callback and panics if the callback was not provided.
func (c *FakePeerClient) Notify(ctx context.Context, in *peerpb.NotifyRequest, opts ...grpc.CallOption) (peerpb.Peer_NotifyClient, error) {
	if c.OnNotify == nil {
		panic("OnNotify not set")
	}
	return c.OnNotify(ctx, in, opts...)
}

// Close implements peerTypes.Client.Close. It delegates to the OnClose
// callback and panics if the callback was not provided.
func (c *FakePeerClient) Close() error {
	if c.OnClose == nil {
		panic("OnClose not set")
	}
	return c.OnClose()
}
// FakePeerClientBuilder is used for unit tests and implements the
// peerTypes.ClientBuilder interface.
type FakePeerClientBuilder struct {
	OnClient func(target string) (peerTypes.Client, error)
}

// Client implements peerTypes.ClientBuilder.Client. It delegates to the
// OnClient callback and panics if the callback was not provided.
func (b FakePeerClientBuilder) Client(target string) (peerTypes.Client, error) {
	if b.OnClient == nil {
		panic("OnClient not set")
	}
	return b.OnClient(target)
}
// FakePeerLister is used for unit tests and implements the
// relay/observer.PeerListReporter interface.
type FakePeerLister struct {
	OnList func() []poolTypes.Peer
}

// List implements relay/observer.PeerListReporter.List. It delegates to the
// OnList callback and panics if the callback was not provided.
func (r *FakePeerLister) List() []poolTypes.Peer {
	if r.OnList == nil {
		panic("OnList not set")
	}
	return r.OnList()
}
// FakeClientConn is used for unit tests and implements the
// poolTypes.ClientConn interface. Every method delegates to the matching
// OnXxx callback and panics when the callback is unset.
type FakeClientConn struct {
	OnGetState  func() connectivity.State
	OnClose     func() error
	OnInvoke    func(ctx context.Context, method string, args any, reply any, opts ...grpc.CallOption) error
	OnNewStream func(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)
}

// GetState implements poolTypes.ClientConn.GetState.
func (c FakeClientConn) GetState() connectivity.State {
	if c.OnGetState == nil {
		panic("OnGetState not set")
	}
	return c.OnGetState()
}

// Close implements poolTypes.ClientConn.Close.
func (c FakeClientConn) Close() error {
	if c.OnClose == nil {
		panic("OnClose not set")
	}
	return c.OnClose()
}

// Invoke implements poolTypes.ClientConn.Invoke.
func (c FakeClientConn) Invoke(ctx context.Context, method string, args any, reply any, opts ...grpc.CallOption) error {
	if c.OnInvoke == nil {
		panic("OnInvoke not set")
	}
	return c.OnInvoke(ctx, method, args, reply, opts...)
}

// NewStream implements poolTypes.ClientConn.NewStream.
func (c FakeClientConn) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	if c.OnNewStream == nil {
		panic("OnNewStream not set")
	}
	return c.OnNewStream(ctx, desc, method, opts...)
}
// FakeFQDNCache is used for unit tests that needs FQDNCache and/or DNSGetter.
type FakeFQDNCache struct {
	OnInitializeFrom func(entries []*models.DNSLookup)
	OnAddDNSLookup   func(epID uint32, lookupTime time.Time, domainName string, ips []net.IP, ttl uint32)
	OnGetNamesOf     func(epID uint32, ip netip.Addr) []string
}

// InitializeFrom implements FQDNCache.InitializeFrom. It delegates to the
// OnInitializeFrom callback and panics if the callback was not provided.
func (f *FakeFQDNCache) InitializeFrom(entries []*models.DNSLookup) {
	if f.OnInitializeFrom == nil {
		panic("InitializeFrom([]*models.DNSLookup) should not have been called since it was not defined")
	}
	f.OnInitializeFrom(entries)
}

// AddDNSLookup implements FQDNCache.AddDNSLookup. It delegates to the
// OnAddDNSLookup callback and panics if the callback was not provided.
func (f *FakeFQDNCache) AddDNSLookup(epID uint32, lookupTime time.Time, domainName string, ips []net.IP, ttl uint32) {
	if f.OnAddDNSLookup == nil {
		panic("AddDNSLookup(uint32, time.Time, string, []net.IP, uint32) should not have been called since it was not defined")
	}
	f.OnAddDNSLookup(epID, lookupTime, domainName, ips, ttl)
}

// GetNamesOf implements FQDNCache.GetNameOf. It delegates to the
// OnGetNamesOf callback and panics if the callback was not provided.
func (f *FakeFQDNCache) GetNamesOf(epID uint32, ip netip.Addr) []string {
	if f.OnGetNamesOf == nil {
		panic("GetNamesOf(uint32, netip.Addr) should not have been called since it was not defined")
	}
	return f.OnGetNamesOf(epID, ip)
}

// NoopDNSGetter always returns an empty response.
var NoopDNSGetter = FakeFQDNCache{
	OnGetNamesOf: func(sourceEpID uint32, ip netip.Addr) (fqdns []string) {
		return nil
	},
}
// FakeEndpointGetter is used for unit tests that needs EndpointGetter.
type FakeEndpointGetter struct {
	OnGetEndpointInfo     func(ip netip.Addr) (endpoint getters.EndpointInfo, ok bool)
	OnGetEndpointInfoByID func(id uint16) (endpoint getters.EndpointInfo, ok bool)
}

// GetEndpointInfo implements EndpointGetter.GetEndpointInfo. It delegates to
// the OnGetEndpointInfo callback and panics if the callback was not provided.
func (f *FakeEndpointGetter) GetEndpointInfo(ip netip.Addr) (endpoint getters.EndpointInfo, ok bool) {
	if f.OnGetEndpointInfo != nil {
		return f.OnGetEndpointInfo(ip)
	}
	panic("OnGetEndpointInfo not set")
}

// GetEndpointInfoByID implements EndpointGetter.GetEndpointInfoByID. It
// delegates to the OnGetEndpointInfoByID callback and panics if the callback
// was not provided.
func (f *FakeEndpointGetter) GetEndpointInfoByID(id uint16) (endpoint getters.EndpointInfo, ok bool) {
	if f.OnGetEndpointInfoByID != nil {
		return f.OnGetEndpointInfoByID(id)
	}
	// Fixed message: previously said "GetEndpointInfoByID not set", which
	// neither matched the field name nor the "OnXxx not set" convention used
	// by every other fake in this file.
	panic("OnGetEndpointInfoByID not set")
}

// NoopEndpointGetter always returns an empty response.
var NoopEndpointGetter = FakeEndpointGetter{
	OnGetEndpointInfo: func(ip netip.Addr) (endpoint getters.EndpointInfo, ok bool) {
		return nil, false
	},
	OnGetEndpointInfoByID: func(id uint16) (endpoint getters.EndpointInfo, ok bool) {
		return nil, false
	},
}
// FakeLinkGetter is used for unit tests and resolves every interface index
// to the loopback device.
type FakeLinkGetter struct{}

// Name returns the interface name for ifindex; it is always "lo".
func (e *FakeLinkGetter) Name(ifindex uint32) string {
	return "lo"
}

// GetIfNameCached returns the interface name for ifindex together with a
// cache-hit flag, which is always true.
func (e *FakeLinkGetter) GetIfNameCached(ifindex int) (string, bool) {
	return e.Name(uint32(ifindex)), true
}

// NoopLinkGetter is a ready-to-use FakeLinkGetter.
var NoopLinkGetter = FakeLinkGetter{}
// FakeIPGetter is used for unit tests that needs IPGetter.
type FakeIPGetter struct {
	OnGetK8sMetadata  func(ip netip.Addr) *ipcache.K8sMetadata
	OnLookupSecIDByIP func(ip netip.Addr) (ipcache.Identity, bool)
}

// GetK8sMetadata implements FakeIPGetter.GetK8sMetadata. It delegates to the
// OnGetK8sMetadata callback and panics if the callback was not provided.
func (f *FakeIPGetter) GetK8sMetadata(ip netip.Addr) *ipcache.K8sMetadata {
	if f.OnGetK8sMetadata != nil {
		return f.OnGetK8sMetadata(ip)
	}
	panic("OnGetK8sMetadata not set")
}

// LookupSecIDByIP implements FakeIPGetter.LookupSecIDByIP. It delegates to
// the OnLookupSecIDByIP callback and panics if the callback was not provided.
func (f *FakeIPGetter) LookupSecIDByIP(ip netip.Addr) (ipcache.Identity, bool) {
	if f.OnLookupSecIDByIP != nil {
		return f.OnLookupSecIDByIP(ip)
	}
	// Fixed message: previously said "OnLookupByIP not set", which named a
	// field that does not exist (the field is OnLookupSecIDByIP).
	panic("OnLookupSecIDByIP not set")
}

// NoopIPGetter always returns an empty response.
var NoopIPGetter = FakeIPGetter{
	OnGetK8sMetadata: func(ip netip.Addr) *ipcache.K8sMetadata {
		return nil
	},
	OnLookupSecIDByIP: func(ip netip.Addr) (ipcache.Identity, bool) {
		return ipcache.Identity{}, false
	},
}
// FakeServiceGetter is used for unit tests that need ServiceGetter.
type FakeServiceGetter struct {
	OnGetServiceByAddr func(ip netip.Addr, port uint16) *flowpb.Service
}

// GetServiceByAddr implements FakeServiceGetter.GetServiceByAddr. It
// delegates to the OnGetServiceByAddr callback and panics if the callback
// was not provided.
func (f *FakeServiceGetter) GetServiceByAddr(ip netip.Addr, port uint16) *flowpb.Service {
	if f.OnGetServiceByAddr == nil {
		panic("OnGetServiceByAddr not set")
	}
	return f.OnGetServiceByAddr(ip, port)
}

// NoopServiceGetter always returns an empty response.
var NoopServiceGetter = FakeServiceGetter{
	OnGetServiceByAddr: func(ip netip.Addr, port uint16) *flowpb.Service {
		return nil
	},
}
// FakeIdentityGetter is used for unit tests that need IdentityGetter.
type FakeIdentityGetter struct {
	OnGetIdentity func(securityIdentity uint32) (*identity.Identity, error)
}

// GetIdentity implements IdentityGetter.GetIdentity. It delegates to the
// OnGetIdentity callback and panics if the callback was not provided.
func (f *FakeIdentityGetter) GetIdentity(securityIdentity uint32) (*identity.Identity, error) {
	if f.OnGetIdentity == nil {
		panic("OnGetIdentity not set")
	}
	return f.OnGetIdentity(securityIdentity)
}

// NoopIdentityGetter always returns an empty response.
var NoopIdentityGetter = FakeIdentityGetter{
	OnGetIdentity: func(securityIdentity uint32) (*identity.Identity, error) {
		return &identity.Identity{}, nil
	},
}
// FakeEndpointInfo implements getters.EndpointInfo for unit tests. All interface
// methods return values exposed in the fields.
type FakeEndpointInfo struct {
	ContainerIDs []string
	ID           uint64
	Identity     identity.NumericIdentity
	IPv4         net.IP
	IPv6         net.IP
	PodName      string
	PodNamespace string
	Labels       []string
	Pod          *slim_corev1.Pod

	PolicyMap      map[policyTypes.Key]labels.LabelArrayListString
	PolicyRevision uint64
}

// GetID returns the ID of the endpoint.
func (e *FakeEndpointInfo) GetID() uint64 { return e.ID }

// GetIdentity returns the numerical security identity of the endpoint.
func (e *FakeEndpointInfo) GetIdentity() identity.NumericIdentity { return e.Identity }

// GetK8sPodName returns the pod name of the endpoint.
func (e *FakeEndpointInfo) GetK8sPodName() string { return e.PodName }

// GetK8sNamespace returns the pod namespace of the endpoint.
func (e *FakeEndpointInfo) GetK8sNamespace() string { return e.PodNamespace }

// GetLabels returns the labels of the endpoint.
func (e *FakeEndpointInfo) GetLabels() labels.Labels {
	return labels.NewLabelsFromModel(e.Labels)
}

// GetPod return the pod object of the endpoint.
func (e *FakeEndpointInfo) GetPod() *slim_corev1.Pod { return e.Pod }

// GetPolicyCorrelationInfoForKey returns the policy correlation info stored
// in PolicyMap for the given key; ok reports whether the key was present.
// The returned info always carries PolicyRevision, even on a miss.
func (e *FakeEndpointInfo) GetPolicyCorrelationInfoForKey(key policyTypes.Key) (
	info policyTypes.PolicyCorrelationInfo,
	ok bool,
) {
	info.Revision = e.PolicyRevision
	info.RuleLabels, ok = e.PolicyMap[key]
	return info, ok
}
// FakePodMetadataGetter is used for unit tests that need a PodMetadataGetter.
type FakePodMetadataGetter struct {
	OnGetPodMetadataForContainer func(cgroupId uint64) *cgroupManager.PodMetadata
}

// GetPodMetadataForContainer implements PodMetadataGetter.GetPodMetadataForContainer.
// It delegates to the OnGetPodMetadataForContainer callback and panics if the
// callback was not provided.
func (f *FakePodMetadataGetter) GetPodMetadataForContainer(cgroupId uint64) *cgroupManager.PodMetadata {
	if f.OnGetPodMetadataForContainer != nil {
		return f.OnGetPodMetadataForContainer(cgroupId)
	}
	// Fixed message: previously lacked the "On" prefix used by the field name
	// and by every other fake in this file.
	panic("OnGetPodMetadataForContainer not set")
}

// NoopPodMetadataGetter always returns an empty response.
var NoopPodMetadataGetter = FakePodMetadataGetter{
	OnGetPodMetadataForContainer: func(cgroupId uint64) *cgroupManager.PodMetadata {
		return nil
	},
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
// Copyright Authors of Cilium
package testutils
import (
"context"
"google.golang.org/grpc/metadata"
)
// FakeGRPCServerStream implements google.golang.org/grpc.ServerStream
// interface for unit tests. Each method delegates to the corresponding OnXxx
// callback and panics if that callback was not provided.
type FakeGRPCServerStream struct {
	OnSetHeader  func(metadata.MD) error
	OnSendHeader func(metadata.MD) error
	OnSetTrailer func(m metadata.MD)
	OnContext    func() context.Context
	OnSendMsg    func(m any) error
	OnRecvMsg    func(m any) error
}

// SetHeader implements grpc.ServerStream.SetHeader.
func (s *FakeGRPCServerStream) SetHeader(m metadata.MD) error {
	if s.OnSetHeader != nil {
		return s.OnSetHeader(m)
	}
	panic("OnSetHeader not set")
}

// SendHeader implements grpc.ServerStream.SendHeader.
func (s *FakeGRPCServerStream) SendHeader(m metadata.MD) error {
	if s.OnSendHeader != nil {
		return s.OnSendHeader(m)
	}
	panic("OnSendHeader not set")
}

// SetTrailer implements grpc.ServerStream.SetTrailer.
func (s *FakeGRPCServerStream) SetTrailer(m metadata.MD) {
	if s.OnSetTrailer != nil {
		s.OnSetTrailer(m)
		// Bug fix: without this return the method fell through and panicked
		// with "OnSetTrailer not set" even when the callback WAS set.
		return
	}
	panic("OnSetTrailer not set")
}

// Context implements grpc.ServerStream.Context.
func (s *FakeGRPCServerStream) Context() context.Context {
	if s.OnContext != nil {
		return s.OnContext()
	}
	panic("OnContext not set")
}

// SendMsg implements grpc.ServerStream.SendMsg.
func (s *FakeGRPCServerStream) SendMsg(m any) error {
	if s.OnSendMsg != nil {
		return s.OnSendMsg(m)
	}
	panic("OnSendMsg not set")
}

// RecvMsg implements grpc.ServerStream.RecvMsg.
func (s *FakeGRPCServerStream) RecvMsg(m any) error {
	if s.OnRecvMsg != nil {
		return s.OnRecvMsg(m)
	}
	panic("OnRecvMsg not set")
}
// FakeGRPCClientStream implements google.golang.org/grpc.ClientStream
// interface for unit tests. Each method delegates to the corresponding OnXxx
// callback and panics when the callback is unset.
type FakeGRPCClientStream struct {
	OnHeader    func() (metadata.MD, error)
	OnTrailer   func() metadata.MD
	OnCloseSend func() error
	OnContext   func() context.Context
	OnSendMsg   func(m any) error
	OnRecvMsg   func(m any) error
}

// Header implements grpc.ClientStream.Header.
func (c *FakeGRPCClientStream) Header() (metadata.MD, error) {
	if c.OnHeader == nil {
		panic("OnHeader not set")
	}
	return c.OnHeader()
}

// Trailer implements grpc.ClientStream.Trailer.
func (c *FakeGRPCClientStream) Trailer() metadata.MD {
	if c.OnTrailer == nil {
		panic("OnTrailer not set")
	}
	return c.OnTrailer()
}

// CloseSend implements grpc.ClientStream.CloseSend.
func (c *FakeGRPCClientStream) CloseSend() error {
	if c.OnCloseSend == nil {
		panic("OnCloseSend not set")
	}
	return c.OnCloseSend()
}

// Context implements grpc.ClientStream.Context.
func (c *FakeGRPCClientStream) Context() context.Context {
	if c.OnContext == nil {
		panic("OnContext not set")
	}
	return c.OnContext()
}

// SendMsg implements grpc.ClientStream.SendMsg.
func (c *FakeGRPCClientStream) SendMsg(m any) error {
	if c.OnSendMsg == nil {
		panic("OnSendMsg not set")
	}
	return c.OnSendMsg(m)
}

// RecvMsg implements grpc.ClientStream.RecvMsg.
func (c *FakeGRPCClientStream) RecvMsg(m any) error {
	if c.OnRecvMsg == nil {
		panic("OnRecvMsg not set")
	}
	return c.OnRecvMsg(m)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package testutils
import (
"bytes"
"encoding/binary"
"encoding/gob"
"fmt"
"github.com/gopacket/gopacket"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/monitor"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
)
// CreateL3L4Payload assembles a L3/L4 payload for testing purposes
func CreateL3L4Payload(message any, layers ...gopacket.SerializableLayer) ([]byte, error) {
	buf := &bytes.Buffer{}
	// Serialize the monitor event header. TraceNotify and DropNotify are
	// truncated to their version-dependent data offset, which allows tests to
	// serialize previous versions of these events without the trailing zero
	// padding of the maximum-size structure (ex. DropNotifyV1 -> DropNotifyV2
	// + zero bytes of padding).
	switch msg := message.(type) {
	case monitor.TraceNotify:
		if err := binary.Write(buf, byteorder.Native, message); err != nil {
			return nil, err
		}
		buf.Truncate(int(msg.DataOffset()))
	case monitor.DropNotify:
		if err := binary.Write(buf, byteorder.Native, message); err != nil {
			return nil, err
		}
		buf.Truncate(int(msg.DataOffset()))
	case monitor.DebugCapture, monitor.PolicyVerdictNotify:
		if err := binary.Write(buf, byteorder.Native, message); err != nil {
			return nil, err
		}
	case monitorAPI.AgentNotify:
		// Agent notifications are gob-encoded behind a one-byte type marker.
		buf.WriteByte(byte(monitorAPI.MessageTypeAgent))
		if err := gob.NewEncoder(buf).Encode(message); err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unsupported message type %T", msg)
	}
	// Serialize the packet layers and append them after the event header.
	pkt := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{
		FixLengths: true,
	}
	if err := gopacket.SerializeLayers(pkt, opts, layers...); err != nil {
		return nil, err
	}
	if _, err := buf.Write(pkt.Bytes()); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// MustCreateL3L4Payload wraps CreateL3L4Payload, but panics on error
func MustCreateL3L4Payload(message any, layers ...gopacket.SerializableLayer) []byte {
	p, err := CreateL3L4Payload(message, layers...)
	if err != nil {
		panic(err)
	}
	return p
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"google.golang.org/protobuf/testing/protocmp"
)
// AssertProtoEqual compares want and got as protobuf messages and reports a
// test failure with a unified diff when they differ. It returns true when the
// messages are equal.
func AssertProtoEqual(t *testing.T, want, got any, msgAndArgs ...any) bool {
	t.Helper()
	diff := cmp.Diff(want, got, protocmp.Transform())
	if diff == "" {
		return true
	}
	return assert.Fail(t, fmt.Sprintf("not equal (-want +got):\n%s", diff), msgAndArgs...)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package iana
import (
"regexp"
)
// isSvcName reports whether a string matches the IANA Service Name grammar:
// alphanumeric characters of which at least one is not a number, with
// non-consecutive dashes ('-') that may not appear at the beginning or end.
// Note: Character case must be ignored when comparing service names.
var isSvcName = regexp.MustCompile(`^([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$`).MatchString

// IsSvcName returns true if the string conforms to IANA Service Name specification
// (RFC 6335 Section 5.1. Service Name Syntax): 1-15 characters matching the
// grammar above.
func IsSvcName(name string) bool {
	if len(name) == 0 || len(name) > 15 {
		return false
	}
	return isSvcName(name)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cache
import (
"context"
"errors"
"fmt"
"log/slog"
"os"
"path"
"path/filepath"
"sync/atomic"
"github.com/cilium/stream"
"github.com/google/renameio/v2"
jsoniter "github.com/json-iterator/go"
"github.com/cilium/cilium/pkg/allocator"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/key"
"github.com/cilium/cilium/pkg/idpool"
api "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/cilium/cilium/pkg/k8s/identitybackend"
"github.com/cilium/cilium/pkg/kvstore"
kvstoreallocator "github.com/cilium/cilium/pkg/kvstore/allocator"
"github.com/cilium/cilium/pkg/kvstore/allocator/doublewrite"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/time"
"github.com/cilium/cilium/pkg/trigger"
)
var (
	// IdentitiesPath is the path to where identities are stored in the
	// key-value store: "<BaseKeyPrefix>/state/identities/v1".
	IdentitiesPath = path.Join(kvstore.BaseKeyPrefix, "state", "identities", "v1")
)
// CheckpointFile is the filename for the local allocator checkpoint. This is
// periodically written, and restored on restart.
// The full path is, by default, /run/cilium/state/local_allocator_state.json
const CheckpointFile = "local_allocator_state.json"
// CachingIdentityAllocator manages the allocation of identities for both
// global and local identities.
type CachingIdentityAllocator struct {
	logger *slog.Logger
	// IdentityAllocator is an allocator for security identities from the
	// kvstore.
	IdentityAllocator *allocator.Allocator

	// globalIdentityAllocatorInitialized is closed whenever the global identity
	// allocator is initialized.
	globalIdentityAllocatorInitialized chan struct{}

	// localLock prevents interleaving of allocations and calls to UpdateIdentities
	localLock           lock.Mutex
	localIdentities     *localIdentityCache
	localNodeIdentities *localIdentityCache

	// identitiesPath is the kvstore base path used by the kvstore-backed
	// allocator backends.
	identitiesPath string

	// This field exists to hand out references that are either for sending
	// or receiving. It should not be used directly without converting it first
	// to an AllocatorEventSendChan or AllocatorEventRecvChan.
	events  allocator.AllocatorEventChan
	watcher identityWatcher

	// setupMutex synchronizes InitIdentityAllocator() and Close()
	setupMutex lock.Mutex

	owner IdentityAllocatorOwner

	// checkpointTrigger rate-limits writes of the local allocator state;
	// triggerDone is closed once the trigger has fully shut down.
	checkpointTrigger *trigger.Trigger
	triggerDone       <-chan struct{}

	// restoredIdentities are the set of identities read in from a
	// checkpoint on startup. These should be released, see `restoreLocalIdentities()`
	// for more info.
	restoredIdentities map[identity.NumericIdentity]*identity.Identity

	// checkpointPath is the file where local allocator state should be checkpointed.
	// The default is /run/cilium/state/local_allocator_state.json, changed only for testing.
	checkpointPath string

	// operatorIDManagement indicates if cilium-operator is managing Cilium Identities.
	operatorIDManagement bool

	// maxAllocAttempts is the number of attempted allocation requests
	// performed before failing. This is mainly introduced for testing purposes.
	maxAllocAttempts int

	// timeout for identity allocation operations.
	timeout time.Duration

	// syncInterval is the periodic synchronization interval of the allocated identities.
	syncInterval time.Duration
}
type AllocatorConfig struct {
EnableOperatorManageCIDs bool
Timeout time.Duration
SyncInterval time.Duration
maxAllocAttempts int
}
// NewTestAllocatorConfig returns an AllocatorConfig initialized for testing purposes.
func NewTestAllocatorConfig() AllocatorConfig {
return AllocatorConfig{
EnableOperatorManageCIDs: false,
Timeout: 5 * time.Second,
SyncInterval: 1 * time.Hour,
}
}
// IdentityAllocatorOwner is the interface the owner of an identity allocator
// must implement
type IdentityAllocatorOwner interface {
	// UpdateIdentities will be called when identities have changed
	//
	// The caller is responsible for making sure the same identity
	// is not present in both 'added' and 'deleted', so that they
	// can be processed in either order.
	UpdateIdentities(added, deleted identity.IdentityMap) <-chan struct{}

	// GetNodeSuffix must return the node specific suffix to use
	GetNodeSuffix() string
}
// IdentityAllocator is any type which is responsible for allocating security
// identities based of sets of labels, and caching information about identities
// locally.
type IdentityAllocator interface {
	// Identity changes are observable.
	stream.Observable[IdentityChange]

	// WaitForInitialGlobalIdentities waits for the initial set of global
	// security identities to have been received.
	WaitForInitialGlobalIdentities(context.Context) error

	// AllocateIdentity allocates an identity described by the specified labels.
	// A possible previously used numeric identity for these labels can be passed
	// in as the last parameter; identity.InvalidIdentity must be passed if no
	// previous numeric identity exists.
	AllocateIdentity(context.Context, labels.Labels, bool, identity.NumericIdentity) (*identity.Identity, bool, error)

	// AllocateLocalIdentity allocates an identity, returning error if the set of
	// labels would not result in a locally-scoped identity.
	//
	// If notifyOwner is true, then the SelectorCache is directly updated with this identity. If not,
	// the caller *must* ensure the SelectorCache learns about this identity.
	AllocateLocalIdentity(lbls labels.Labels, notifyOwner bool, oldNID identity.NumericIdentity) (*identity.Identity, bool, error)

	// Release is the reverse operation of AllocateIdentity() and releases the
	// specified identity.
	Release(context.Context, *identity.Identity, bool) (released bool, err error)

	// ReleaseLocalIdentities releases a slice of locally-scoped identities. It always
	// updates the SelectorCache.
	//
	// Returns the list of released (refcount = 0) identities
	ReleaseLocalIdentities(...identity.NumericIdentity) ([]identity.NumericIdentity, error)

	// LookupIdentity returns the identity that corresponds to the given
	// labels.
	LookupIdentity(ctx context.Context, lbls labels.Labels) *identity.Identity

	// LookupIdentityByID returns the identity that corresponds to the given
	// numeric identity.
	LookupIdentityByID(ctx context.Context, id identity.NumericIdentity) *identity.Identity

	// GetIdentityCache returns the current cache of identities that the
	// allocator has allocated. The caller should not modify the resulting
	// identities by pointer.
	GetIdentityCache() identity.IdentityMap

	// GetIdentities returns a copy of the current cache of identities.
	GetIdentities() IdentitiesModel

	// WithholdLocalIdentities holds a set of numeric identities out of the local
	// allocation pool(s). Once withheld, a numeric identity can only be used
	// when explicitly requested via AllocateIdentity(..., oldNID).
	WithholdLocalIdentities(nids []identity.NumericIdentity)

	// UnwithholdLocalIdentities removes numeric identities from the withheld set,
	// freeing them for general allocation.
	UnwithholdLocalIdentities(nids []identity.NumericIdentity)
}
// InitIdentityAllocator creates the global identity allocator. Only the first
// invocation of this function will have an effect. The Caller must have
// initialized well known identities before calling this (by calling
// identity.InitWellKnownIdentities()).
// The client is only used by the CRD identity allocator currently.
// Returns a channel which is closed when initialization of the allocator is
// completed.
// TODO: identity backends are initialized directly in this function, pulling
// in dependencies on kvstore and k8s. It would be better to decouple this,
// since the backends are an interface.
func (m *CachingIdentityAllocator) InitIdentityAllocator(client clientset.Interface, kvstoreClient kvstore.Client) <-chan struct{} {
	m.setupMutex.Lock()
	defer m.setupMutex.Unlock()

	// Calling this again without an intervening Close() is a programming
	// error and aborts.
	if m.IdentityAllocator != nil {
		logging.Fatal(m.logger, "InitIdentityAllocator() in succession without calling Close()")
	}

	m.logger.Info("Initializing identity allocator")

	// The allocatable numeric identity range is derived from the cluster ID.
	minID := idpool.ID(identity.GetMinimalAllocationIdentity(option.Config.ClusterID))
	maxID := idpool.ID(identity.GetMaximumAllocationIdentity(option.Config.ClusterID))

	m.logger.Info(
		"Allocating identities between range",
		logfields.Min, minID,
		logfields.Max, maxID,
		logfields.ClusterID, option.Config.ClusterID,
	)

	// In the case of the allocator being closed, we need to create a new events channel
	// and start a new watch.
	if m.events == nil {
		m.events = make(allocator.AllocatorEventChan, eventsQueueSize)
		m.watcher.watch(m.events)
	}

	// Asynchronously set up the global identity allocator since it connects
	// to the kvstore. setupMutex is re-acquired inside the goroutine so
	// that Close() cannot interleave with backend construction.
	go func(owner IdentityAllocatorOwner, events allocator.AllocatorEventSendChan, minID, maxID idpool.ID) {
		m.setupMutex.Lock()
		defer m.setupMutex.Unlock()

		var (
			backend allocator.Backend
			err     error
		)

		// Select the identity backend according to the configured
		// allocation mode; any construction failure is fatal.
		switch option.Config.IdentityAllocationMode {
		case option.IdentityAllocationModeKVstore:
			m.logger.Debug("Identity allocation backed by KVStore")
			backend, err = kvstoreallocator.NewKVStoreBackend(
				m.logger,
				kvstoreallocator.KVStoreBackendConfiguration{
					BasePath: m.identitiesPath,
					Suffix:   owner.GetNodeSuffix(),
					Typ:      &key.GlobalIdentity{},
					Backend:  kvstoreClient,
				})
			if err != nil {
				logging.Fatal(m.logger, "Unable to initialize kvstore backend for identity allocation", logfields.Error, err)
			}

		case option.IdentityAllocationModeCRD:
			m.logger.Debug("Identity allocation backed by CRD")
			backend, err = identitybackend.NewCRDBackend(m.logger, identitybackend.CRDBackendConfiguration{
				Store:    nil,
				StoreSet: &atomic.Bool{},
				Client:   client,
				KeyFunc:  (&key.GlobalIdentity{}).PutKeyFromMap,
			})
			if err != nil {
				logging.Fatal(m.logger, "Unable to initialize Kubernetes CRD backend for identity allocation", logfields.Error, err)
			}

		case option.IdentityAllocationModeDoubleWriteReadKVstore, option.IdentityAllocationModeDoubleWriteReadCRD:
			// Double-write mode writes identities to both CRD and kvstore
			// backends; the allocation mode decides which one is read from.
			readFromKVStore := true
			if option.Config.IdentityAllocationMode == option.IdentityAllocationModeDoubleWriteReadCRD {
				readFromKVStore = false
			}
			m.logger.Debug("Double-Write Identity allocation mode (CRD and KVStore) with reads from KVStore", logfields.ReadFromKVStore, readFromKVStore)
			backend, err = doublewrite.NewDoubleWriteBackend(
				m.logger,
				doublewrite.DoubleWriteBackendConfiguration{
					CRDBackendConfiguration: identitybackend.CRDBackendConfiguration{
						Store:    nil,
						StoreSet: &atomic.Bool{},
						Client:   client,
						KeyFunc:  (&key.GlobalIdentity{}).PutKeyFromMap,
					},
					KVStoreBackendConfiguration: kvstoreallocator.KVStoreBackendConfiguration{
						BasePath: m.identitiesPath,
						Suffix:   owner.GetNodeSuffix(),
						Typ:      &key.GlobalIdentity{},
						Backend:  kvstoreClient,
					},
					ReadFromKVStore: readFromKVStore,
				})
			if err != nil {
				logging.Fatal(m.logger, "Unable to initialize the Double Write backend for identity allocation", logfields.Error, err)
			}
		default:
			logging.Fatal(m.logger, fmt.Sprintf("Unsupported identity allocation mode %s", option.Config.IdentityAllocationMode))
		}

		allocOptions := []allocator.AllocatorOption{
			allocator.WithMax(maxID), allocator.WithMin(minID),
			allocator.WithEvents(events), allocator.WithSyncInterval(m.syncInterval),
			allocator.WithPrefixMask(idpool.ID(option.Config.ClusterID << identity.GetClusterIDShift())),
		}
		if m.operatorIDManagement {
			allocOptions = append(allocOptions, allocator.WithOperatorIDManagement())
		} else {
			allocOptions = append(allocOptions, allocator.WithMasterKeyProtection())
		}
		if m.maxAllocAttempts > 0 {
			allocOptions = append(allocOptions, allocator.WithMaxAllocAttempts(m.maxAllocAttempts))
		}
		a, err := allocator.NewAllocator(m.logger, &key.GlobalIdentity{}, backend, allocOptions...)
		if err != nil {
			logging.Fatal(m.logger, fmt.Sprintf("Unable to initialize IdentityAllocator with backend %s", option.Config.IdentityAllocationMode), logfields.Error, err)
		}

		m.IdentityAllocator = a
		// Signal to waiters that the global allocator is ready for use.
		close(m.globalIdentityAllocatorInitialized)
	}(m.owner, m.events, minID, maxID)

	return m.globalIdentityAllocatorInitialized
}
// EnableCheckpointing enables checkpointing the local allocator state.
// The CachingIdentityAllocator is used in multiple places, but we only want to
// checkpoint the "primary" allocator
func (m *CachingIdentityAllocator) EnableCheckpointing() {
	// Disallow other local allocation until we've restored from the checkpoint.
	// This will be unlocked in ReleaseIdentities
	m.localLock.Lock()

	controllerManager := controller.NewManager()
	controllerGroup := controller.NewGroup("identity-allocator")
	controllerName := "local-identity-checkpoint"
	triggerDone := make(chan struct{})

	// NOTE(review): the error from NewTrigger is deliberately discarded —
	// presumably it can only fail on invalid Parameters (e.g. nil TriggerFunc),
	// which cannot happen here; confirm against the trigger package.
	t, _ := trigger.NewTrigger(trigger.Parameters{
		MinInterval: 10 * time.Second,
		TriggerFunc: func(reasons []string) {
			// Each trigger (re-)installs the checkpoint controller, which runs
			// m.checkpoint to persist the local allocator state.
			controllerManager.UpdateController(controllerName, controller.ControllerParams{
				Group:    controllerGroup,
				DoFunc:   m.checkpoint,
				StopFunc: m.checkpoint, // perform one last checkpoint when the controller is removed
			})
		},
		ShutdownFunc: func() {
			controllerManager.RemoveControllerAndWait(controllerName) // waits for StopFunc
			// Signals Close() that the final checkpoint has been written.
			close(triggerDone)
		},
	})

	m.checkpointTrigger = t
	m.triggerDone = triggerDone
}
// eventsQueueSize is the buffer capacity of the allocator event channel that
// is drained by the identity watcher (see identityWatcher.watch).
const eventsQueueSize = 1024
// InitIdentityAllocator creates the identity allocator. Only the first
// invocation of this function will have an effect. The Caller must have
// initialized well known identities before calling this (by calling
// identity.InitWellKnownIdentities()).
// client and identityStore are only used by the CRD identity allocator,
// currently, and identityStore may be nil.
// Returns a channel which is closed when initialization of the allocator is
// completed.
// TODO: identity backends are initialized directly in this function, pulling
// in dependencies on kvstore and k8s. It would be better to decouple this,
// since the backends are an interface.
// NewCachingIdentityAllocator creates a new instance of an
// CachingIdentityAllocator.
func NewCachingIdentityAllocator(logger *slog.Logger, owner IdentityAllocatorOwner, config AllocatorConfig) *CachingIdentityAllocator {
	// The watcher needs the logger: collectEvent logs through
	// identityWatcher.logger, and calling Warn on a nil *slog.Logger panics.
	// Previously the field was left unset here.
	watcher := identityWatcher{
		logger: logger,
		owner:  owner,
	}

	m := &CachingIdentityAllocator{
		logger:                             logger,
		globalIdentityAllocatorInitialized: make(chan struct{}),
		owner:                              owner,
		identitiesPath:                     IdentitiesPath,
		watcher:                            watcher,
		events:                             make(allocator.AllocatorEventChan, eventsQueueSize),
		operatorIDManagement:               config.EnableOperatorManageCIDs,
		maxAllocAttempts:                   config.maxAllocAttempts,
		timeout:                            config.Timeout,
		syncInterval:                       config.SyncInterval,
	}

	// An empty RunDir indicates a unit test; checkpointing stays disabled then.
	if option.Config.RunDir != "" {
		m.checkpointPath = filepath.Join(option.Config.StateDir, CheckpointFile)
	}

	// Start draining allocator events and forwarding them to the owner.
	m.watcher.watch(m.events)

	// Local identity cache can be created synchronously since it doesn't
	// rely upon any external resources (e.g., external kvstore).
	m.localIdentities = newLocalIdentityCache(logger, identity.IdentityScopeLocal, identity.MinAllocatorLocalIdentity, identity.MaxAllocatorLocalIdentity)
	m.localNodeIdentities = newLocalIdentityCache(logger, identity.IdentityScopeRemoteNode, identity.MinAllocatorLocalIdentity, identity.MaxAllocatorLocalIdentity)

	return m
}
// Close closes the identity allocator
func (m *CachingIdentityAllocator) Close() {
	m.setupMutex.Lock()
	defer m.setupMutex.Unlock()

	// Shut down checkpointing first: Shutdown() runs the trigger's
	// ShutdownFunc, which removes the controller (performing one final
	// checkpoint via StopFunc) and then closes triggerDone.
	if m.checkpointTrigger != nil {
		m.checkpointTrigger.Shutdown()
		<-m.triggerDone
		m.checkpointTrigger = nil
	}

	select {
	case <-m.globalIdentityAllocatorInitialized:
		// This means the channel was closed and therefore the IdentityAllocator == nil will never be true
	default:
		if m.IdentityAllocator == nil {
			m.logger.Error("Close() called without calling InitIdentityAllocator() first")
			return
		}
	}

	m.IdentityAllocator.Delete()
	if m.events != nil {
		close(m.events)
		m.events = nil
	}

	// Reset state so a subsequent initialization starts from scratch.
	m.IdentityAllocator = nil
	m.globalIdentityAllocatorInitialized = make(chan struct{})
}
// WaitForInitialGlobalIdentities waits for the initial set of global security
// identities to have been received and populated into the allocator cache.
func (m *CachingIdentityAllocator) WaitForInitialGlobalIdentities(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, m.timeout)
	defer cancel()

	select {
	case <-ctx.Done():
		return fmt.Errorf("initial global identity sync was cancelled: %w", ctx.Err())
	case <-m.globalIdentityAllocatorInitialized:
		// Allocator is set up; now wait until its cache is fully synced.
		return m.IdentityAllocator.WaitForInitialSync(ctx)
	}
}
var ErrNonLocalIdentity = fmt.Errorf("labels would result in global identity")
// AllocateLocalIdentity works the same as AllocateIdentity, but it guarantees that the allocated
// identity will be local-only. If the provided set of labels does not map to a local identity scope,
// this will return an error.
func (m *CachingIdentityAllocator) AllocateLocalIdentity(lbls labels.Labels, notifyOwner bool, oldNID identity.NumericIdentity) (*identity.Identity, bool, error) {
	// Reserved identities are pre-allocated; hand them back without touching
	// the local caches.
	if reserved := identity.LookupReservedIdentityByLabels(lbls); reserved != nil {
		m.logger.Debug(
			"Resolving reserved identity",
			logfields.Identity, reserved.ID,
			logfields.IdentityLabels, lbls,
			logfields.New, false,
		)
		return reserved, false, nil
	}

	m.logger.Debug(
		"Resolving local identity",
		logfields.IdentityLabels, lbls,
	)

	m.localLock.Lock()
	defer m.localLock.Unlock()
	return m.allocateLocalIdentityLocked(lbls, notifyOwner, oldNID)
}
// allocateLocalIdentityLocked allocates (or bumps the reference count of) a
// local-scope identity for lbls. Callers must hold m.localLock.
// oldNID, when valid, requests a specific previously-used numeric identity.
func (m *CachingIdentityAllocator) allocateLocalIdentityLocked(lbls labels.Labels, notifyOwner bool, oldNID identity.NumericIdentity) (id *identity.Identity, allocated bool, err error) {
	// Allocate according to scope
	var metricLabel string
	switch scope := identity.ScopeForLabels(lbls); scope {
	case identity.IdentityScopeLocal:
		id, allocated, err = m.localIdentities.lookupOrCreate(lbls, oldNID)
		metricLabel = identity.NodeLocalIdentityType
	case identity.IdentityScopeRemoteNode:
		id, allocated, err = m.localNodeIdentities.lookupOrCreate(lbls, oldNID)
		metricLabel = identity.RemoteNodeIdentityType
	default:
		// Global-scope labels must go through the global allocator; reaching
		// this branch is a programming error on the caller's side.
		m.logger.Error(
			"BUG: attempt to allocate local identity for labels, but a global identity is required",
			logfields.Labels, lbls,
			logfields.Scope, scope,
		)
		return nil, false, ErrNonLocalIdentity
	}
	if err != nil {
		return nil, false, err
	}

	// Bookkeeping happens only on a fresh allocation, not when an existing
	// identity merely gained another reference.
	if allocated {
		metrics.Identity.WithLabelValues(metricLabel).Inc()
		for labelSource := range lbls.CollectSources() {
			metrics.IdentityLabelSources.WithLabelValues(labelSource).Inc()
		}

		// Schedule a checkpoint so the new identity survives an agent restart.
		if m.checkpointTrigger != nil {
			m.checkpointTrigger.Trigger()
		}

		if notifyOwner {
			added := identity.IdentityMap{
				id.ID: id.LabelArray,
			}
			m.owner.UpdateIdentities(added, nil)
		}
	}
	return
}
// needsGlobalIdentity returns true if these labels require
// allocating a global identity
func needsGlobalIdentity(lbls labels.Labels) bool {
	// Reserved identities are pre-allocated and never need global allocation.
	reserved := identity.LookupReservedIdentityByLabels(lbls) != nil
	return !reserved && identity.ScopeForLabels(lbls) == identity.IdentityScopeGlobal
}
// AllocateIdentity allocates an identity described by the specified labels. If
// an identity for the specified set of labels already exist, the identity is
// re-used and reference counting is performed, otherwise a new identity is
// allocated via the kvstore or via the local identity allocator.
// A possible previously used numeric identity for these labels can be passed
// in as the 'oldNID' parameter; identity.InvalidIdentity must be passed if no
// previous numeric identity exists.
func (m *CachingIdentityAllocator) AllocateIdentity(ctx context.Context, lbls labels.Labels, notifyOwner bool, oldNID identity.NumericIdentity) (id *identity.Identity, allocated bool, err error) {
	ctx, cancel := context.WithTimeout(ctx, m.timeout)
	defer cancel()

	// Reserved-, local- and remote-node-scoped labels are served entirely by
	// the local allocators.
	if !needsGlobalIdentity(lbls) {
		return m.AllocateLocalIdentity(lbls, notifyOwner, oldNID)
	}

	if option.Config.Debug {
		m.logger.Debug(
			"Resolving global identity",
			logfields.IdentityLabels, lbls,
		)
	}

	// This will block until the kvstore can be accessed and all identities
	// were successfully synced
	err = m.WaitForInitialGlobalIdentities(ctx)
	if err != nil {
		return nil, false, err
	}

	// The allocator can be torn down concurrently (see Close()), so re-check.
	if m.IdentityAllocator == nil {
		return nil, false, fmt.Errorf("allocator not initialized")
	}

	idp, allocated, isNewLocally, err := m.IdentityAllocator.Allocate(ctx, &key.GlobalIdentity{LabelArray: lbls.LabelArray()})
	if err != nil {
		return nil, false, err
	}

	// Guard against a backend handing out an ID outside the valid range.
	if idp > identity.MaxNumericIdentity {
		return nil, false, fmt.Errorf("%d: numeric identity too large", idp)
	}
	id = identity.NewIdentity(identity.NumericIdentity(idp), lbls)

	if option.Config.Debug {
		m.logger.Debug(
			"Resolved identity",
			logfields.Identity, idp,
			logfields.IdentityLabels, lbls,
			logfields.New, allocated,
			logfields.NewLocally, isNewLocally,
		)
	}

	// Count identities that are new to this node, whether or not they were
	// freshly created in the kvstore ('allocated') or merely first seen
	// locally ('isNewLocally').
	if allocated || isNewLocally {
		metrics.Identity.WithLabelValues(identity.ClusterLocalIdentityType).Inc()
		for labelSource := range lbls.CollectSources() {
			metrics.IdentityLabelSources.WithLabelValues(labelSource).Inc()
		}
	}

	// Notify the owner of the newly added identities so that the
	// cached identities can be updated ASAP, rather than just
	// relying on the kv-store update events.
	if allocated && notifyOwner {
		added := identity.IdentityMap{
			id.ID: id.LabelArray,
		}
		m.owner.UpdateIdentities(added, nil)
	}
	return id, allocated, nil
}
// WithholdLocalIdentities marks the given numeric identities as reserved for
// later restoration, so regular allocation does not hand them out.
func (m *CachingIdentityAllocator) WithholdLocalIdentities(nids []identity.NumericIdentity) {
	m.logger.Debug(
		"Withholding numeric identities for later restoration",
		logfields.Identity, nids,
	)

	// Each cache withholds the IDs in its own scope and returns the remainder.
	remaining := m.localIdentities.withhold(nids)
	remaining = m.localNodeIdentities.withhold(remaining)
	if len(remaining) > 0 {
		m.logger.Error(
			"Attempt to restore invalid numeric identities.",
			logfields.Identity, remaining,
		)
	}
}
// UnwithholdLocalIdentities releases previously withheld numeric identities
// back into the general allocation pool.
func (m *CachingIdentityAllocator) UnwithholdLocalIdentities(nids []identity.NumericIdentity) {
	m.logger.Debug(
		"Unwithholding numeric identities",
		logfields.Identity, nids,
	)

	// Each cache simply skips IDs outside its own scope.
	m.localIdentities.unwithhold(nids)
	m.localNodeIdentities.unwithhold(nids)
}
// checkpoint writes the state of the local allocators to disk. This is used for restoration,
// to ensure that numeric identities are, as much as possible, stable across agent restarts.
//
// Do not call this directly, rather, use m.checkpointTrigger.Trigger()
func (m *CachingIdentityAllocator) checkpoint(ctx context.Context) error {
	if m.checkpointPath == "" {
		return nil // this is a unit test
	}
	scopedLog := m.logger.With(logfields.Path, m.checkpointPath)

	// Collect all local and remote-node-scoped identities into one slice.
	ids := make([]*identity.Identity, 0, m.localIdentities.size()+m.localNodeIdentities.size())
	ids = m.localIdentities.checkpoint(ids)
	ids = m.localNodeIdentities.checkpoint(ids)

	// use renameio to prevent partial writes
	out, err := renameio.NewPendingFile(m.checkpointPath, renameio.WithExistingPermissions(), renameio.WithPermissions(0o600))
	if err != nil {
		scopedLog.Error("failed to prepare checkpoint file", logfields.Error, err)
		return err
	}
	// Cleanup removes the temp file on the error paths; it is harmless after
	// a successful CloseAtomicallyReplace.
	defer out.Cleanup()

	jw := jsoniter.ConfigFastest.NewEncoder(out)
	if err := jw.Encode(ids); err != nil {
		scopedLog.Error("failed to marshal identity checkpoint state", logfields.Error, err)
		return err
	}
	// Atomically rename the pending file over the checkpoint path.
	if err := out.CloseAtomicallyReplace(); err != nil {
		scopedLog.Error("failed to write identity checkpoint file", logfields.Error, err)
		return err
	}
	scopedLog.Debug("Wrote local identity allocator checkpoint")
	return nil
}
// RestoreLocalIdentities reads in the checkpointed local allocator state
// from disk and allocates a reference to every previously existing identity.
//
// Once all identity-allocating objects are synchronized (e.g. network policies,
// remote nodes), call ReleaseRestoredIdentities to release the held references.
func (m *CachingIdentityAllocator) RestoreLocalIdentities() (map[identity.NumericIdentity]*identity.Identity, error) {
	if m.checkpointPath == "" {
		return nil, nil // unit test
	}
	if m.checkpointTrigger == nil {
		m.logger.Error("BUG: RestoreLocalIdentities() called without EnableCheckpointing()")
		return nil, nil
	}

	// The allocator was started with local allocation locked to ensure restoration
	// always runs first. Once done, we must unlock so other allocation can proceed.
	// (The lock was taken in EnableCheckpointing; allocateLocalIdentityLocked
	// below relies on it being held.)
	defer m.localLock.Unlock()

	scopedLog := m.logger.With(logfields.Path, m.checkpointPath)

	// Read in checkpoint file
	fp, err := os.Open(m.checkpointPath)
	if err != nil {
		if os.IsNotExist(err) {
			// Nothing to restore (e.g. first start of the agent).
			scopedLog.Info("No identity checkpoint file found, skipping restoration")
			return nil, nil
		}
		return nil, fmt.Errorf("failed to open identity checkpoint file %s: %w", m.checkpointPath, err)
	}
	defer fp.Close()

	jr := jsoniter.ConfigFastest.NewDecoder(fp)
	var ids []*identity.Identity
	if err := jr.Decode(&ids); err != nil {
		return nil, fmt.Errorf("failed to parse identity checkpoint file %s: %w", m.checkpointPath, err)
	}

	if len(ids) == 0 {
		return nil, nil
	}

	// Load in checkpoint:
	// - withhold numeric identities
	// - allocate previous identities
	// - update SelectorCache
	// - unwithhold numeric IDs

	scopedLog.Info("Restoring checkpointed local identities", logfields.Count, len(ids))
	m.restoredIdentities = make(map[identity.NumericIdentity]*identity.Identity, len(ids))
	added := make(identity.IdentityMap, len(ids))

	// Withhold restored local identities from allocation (except by request).
	// This is insurance against a code change causing identities to be allocated
	// differently, which could disrupt restoration.
	// Withholding numeric IDs prevents them from being allocated except by explicit request.
	oldNumIDs := make([]identity.NumericIdentity, 0, len(ids))
	for _, id := range ids {
		oldNumIDs = append(oldNumIDs, id.ID)
	}
	m.WithholdLocalIdentities(oldNumIDs)

	for _, oldID := range ids {
		// Ensure we do not restore any global identities or identities that somehow are
		// changing scope. There's no point, as the numeric identity will be different.
		if scope := identity.ScopeForLabels(oldID.Labels); scope != oldID.ID.Scope() || needsGlobalIdentity(oldID.Labels) {
			// Should not happen, except when the scope for labels changes
			// such as disabling policy-cidr-match-mode=nodes
			scopedLog.Warn(
				"skipping restore of non-local or re-scoped identity",
				logfields.Identity, oldID,
				logfields.Scope, scope,
			)
			continue
		}

		newID, _, err := m.allocateLocalIdentityLocked(
			oldID.Labels,
			false,    // do not add to selector cache; we'll batch that later
			oldID.ID, // request previous numeric ID
		)
		if err != nil {
			// Best effort: a single failed restore does not abort the rest.
			scopedLog.Error(
				"failed to restore checkpointed local identity, continuing",
				logfields.Identity, oldID,
				logfields.Error, err,
			)
		} else {
			m.restoredIdentities[newID.ID] = newID
			added[newID.ID] = newID.LabelArray
			if newID.ID != oldID.ID {
				// Paranoia, shouldn't happen
				scopedLog.Warn(
					"Restored local identity has different numeric ID",
					logfields.Identity, oldID,
				)
			}
		}
	}

	// Add identities to SelectorCache
	if m.owner != nil {
		m.owner.UpdateIdentities(added, nil)
	}
	// Release all withheld numeric identities back for general use.
	m.UnwithholdLocalIdentities(oldNumIDs)

	// return the set of restored identities, which is useful for prefix restoration
	return m.restoredIdentities, nil
}
// ReleaseRestoredIdentities releases any identities that were restored, reducing their reference
// count and cleaning up as necessary. This always notifies the owner (i.e. updates the SelectorCache).
func (m *CachingIdentityAllocator) ReleaseRestoredIdentities() {
	toRelease := make([]identity.NumericIdentity, 0, len(m.restoredIdentities))
	for nid := range m.restoredIdentities {
		toRelease = append(toRelease, nid)
	}

	dealloc, err := m.ReleaseLocalIdentities(toRelease...)
	if err != nil {
		// Restored identities are all local-scope, so an error is unexpected.
		m.logger.Error(
			"failed to release restored identities",
			logfields.Error, err,
		)
	}

	if option.Config.Debug {
		m.logger.Debug(
			"Released restored identity references",
			logfields.Count, len(dealloc),
		)
	}

	// Drop the map so its memory can be reclaimed.
	m.restoredIdentities = nil
}
// Release is the reverse operation of AllocateIdentity() and releases the
// identity again. This function may result in kvstore operations.
// After the last user has released the ID, the returned lastUse value is true.
func (m *CachingIdentityAllocator) Release(ctx context.Context, id *identity.Identity, notifyOwner bool) (released bool, err error) {
	ctx, cancel := context.WithTimeout(ctx, m.timeout)
	defer cancel()

	// Ignore reserved identities.
	if id.IsReserved() {
		return false, nil
	}

	// Release local identities
	switch identity.ScopeForLabels(id.Labels) {
	case identity.IdentityScopeLocal, identity.IdentityScopeRemoteNode:
		dealloc, err := m.ReleaseLocalIdentities(id.ID)
		return len(dealloc) > 0, err
	}

	// This will block until the kvstore can be accessed and all identities
	// were successfully synced
	err = m.WaitForInitialGlobalIdentities(ctx)
	if err != nil {
		return false, err
	}

	// The allocator can be torn down concurrently (see Close()), so re-check.
	if m.IdentityAllocator == nil {
		return false, fmt.Errorf("allocator not initialized")
	}

	// Rely on the eventual Kv-Store events for delete
	// notifications of kv-store allocated identities. Even if an
	// ID is no longer used locally, it may still be used by
	// remote nodes, so we can't rely on the locally computed
	// "lastUse".
	released, err = m.IdentityAllocator.Release(ctx, &key.GlobalIdentity{LabelArray: id.LabelArray})
	if released {
		// Metrics are only decremented when this node dropped its last
		// local reference to the identity.
		for labelSource := range id.Labels.CollectSources() {
			metrics.IdentityLabelSources.WithLabelValues(labelSource).Dec()
		}
		metrics.Identity.WithLabelValues(identity.ClusterLocalIdentityType).Dec()
	}

	// Remove this ID from the selectorcache and any other identity "watchers"
	if m.owner != nil && released && notifyOwner {
		deleted := identity.IdentityMap{
			id.ID: id.LabelArray,
		}
		m.owner.UpdateIdentities(nil, deleted)
	}
	return
}
// ReleaseLocalIdentities releases solely local identities. It always updates
// the SelectorCache.
//
// Returns the list of released (refcount = 0) identities
func (m *CachingIdentityAllocator) ReleaseLocalIdentities(nids ...identity.NumericIdentity) ([]identity.NumericIdentity, error) {
	var dealloc []identity.NumericIdentity
	var errs []error

	m.localLock.Lock()
	defer m.localLock.Unlock()

	deleted := make(identity.IdentityMap, len(nids))
	for _, nid := range nids {
		// Reserved identities are never allocated and thus never released.
		if rid := identity.LookupReservedIdentity(nid); rid != nil {
			continue
		}

		// Select the cache (and metric label) matching the ID's scope.
		var alloc *localIdentityCache
		var metricVal string
		switch nid.Scope() {
		case identity.IdentityScopeLocal:
			alloc = m.localIdentities
			metricVal = identity.NodeLocalIdentityType
		case identity.IdentityScopeRemoteNode:
			alloc = m.localNodeIdentities
			metricVal = identity.RemoteNodeIdentityType
		default:
			errs = append(errs, fmt.Errorf("attempt to release non-local identity %d", nid))
			continue
		}

		// IDs not found in the cache are skipped (possibly already released).
		id := alloc.lookupByID(nid)
		if id == nil {
			continue
		}
		released := alloc.release(id)
		if released {
			dealloc = append(dealloc, nid)
			deleted[nid] = id.LabelArray
			for labelSource := range id.Labels.CollectSources() {
				metrics.IdentityLabelSources.WithLabelValues(labelSource).Dec()
			}
			metrics.Identity.WithLabelValues(metricVal).Dec()
		}
	}

	if len(deleted) > 0 {
		// Persist the shrunken state and notify the owner of the removals.
		if m.checkpointTrigger != nil {
			m.checkpointTrigger.Trigger()
		}
		m.owner.UpdateIdentities(nil, deleted)
	}

	return dealloc, errors.Join(errs...)
}
// WatchRemoteIdentities returns a RemoteCache instance which can be later
// started to watch identities in another kvstore and sync them to the local
// identity cache. remoteName should be unique unless replacing an existing
// remote's backend. When cachedPrefix is set, identities are assumed to be
// stored under the "cilium/cache" prefix, and the watcher is adapted accordingly.
func (m *CachingIdentityAllocator) WatchRemoteIdentities(remoteName string, remoteID uint32, backend kvstore.BackendOperations, cachedPrefix bool) (allocator.RemoteIDCache, error) {
	// Block until the main allocator is initialized; its event channel and
	// remote-cache registry are used below.
	<-m.globalIdentityAllocatorInitialized

	prefix := m.identitiesPath
	if cachedPrefix {
		prefix = path.Join(kvstore.StateToCachePrefix(prefix), remoteName)
	}

	remoteAllocatorBackend, err := kvstoreallocator.NewKVStoreBackend(m.logger, kvstoreallocator.KVStoreBackendConfiguration{BasePath: prefix, Suffix: m.owner.GetNodeSuffix(), Typ: &key.GlobalIdentity{}, Backend: backend})
	if err != nil {
		return nil, fmt.Errorf("error setting up remote allocator backend: %w", err)
	}

	// The remote allocator feeds the main allocator's event stream, performs
	// no GC and is not auto-started. Cache entries are validated to belong to
	// the remote cluster's ID range and to carry its cluster-name label.
	remoteAlloc, err := allocator.NewAllocator(m.logger,
		&key.GlobalIdentity{}, remoteAllocatorBackend,
		allocator.WithEvents(m.IdentityAllocator.GetEvents()), allocator.WithoutGC(), allocator.WithoutAutostart(),
		allocator.WithCacheValidator(clusterIDValidator(remoteID)),
		allocator.WithCacheValidator(clusterNameValidator(remoteName)),
	)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize remote Identity Allocator: %w", err)
	}

	return m.IdentityAllocator.NewRemoteCache(remoteName, remoteAlloc), nil
}
// RemoveRemoteIdentities drops the remote kvstore cache registered under the
// given name, if the global allocator has been initialized.
func (m *CachingIdentityAllocator) RemoveRemoteIdentities(name string) {
	if m.IdentityAllocator == nil {
		return
	}
	m.IdentityAllocator.RemoveRemoteKVStore(name)
}
// IdentityChangeKind distinguishes the kinds of identity change events
// emitted by Observe / LocalIdentityChanges.
type IdentityChangeKind string

const (
	// IdentityChangeSync mirrors allocator.AllocatorChangeSync.
	IdentityChangeSync IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeSync)
	// IdentityChangeUpsert mirrors allocator.AllocatorChangeUpsert (added or updated identity).
	IdentityChangeUpsert IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeUpsert)
	// IdentityChangeDelete mirrors allocator.AllocatorChangeDelete (removed identity).
	IdentityChangeDelete IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeDelete)
)

// IdentityChange describes a single identity event: its kind, the numeric
// identity concerned and, for upserts, the labels carried by the event.
type IdentityChange struct {
	Kind   IdentityChangeKind
	ID     identity.NumericIdentity
	Labels labels.Labels
}
// Observe identity changes. Doesn't include local identities. Conforms to stream.Observable.
// Replays the current state of the cache when subscribing.
func (m *CachingIdentityAllocator) Observe(ctx context.Context, next func(IdentityChange), complete func(error)) {
	// This short-lived go routine serves the purpose of waiting for the global identity allocator becoming ready
	// before starting to observe the underlying allocator for changes.
	// m.IdentityAllocator is backed by a stream.FuncObservable, that will start its own
	// go routine. Therefore, the current go routine will stop and free the lock on the setupMutex after the registration.
	go func() {
		if err := m.WaitForInitialGlobalIdentities(ctx); err != nil {
			// NOTE(review): this passes ctx.Err() rather than err; if the wait
			// failed for a non-context reason, complete(nil) would be called
			// here — confirm this is intended.
			complete(ctx.Err())
			return
		}

		m.setupMutex.Lock()
		defer m.setupMutex.Unlock()

		// The allocator may have been closed while we were waiting.
		if m.IdentityAllocator == nil {
			complete(errors.New("allocator no longer initialized"))
			return
		}

		// Observe the underlying allocator for changes and map the events to identities.
		stream.Map[allocator.AllocatorChange, IdentityChange](
			m.IdentityAllocator,
			func(change allocator.AllocatorChange) IdentityChange {
				return IdentityChange{
					Kind:   IdentityChangeKind(change.Kind),
					ID:     identity.NumericIdentity(change.ID),
					Labels: mapLabels(change.Key),
				}
			},
		).Observe(ctx, next, complete)
	}()
}
// mapLabels converts an allocator key into labels.Labels. A nil key yields
// nil labels.
func mapLabels(allocatorKey allocator.AllocatorKey) labels.Labels {
	if allocatorKey == nil {
		return nil
	}

	idLabels := labels.Labels{}
	for k, v := range allocatorKey.GetAsMap() {
		lbl := labels.ParseLabel(k + "=" + v)
		idLabels[lbl.Key] = lbl
	}
	return idLabels
}
// LocalIdentityChanges returns an observable for (only) node-local identities.
// Replays current state on subscription followed by a Sync event.
func (m *CachingIdentityAllocator) LocalIdentityChanges() stream.Observable[IdentityChange] {
	// localIdentityCache implements stream.Observable (see its changeSource field).
	return m.localIdentities
}
// clusterIDValidator returns a validator ensuring that the identity ID belongs
// to the ClusterID range.
func clusterIDValidator(clusterID uint32) allocator.CacheValidator {
	// Precompute the inclusive bounds once; avoid shadowing the builtins
	// min/max inside the returned closure.
	lower := idpool.ID(identity.GetMinimalAllocationIdentity(clusterID))
	upper := idpool.ID(identity.GetMaximumAllocationIdentity(clusterID))

	return func(_ allocator.AllocatorChangeKind, id idpool.ID, _ allocator.AllocatorKey) error {
		if lower <= id && id <= upper {
			return nil
		}
		return fmt.Errorf("ID %d does not belong to the allocation range of cluster ID %d", id, clusterID)
	}
}
// clusterNameValidator returns a validator ensuring that the identity labels
// include the one specifying the correct cluster name.
func clusterNameValidator(clusterName string) allocator.CacheValidator {
	return func(kind allocator.AllocatorChangeKind, _ idpool.ID, ak allocator.AllocatorKey) error {
		if kind != allocator.AllocatorChangeUpsert {
			// Deletions pass through unchecked: labels may not be propagated,
			// and filtering them out could leave stale identities behind.
			return nil
		}

		gi, ok := ak.(*key.GlobalIdentity)
		if !ok {
			return fmt.Errorf("unsupported key type %T", ak)
		}

		found := false
		for _, lbl := range gi.LabelArray {
			if lbl.Key != api.PolicyLabelCluster {
				continue
			}
			if lbl.Source != labels.LabelSourceK8s {
				return fmt.Errorf("unexpected source for cluster label: got %s, expected %s", lbl.Source, labels.LabelSourceK8s)
			}
			if lbl.Value != clusterName {
				return fmt.Errorf("unexpected cluster name: got %s, expected %s", lbl.Value, clusterName)
			}
			found = true
		}

		if !found {
			return fmt.Errorf("could not find expected label %s", api.PolicyLabelCluster)
		}
		return nil
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cache
import (
"context"
"log/slog"
"reflect"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/allocator"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/key"
identitymodel "github.com/cilium/cilium/pkg/identity/model"
"github.com/cilium/cilium/pkg/idpool"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// IdentitiesModel is a wrapper so that we can implement the sort.Interface
// to sort the slice by ID
// NOTE(review): only Less is visible here; Len and Swap are presumably
// defined elsewhere to complete sort.Interface — confirm.
type IdentitiesModel []*models.Identity

// Less returns true if the element in index `i` is lower than the element
// in index `j`
func (s IdentitiesModel) Less(i, j int) bool {
	return s[i].ID < s[j].ID
}
// FromIdentityCache populates the provided model from an identity cache.
func (s IdentitiesModel) FromIdentityCache(cache identity.IdentityMap) IdentitiesModel {
	for nid, lblArray := range cache {
		// Build a full Identity value so the shared model converter can be used.
		id := &identity.Identity{
			ID:     nid,
			Labels: lblArray.Labels(),
		}
		s = append(s, identitymodel.CreateModel(id))
	}
	return s
}
// GetIdentityCache returns a cache of all known identities
func (m *CachingIdentityAllocator) GetIdentityCache() identity.IdentityMap {
	m.logger.Debug("getting identity cache for identity allocator manager")
	cache := identity.IdentityMap{}

	// Global identities, if the allocator is up.
	if m.isGlobalIdentityAllocatorInitialized() {
		m.IdentityAllocator.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) {
			if val == nil {
				return
			}
			gi, ok := val.(*key.GlobalIdentity)
			if !ok {
				m.logger.Warn(
					"Ignoring unknown identity type",
					logfields.Type, reflect.TypeOf(val),
					logfields.Value, val,
				)
				return
			}
			cache[identity.NumericIdentity(id)] = gi.LabelArray
		})
	}

	// Reserved identities.
	identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
		cache[ni] = id.Labels.LabelArray()
	})

	// Node-local and remote-node-scoped identities. Loop variables are named
	// to avoid shadowing the identity package.
	for _, localID := range m.localIdentities.GetIdentities() {
		cache[localID.ID] = localID.Labels.LabelArray()
	}
	for _, nodeID := range m.localNodeIdentities.GetIdentities() {
		cache[nodeID.ID] = nodeID.Labels.LabelArray()
	}

	return cache
}
// GetIdentities returns all known identities
func (m *CachingIdentityAllocator) GetIdentities() IdentitiesModel {
	identities := IdentitiesModel{}

	// Global identities, if the allocator is up.
	if m.isGlobalIdentityAllocatorInitialized() {
		m.IdentityAllocator.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) {
			gi, ok := val.(*key.GlobalIdentity)
			if !ok {
				return
			}
			globalID := identity.NewIdentityFromLabelArray(identity.NumericIdentity(id), gi.LabelArray)
			identities = append(identities, identitymodel.CreateModel(globalID))
		})
	}

	// Reserved identities.
	identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
		identities = append(identities, identitymodel.CreateModel(id))
	})

	// Node-local and remote-node-scoped identities.
	for _, localID := range m.localIdentities.GetIdentities() {
		identities = append(identities, identitymodel.CreateModel(localID))
	}
	for _, nodeID := range m.localNodeIdentities.GetIdentities() {
		identities = append(identities, identitymodel.CreateModel(nodeID))
	}

	return identities
}
// identityWatcher drains allocator events, batches them into disjoint
// added/deleted sets, and forwards each batch to the owner via
// UpdateIdentities (see watch / collectEvent).
type identityWatcher struct {
	logger *slog.Logger
	owner  IdentityAllocatorOwner
}
// collectEvent records the 'event' as an added or deleted identity,
// and makes sure that any identity is present in only one of the sets
// (added or deleted). Returns false only when an upsert carries an
// unrecognized key type.
func collectEvent(logger *slog.Logger, event allocator.AllocatorEvent, added, deleted identity.IdentityMap) bool {
	nid := identity.NumericIdentity(event.ID)

	if event.Typ != allocator.AllocatorChangeUpsert {
		// Deletion: reverse a pending add, but still record the delete —
		// the id may have existed before this batch, so it is not a no-op.
		delete(added, nid)
		deleted[nid] = labels.LabelArray{}
		return true
	}

	// Only create events have the key.
	gi, ok := event.Key.(*key.GlobalIdentity)
	if !ok {
		logger.Warn(
			"collectEvent: Ignoring unknown identity type",
			logfields.Type, reflect.TypeOf(event.Key),
			logfields.Value, event.Key,
		)
		return false
	}

	// Un-delete the added ID if previously 'deleted' so that collected
	// events can be processed in any order.
	delete(deleted, nid)
	added[nid] = gi.LabelArray
	return true
}
// watch starts the identity watcher
//
// A single goroutine drains 'events': it blocks for a first add/delete event,
// then greedily collects any further events that are immediately available,
// and finally issues one UpdateIdentities call per batch. It exits when
// 'events' is closed.
func (w *identityWatcher) watch(events allocator.AllocatorEventRecvChan) {
	go func() {
		for {
			added := identity.IdentityMap{}
			deleted := identity.IdentityMap{}
		First:
			for {
				event, ok := <-events
				// Wait for one identity add or delete or stop
				if !ok {
					// 'events' was closed
					return
				}
				// Collect first added and deleted labels
				switch event.Typ {
				case allocator.AllocatorChangeUpsert, allocator.AllocatorChangeDelete:
					if collectEvent(w.logger, event, added, deleted) {
						// First event collected
						break First
					}
				}
			}
		More:
			for {
				// see if there is more, but do not wait nor stop
				select {
				case event, ok := <-events:
					if !ok {
						// 'events' was closed
						break More
					}
					// Collect more added and deleted labels
					switch event.Typ {
					case allocator.AllocatorChangeUpsert, allocator.AllocatorChangeDelete:
						collectEvent(w.logger, event, added, deleted)
					}
				default:
					// No more events available without blocking
					break More
				}
			}
			// Issue collected updates
			w.owner.UpdateIdentities(added, deleted) // disjoint sets
		}
	}()
}
// isGlobalIdentityAllocatorInitialized returns true if m.IdentityAllocator is not nil.
// Note: This does not mean that the identities have been synchronized,
// see WaitForInitialGlobalIdentities to wait for a fully populated cache.
func (m *CachingIdentityAllocator) isGlobalIdentityAllocatorInitialized() bool {
	// Non-blocking probe of the initialization channel.
	select {
	case <-m.globalIdentityAllocatorInitialized:
		// Channel closed: initialized — but Close() may have since reset the
		// allocator, hence the additional nil check.
		return m.IdentityAllocator != nil
	default:
		return false
	}
}
// LookupIdentity looks up the identity by its labels but does not create it.
// This function will first search through the local cache, then the caches for
// remote kvstores and finally fall back to the main kvstore.
// May return nil for lookups if the allocator has not yet been synchronized.
func (m *CachingIdentityAllocator) LookupIdentity(ctx context.Context, lbls labels.Labels) *identity.Identity {
	ctx, cancel := context.WithTimeout(ctx, m.timeout)
	defer cancel()

	// Reserved identities take precedence over everything else.
	if reservedIdentity := identity.LookupReservedIdentityByLabels(lbls); reservedIdentity != nil {
		return reservedIdentity
	}

	// Locally-scoped labels never reach the global allocator.
	switch identity.ScopeForLabels(lbls) {
	case identity.IdentityScopeLocal:
		return m.localIdentities.lookup(lbls)
	case identity.IdentityScopeRemoteNode:
		return m.localNodeIdentities.lookup(lbls)
	}

	if !m.isGlobalIdentityAllocatorInitialized() {
		return nil
	}

	lblArray := lbls.LabelArray()
	id, err := m.IdentityAllocator.GetIncludeRemoteCaches(ctx, &key.GlobalIdentity{LabelArray: lblArray})
	if err != nil {
		return nil
	}
	// Out-of-range and unallocated IDs are both reported as "not found".
	if id > identity.MaxNumericIdentity {
		return nil
	}

	if id == idpool.NoID {
		return nil
	}

	return identity.NewIdentityFromLabelArray(identity.NumericIdentity(id), lblArray)
}
// unknownIdentity is the sentinel returned by LookupIdentityByID for
// identity.IdentityUnknown; it carries the reserved "unknown" label.
var unknownIdentity = identity.NewIdentity(identity.IdentityUnknown, labels.Labels{labels.IDNameUnknown: labels.NewLabel(labels.IDNameUnknown, "", labels.LabelSourceReserved)})
// LookupIdentityByID returns the identity by ID. This function will first
// search through the local cache, then the caches for remote kvstores and
// finally fall back to the main kvstore
// May return nil for lookups if the allocator has not yet been synchronized.
func (m *CachingIdentityAllocator) LookupIdentityByID(ctx context.Context, id identity.NumericIdentity) *identity.Identity {
	ctx, cancel := context.WithTimeout(ctx, m.timeout)
	defer cancel()

	// The unknown identity maps to a fixed sentinel value.
	if id == identity.IdentityUnknown {
		return unknownIdentity
	}

	if identity := identity.LookupReservedIdentity(id); identity != nil {
		return identity
	}

	// Locally-scoped IDs are resolved from the local caches only.
	switch id.Scope() {
	case identity.IdentityScopeLocal:
		return m.localIdentities.lookupByID(id)
	case identity.IdentityScopeRemoteNode:
		return m.localNodeIdentities.lookupByID(id)
	}

	if !m.isGlobalIdentityAllocatorInitialized() {
		return nil
	}

	allocatorKey, err := m.IdentityAllocator.GetByIDIncludeRemoteCaches(ctx, idpool.ID(id))
	if err != nil {
		return nil
	}

	if gi, ok := allocatorKey.(*key.GlobalIdentity); ok {
		return identity.NewIdentityFromLabelArray(id, gi.LabelArray)
	}
	// Unknown key type (or nil key): treat as not found.
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cache
import (
"context"
"fmt"
"log/slog"
"maps"
"github.com/cilium/stream"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// localIdentityCache allocates and tracks node-local identities for a single
// numeric-identity scope, handing out IDs from the [minID, maxID] range.
// All mutable state is guarded by mutex.
type localIdentityCache struct {
	logger *slog.Logger
	mutex  lock.RWMutex
	// identitiesByID and identitiesByLabels index the same identity objects,
	// by numeric ID and by sorted-label string respectively.
	identitiesByID     map[identity.NumericIdentity]*identity.Identity
	identitiesByLabels map[string]*identity.Identity
	// nextNumericIdentity is the next candidate offset to hand out; it wraps
	// from maxID back to minID (see bumpNextNumericIdentity).
	nextNumericIdentity identity.NumericIdentity
	// scope is OR-ed into every numeric identity allocated from this cache.
	scope identity.NumericIdentity
	minID identity.NumericIdentity
	maxID identity.NumericIdentity
	// withheldIdentities is a set of identities that should be considered unavailable for allocation,
	// but not yet allocated.
	// They are used during agent restart, where local identities are restored to prevent unnecessary
	// ID flapping on restart.
	//
	// If an old nID is passed to lookupOrCreate(), then it is allowed to use a withheld entry here. Otherwise
	// it must allocate a new ID not in this set.
	withheldIdentities map[identity.NumericIdentity]struct{}
	// Used to implement the stream.Observable interface.
	changeSource stream.Observable[IdentityChange]
	emitChange   func(IdentityChange)
}
// newLocalIdentityCache builds an empty cache that allocates identities in
// [minID, maxID], each tagged with the given scope.
func newLocalIdentityCache(logger *slog.Logger, scope, minID, maxID identity.NumericIdentity) *localIdentityCache {
	// There isn't a natural completion of this observable, so let's drop it.
	mcast, emit, _ := stream.Multicast[IdentityChange]()
	c := &localIdentityCache{
		logger:              logger,
		identitiesByID:      make(map[identity.NumericIdentity]*identity.Identity),
		identitiesByLabels:  make(map[string]*identity.Identity),
		nextNumericIdentity: minID,
		scope:               scope,
		minID:               minID,
		maxID:               maxID,
		withheldIdentities:  make(map[identity.NumericIdentity]struct{}),
		changeSource:        mcast,
		emitChange:          emit,
	}
	return c
}
// bumpNextNumericIdentity advances the allocation cursor by one, wrapping
// around from maxID back to minID. Caller must hold l.mutex.
func (l *localIdentityCache) bumpNextNumericIdentity() {
	if l.nextNumericIdentity != l.maxID {
		l.nextNumericIdentity++
		return
	}
	l.nextNumericIdentity = l.minID
}
// getNextFreeNumericIdentity returns the next available numeric identity or an error
// If idCandidate has the local scope and is available, it will be returned instead of
// searching for a new numeric identity.
// The l.mutex must be held
func (l *localIdentityCache) getNextFreeNumericIdentity(idCandidate identity.NumericIdentity) (identity.NumericIdentity, error) {
	// Try first with the given candidate. A withheld (restored) candidate is
	// allowed here: restoring an old nID is the purpose of withholding.
	if idCandidate.Scope() == l.scope {
		if _, taken := l.identitiesByID[idCandidate]; !taken {
			// let nextNumericIdentity be, allocated identities will be skipped anyway
			l.logger.Debug("Reallocated restored local identity", logfields.Identity, idCandidate)
			return idCandidate, nil
		} else {
			l.logger.Debug("Requested local identity not available to allocate", logfields.Identity, idCandidate)
		}
	}
	// Scan the whole [minID, maxID] ring at most once, starting from the
	// current cursor; firstID marks where we started so we can detect wrap.
	firstID := l.nextNumericIdentity
	for {
		idCandidate = l.nextNumericIdentity | l.scope
		_, taken := l.identitiesByID[idCandidate]
		_, withheld := l.withheldIdentities[idCandidate]
		if !taken && !withheld {
			l.bumpNextNumericIdentity()
			return idCandidate, nil
		}
		l.bumpNextNumericIdentity()
		if l.nextNumericIdentity == firstID {
			// Desperation: no local identities left (unlikely). If there are withheld
			// but not-taken identities, claim one of them.
			for withheldID := range l.withheldIdentities {
				if _, taken := l.identitiesByID[withheldID]; !taken {
					delete(l.withheldIdentities, withheldID)
					l.logger.Warn("Local identity allocator full; claiming first withheld identity. This may cause momentary policy drops", logfields.Identity, withheldID)
					return withheldID, nil
				}
			}
			return 0, fmt.Errorf("out of local identity space")
		}
	}
}
// lookupOrCreate searches for the existence of a local identity with the given
// labels. If it exists, the reference count is incremented and the identity is
// returned. If it does not exist, a new identity is created with a unique
// numeric identity. All identities returned by lookupOrCreate() must be
// released again via localIdentityCache.release().
// A possible previously used numeric identity for these labels can be passed
// in as the 'oldNID' parameter; identity.InvalidIdentity must be passed if no
// previous numeric identity exists. 'oldNID' will be reallocated if available.
// Returns (identity, isNew, error).
func (l *localIdentityCache) lookupOrCreate(lbls labels.Labels, oldNID identity.NumericIdentity) (*identity.Identity, bool, error) {
	// Not converting to string saves an allocation, as byte key lookups into
	// string maps are optimized by the compiler, see
	// https://github.com/golang/go/issues/3512.
	repr := lbls.SortedList()
	l.mutex.Lock()
	defer l.mutex.Unlock()
	// Fast path: the labels already have an identity; just bump the refcount.
	if id, ok := l.identitiesByLabels[string(repr)]; ok {
		id.ReferenceCount++
		return id, false, nil
	}
	numericIdentity, err := l.getNextFreeNumericIdentity(oldNID)
	if err != nil {
		return nil, false, err
	}
	id := &identity.Identity{
		ID:             numericIdentity,
		Labels:         lbls,
		LabelArray:     lbls.LabelArray(),
		ReferenceCount: 1,
	}
	// Register in both indexes and notify observers while still holding the lock.
	l.identitiesByLabels[string(repr)] = id
	l.identitiesByID[numericIdentity] = id
	l.emitChange(IdentityChange{Kind: IdentityChangeUpsert, ID: numericIdentity, Labels: lbls})
	return id, true, nil
}
// release releases a local identity from the cache. true is returned when the
// last use of the identity has been released and the identity has been
// forgotten.
func (l *localIdentityCache) release(id *identity.Identity) bool {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	cached, ok := l.identitiesByID[id.ID]
	if !ok {
		return false
	}
	if cached.ReferenceCount > 1 {
		cached.ReferenceCount--
		return false
	}
	if cached.ReferenceCount == 1 {
		// Release is only attempted once, when the reference count is
		// hitting the last use
		delete(l.identitiesByLabels, string(cached.Labels.SortedList()))
		delete(l.identitiesByID, cached.ID)
		l.emitChange(IdentityChange{Kind: IdentityChangeDelete, ID: cached.ID})
		return true
	}
	return false
}
// withhold marks the nids as unavailable. Any out-of-scope identities are returned.
func (l *localIdentityCache) withhold(nids []identity.NumericIdentity) []identity.NumericIdentity {
	if len(nids) == 0 {
		return nil
	}
	outOfScope := make([]identity.NumericIdentity, 0, len(nids))
	l.mutex.Lock()
	defer l.mutex.Unlock()
	for _, nid := range nids {
		if nid.Scope() == l.scope {
			l.withheldIdentities[nid] = struct{}{}
		} else {
			outOfScope = append(outOfScope, nid)
		}
	}
	return outOfScope
}
// unwithhold removes the given in-scope nids from the withheld set, making
// them available for allocation again. Out-of-scope nids are ignored.
func (l *localIdentityCache) unwithhold(nids []identity.NumericIdentity) {
	if len(nids) == 0 {
		return
	}
	l.mutex.Lock()
	defer l.mutex.Unlock()
	for _, nid := range nids {
		if nid.Scope() == l.scope {
			delete(l.withheldIdentities, nid)
		}
	}
}
// lookup searches for a local identity matching the given labels and returns
// it. If found, the reference count is NOT incremented and thus release must
// NOT be called.
func (l *localIdentityCache) lookup(lbls labels.Labels) *identity.Identity {
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	// A missing key yields the zero value nil, which matches the
	// documented nil-on-miss contract.
	return l.identitiesByLabels[string(lbls.SortedList())]
}
// lookupByID searches for a local identity matching the given ID and returns
// it. If found, the reference count is NOT incremented and thus release must
// NOT be called.
func (l *localIdentityCache) lookupByID(id identity.NumericIdentity) *identity.Identity {
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	// A missing key yields nil, matching the nil-on-miss contract.
	return l.identitiesByID[id]
}
// GetIdentities returns all local identities
// as a shallow copy of the ID index; the identity pointers are shared.
func (l *localIdentityCache) GetIdentities() map[identity.NumericIdentity]*identity.Identity {
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	return maps.Clone(l.identitiesByID)
}

// checkpoint appends every cached identity to dst and returns the extended
// slice; the lock is held only for the duration of the copy.
func (l *localIdentityCache) checkpoint(dst []*identity.Identity) []*identity.Identity {
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	for _, id := range l.identitiesByID {
		dst = append(dst, id)
	}
	return dst
}

// size returns the number of identities currently held in the cache.
func (l *localIdentityCache) size() int {
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	return len(l.identitiesByID)
}
// Observe implements stream.Observable. Replays initial state as a sequence of
// upserts, then emits a sync marker, then delegates to the live change stream.
// Note: next() is invoked while the cache read-lock is held during replay.
func (l *localIdentityCache) Observe(ctx context.Context, next func(IdentityChange), complete func(error)) {
	l.mutex.RLock()
	defer l.mutex.RUnlock()
	for nid, id := range l.identitiesByID {
		// Abort the replay promptly if the observer's context is cancelled.
		select {
		case <-ctx.Done():
			complete(ctx.Err())
			return
		default:
		}
		next(IdentityChange{Kind: IdentityChangeUpsert, ID: nid, Labels: id.Labels})
	}
	select {
	case <-ctx.Done():
		complete(ctx.Err())
		return
	default:
	}
	// Signal that the initial state has been fully replayed.
	next(IdentityChange{Kind: IdentityChangeSync})
	l.changeSource.Observe(ctx, next, complete)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cache
import (
"context"
"log/slog"
"github.com/cilium/stream"
"github.com/cilium/cilium/pkg/allocator"
"github.com/cilium/cilium/pkg/identity"
identitymodel "github.com/cilium/cilium/pkg/identity/model"
"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// NoopIdentityAllocator is an identity-allocator implementation used when
// network policies are disabled: every endpoint receives the reserved "init"
// identity and no identities are allocated, released, or watched.
type NoopIdentityAllocator struct {
	logger *slog.Logger
	// allocatorInitialized is closed when the allocator is initialized.
	allocatorInitialized chan struct{}
}
// NewNoopIdentityAllocator returns a NoopIdentityAllocator whose
// initialization channel is still open; InitIdentityAllocator closes it.
func NewNoopIdentityAllocator(logger *slog.Logger) *NoopIdentityAllocator {
	return &NoopIdentityAllocator{
		logger:               logger,
		allocatorInitialized: make(chan struct{}),
	}
}
// WaitForInitialGlobalIdentities returns immediately: there are no global
// identities to synchronize when network policies are disabled.
func (n *NoopIdentityAllocator) WaitForInitialGlobalIdentities(context.Context) error {
	return nil
}
// AllocateIdentity always hands out the reserved "init" identity, ignoring
// the requested labels. The second return value (newly allocated) is always
// false and no error is ever returned.
func (n *NoopIdentityAllocator) AllocateIdentity(ctx context.Context, lbls labels.Labels, notifyOwner bool, oldNID identity.NumericIdentity) (*identity.Identity, bool, error) {
	n.logger.Debug(
		"Assigning a fixed identity that is not based on labels, because network policies are disabled",
		logfields.Identity, identity.ReservedIdentityInit,
		logfields.IdentityLabels, lbls,
	)
	return identity.LookupReservedIdentity(identity.ReservedIdentityInit), false, nil
}
// AllocateLocalIdentity also hands out the reserved "init" identity; local
// identities are not used when network policies are disabled.
func (n *NoopIdentityAllocator) AllocateLocalIdentity(lbls labels.Labels, notifyOwner bool, oldNID identity.NumericIdentity) (*identity.Identity, bool, error) {
	initID := identity.LookupReservedIdentity(identity.ReservedIdentityInit)
	return initID, false, nil
}

// Release is a no-op that always reports the identity as not released.
func (n *NoopIdentityAllocator) Release(context.Context, *identity.Identity, bool) (released bool, err error) {
	// No need to release identities. All endpoints will have the same identity.
	// The existing global identities will be cleaned up.
	return false, nil
}

// ReleaseLocalIdentities is a no-op returning no released IDs and no error.
func (n *NoopIdentityAllocator) ReleaseLocalIdentities(...identity.NumericIdentity) ([]identity.NumericIdentity, error) {
	return nil, nil
}

// LookupIdentity resolves only reserved identities; nil for everything else.
func (n *NoopIdentityAllocator) LookupIdentity(ctx context.Context, lbls labels.Labels) *identity.Identity {
	// Lookup only reserved identities.
	return identity.LookupReservedIdentityByLabels(lbls)
}

// LookupIdentityByID resolves only reserved identities; nil for everything else.
func (n *NoopIdentityAllocator) LookupIdentityByID(ctx context.Context, id identity.NumericIdentity) *identity.Identity {
	// Lookup only reserved identities.
	return identity.LookupReservedIdentity(id)
}
// GetIdentityCache returns the statically-known reserved identities only.
func (n *NoopIdentityAllocator) GetIdentityCache() identity.IdentityMap {
	// Return only reserved identities, because reserved identities are
	// statically initialized and are not managed by identity allocator.
	cache := identity.IdentityMap{}
	identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
		cache[ni] = id.Labels.LabelArray()
	})
	return cache
}

// GetIdentities returns the API model of the reserved identities only.
func (n *NoopIdentityAllocator) GetIdentities() IdentitiesModel {
	// Return only reserved identities, because reserved identities are
	// statically initialized and are not managed by identity allocator.
	identities := IdentitiesModel{}
	identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
		identities = append(identities, identitymodel.CreateModel(id))
	})
	return identities
}
// WithholdLocalIdentities is a no-op.
func (n *NoopIdentityAllocator) WithholdLocalIdentities(nids []identity.NumericIdentity) {
	// No-op, because local identities are not used when network policies are disabled.
}

// UnwithholdLocalIdentities is a no-op.
func (n *NoopIdentityAllocator) UnwithholdLocalIdentities(nids []identity.NumericIdentity) {
	// No-op, because local identities are not used when network policies are disabled.
}
// NoopRemoteIDCache is a remote-ID cache stub that is always empty and always
// considered synchronized.
type NoopRemoteIDCache struct{}

// NumEntries always reports an empty cache.
func (n *NoopRemoteIDCache) NumEntries() int {
	return 0
}

// Synced always reports the cache as synchronized.
func (n *NoopRemoteIDCache) Synced() bool {
	return true
}

// Watch invokes onSync immediately, as there is nothing to synchronize.
func (n *NoopRemoteIDCache) Watch(ctx context.Context, onSync func(context.Context)) {
	onSync(ctx)
}
// WatchRemoteIdentities returns a no-op remote cache; all arguments are ignored.
func (n *NoopIdentityAllocator) WatchRemoteIdentities(remoteName string, remoteID uint32, backend kvstore.BackendOperations, cachedPrefix bool) (allocator.RemoteIDCache, error) {
	// Remote watchers are not used when the cluster has network policies disabled.
	return &NoopRemoteIDCache{}, nil
}

// RemoveRemoteIdentities is a no-op.
func (n *NoopIdentityAllocator) RemoveRemoteIdentities(name string) {
	// No-op, because remote identities are not used when network policies are disabled.
}

// InitIdentityAllocator marks the allocator as initialized by closing the
// init channel and returns that channel.
// NOTE(review): a second call would panic on the double close — presumably
// this is invoked at most once; confirm with callers.
func (n *NoopIdentityAllocator) InitIdentityAllocator(versioned.Interface, kvstore.Client) <-chan struct{} {
	close(n.allocatorInitialized)
	return n.allocatorInitialized
}
// RestoreLocalIdentities is a no-op returning an empty map.
func (n *NoopIdentityAllocator) RestoreLocalIdentities() (map[identity.NumericIdentity]*identity.Identity, error) {
	// No-op, because local identities are not used when network policies are disabled.
	return make(map[identity.NumericIdentity]*identity.Identity), nil
}

// ReleaseRestoredIdentities is a no-op.
func (n *NoopIdentityAllocator) ReleaseRestoredIdentities() {
	// No-op, because restored identities are not used when network policies are disabled.
}

// Close is a no-op; there are no resources to release.
func (n *NoopIdentityAllocator) Close() {}
// Observe implements stream.Observable: no identity changes are ever emitted
// when network policies are disabled, so the stream completes immediately.
// Receiver renamed m -> n for consistency with every other method of
// NoopIdentityAllocator.
func (n *NoopIdentityAllocator) Observe(ctx context.Context, next func(IdentityChange), complete func(error)) {
	// No-op, because identities are not managed.
	complete(nil)
}
// LocalIdentityChanges returns the allocator itself: a NoopIdentityAllocator
// is its own (empty) observable of local identity changes.
// Receiver renamed m -> n for consistency with every other method of
// NoopIdentityAllocator.
func (n *NoopIdentityAllocator) LocalIdentityChanges() stream.Observable[IdentityChange] {
	return n
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package identity
import (
"encoding/json"
"fmt"
"net"
"strconv"
"sync"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
)
// Identity type names as exposed in the API representation of an identity.
const (
	NodeLocalIdentityType    = "node_local"
	ReservedIdentityType     = "reserved"
	ClusterLocalIdentityType = "cluster_local"
	WellKnownIdentityType    = "well_known"
	RemoteNodeIdentityType   = "remote_node"
)
// Identity is the representation of the security context for a particular set of
// labels.
type Identity struct {
	// Identity's ID.
	ID NumericIdentity `json:"id"`
	// Set of labels that belong to this Identity.
	Labels labels.Labels `json:"labels"`
	// LabelArray contains the same labels as Labels in a form of a list, used
	// for faster lookup. Not serialized; rebuilt from Labels by Sanitize().
	LabelArray labels.LabelArray `json:"-"`
	// CIDRLabel is the primary identity label when the identity represents
	// a CIDR. The Labels field will consist of all matching prefixes, e.g.
	// 10.0.0.0/8
	// 10.0.0.0/7
	// 10.0.0.0/6
	// [...]
	// reserved:world
	//
	// The CIDRLabel field will only contain 10.0.0.0/8
	CIDRLabel labels.Labels `json:"-"`
	// ReferenceCount counts the number of references pointing to this
	// identity. This field is used by the owning cache of the identity.
	ReferenceCount int `json:"-"`
}
// IPIdentityPair is a pairing of an IP and the security identity to which that
// IP corresponds. May include an optional Mask which, if present, denotes that
// the IP represents a CIDR with the specified Mask.
//
// WARNING - STABLE API
// This structure is written as JSON to the key-value store. Do NOT modify this
// structure in ways which are not JSON forward compatible.
type IPIdentityPair struct {
	IP           net.IP          `json:"IP"`
	Mask         net.IPMask      `json:"Mask"` // nil when the pair represents a host IP (see IsHost)
	HostIP       net.IP          `json:"HostIP"`
	ID           NumericIdentity `json:"ID"`
	Key          uint8           `json:"Key"`
	Metadata     string          `json:"Metadata"`
	K8sNamespace string          `json:"K8sNamespace,omitempty"`
	K8sPodName   string          `json:"K8sPodName,omitempty"`
	NamedPorts   []NamedPort     `json:"NamedPorts,omitempty"`
}
// IdentityMap maps a numeric identity to the label array it represents.
type IdentityMap map[NumericIdentity]labels.LabelArray
// GetKeyName returns the kvstore key to be used for the IPIdentityPair
func (pair *IPIdentityPair) GetKeyName() string { return pair.PrefixString() }

// Marshal returns the IPIdentityPair object as JSON byte slice
func (pair *IPIdentityPair) Marshal() ([]byte, error) { return json.Marshal(pair) }
// Unmarshal parses the JSON byte slice and updates the IPIdentityPair
// receiver. The receiver is modified only when the payload decodes cleanly
// and its derived key matches the given kvstore key.
func (pair *IPIdentityPair) Unmarshal(key string, data []byte) error {
	var decoded IPIdentityPair
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	if got := decoded.GetKeyName(); got != key {
		return fmt.Errorf("IP address does not match key: expected %s, got %s", key, got)
	}
	*pair = decoded
	return nil
}
// NamedPort is a mapping from a port name to a port number and protocol.
//
// WARNING - STABLE API
// This structure is written as JSON to the key-value store. Do NOT modify this
// structure in ways which are not JSON forward compatible.
type NamedPort struct {
	Name     string `json:"Name"`
	Port     uint16 `json:"Port"`
	Protocol string `json:"Protocol"`
}
// Sanitize takes a partially initialized Identity (for example, deserialized
// from json) and reconstitutes the full object from what has been restored:
// the LabelArray is rebuilt from Labels when Labels is present.
func (id *Identity) Sanitize() {
	if id.Labels == nil {
		return
	}
	id.LabelArray = id.Labels.LabelArray()
}
// StringID returns the identity identifier as string
func (id *Identity) StringID() string {
	return id.ID.StringID()
}

// String implements fmt.Stringer, returning the identity identifier as string.
func (id *Identity) String() string {
	return id.ID.StringID()
}
// IsReserved returns whether the identity represents a reserved identity
// (true), or not (false).
func (id *Identity) IsReserved() bool {
	return LookupReservedIdentity(id.ID) != nil
}

// IsFixed returns whether the identity represents a fixed identity
// (true), or not (false): host, health, or a user-reserved identity.
func (id *Identity) IsFixed() bool {
	return LookupReservedIdentity(id.ID) != nil &&
		(id.ID == ReservedIdentityHost || id.ID == ReservedIdentityHealth ||
			IsUserReservedIdentity(id.ID))
}

// IsWellKnown returns whether the identity represents a well known identity
// (true), or not (false).
func (id *Identity) IsWellKnown() bool {
	return WellKnown.lookupByNumericIdentity(id.ID) != nil
}
// NewIdentityFromLabelArray creates a new identity from the given label
// array; the Labels map is derived from it when non-nil.
func NewIdentityFromLabelArray(id NumericIdentity, lblArray labels.LabelArray) *Identity {
	ident := &Identity{ID: id, LabelArray: lblArray}
	if lblArray != nil {
		ident.Labels = lblArray.Labels()
	}
	return ident
}
// NewIdentity creates a new identity from the given labels; the LabelArray
// is derived from them when non-nil.
func NewIdentity(id NumericIdentity, lbls labels.Labels) *Identity {
	ident := &Identity{ID: id, Labels: lbls}
	if lbls != nil {
		ident.LabelArray = lbls.LabelArray()
	}
	return ident
}
// IsHost determines whether the IP in the pair represents a host (true) or a
// CIDR prefix (false); a pair with no Mask is a host.
func (pair *IPIdentityPair) IsHost() bool {
	return pair.Mask == nil
}
// PrefixString returns the IPIdentityPair's IP as either a host IP in the
// format w.x.y.z if 'host' is true, or as a prefix in the format the w.x.y.z/N
// if 'host' is false.
func (pair *IPIdentityPair) PrefixString() string {
	if pair.IsHost() {
		return pair.IP.String()
	}
	ones, _ := pair.Mask.Size()
	return pair.IP.String() + "/" + strconv.Itoa(ones)
}
// RequiresGlobalIdentity returns true if the label combination requires a
// global identity
func RequiresGlobalIdentity(lbls labels.Labels) bool {
	return ScopeForLabels(lbls) == IdentityScopeGlobal
}
// ScopeForLabels returns the identity scope to be used for the label set.
// If all labels are either CIDR or reserved, then returns the CIDR scope.
// Note: This assumes the caller has already called LookupReservedIdentityByLabels;
// it does not handle that case.
func ScopeForLabels(lbls labels.Labels) NumericIdentity {
	scope := IdentityScopeGlobal
	// If this is a remote node, return the remote node scope.
	// Note that this is not reachable when policy-cidr-selects-nodes is false or
	// when enable-node-selector-labels is false, since
	// callers will already have gotten a value from LookupReservedIdentityByLabels.
	if lbls.HasRemoteNodeLabel() {
		return IdentityScopeRemoteNode
	}
	// The ingress label is for L7 LB with cilium proxy, which is running on
	// every node. So it's not necessary to be global identity, but local
	// identity instead.
	if lbls.IsReserved() && lbls.HasIngressLabel() {
		return IdentityScopeLocal
	}
	// Local scope only when every label comes from a locally-allocatable
	// source; any other source forces global scope.
	for _, label := range lbls {
		switch label.Source {
		case labels.LabelSourceCIDR, labels.LabelSourceFQDN, labels.LabelSourceReserved, labels.LabelSourceCIDRGroup:
			scope = IdentityScopeLocal
		default:
			return IdentityScopeGlobal
		}
	}
	return scope
}
// AddUserDefinedNumericIdentitySet adds all key-value pairs from the given map
// to the map of user defined numeric identities and reserved identities.
// The key-value pairs should map a numeric identity to a valid label.
// Is not safe for concurrent use.
func AddUserDefinedNumericIdentitySet(m map[string]string) error {
	// Validate the entire set before mutating any global state.
	for numStr := range m {
		nid, err := ParseNumericIdentity(numStr)
		if err != nil {
			return err
		}
		if !IsUserReservedIdentity(nid) {
			return ErrNotUserIdentity
		}
	}
	for numStr, lbl := range m {
		// Parse error already ruled out by the validation pass above.
		nid, _ := ParseNumericIdentity(numStr)
		AddUserDefinedNumericIdentity(nid, lbl)
		AddReservedIdentity(nid, lbl)
	}
	return nil
}
// LookupReservedIdentityByLabels looks up a reserved identity by its labels and
// returns it if found. Returns nil if not found.
// Resolution order: well-known identities, fixed identities, then the
// reserved single-label identities (host, remote-node, kube-apiserver, ...).
func LookupReservedIdentityByLabels(lbls labels.Labels) *Identity {
	if identity := WellKnown.LookupByLabels(lbls); identity != nil {
		return identity
	}
	// Check if a fixed identity exists.
	if lbl, exists := lbls[labels.LabelKeyFixedIdentity]; exists {
		// If the set of labels contain a fixed identity then and exists in
		// the map of reserved IDs then return the identity of that reserved ID.
		id := GetReservedID(lbl.Value)
		if id != IdentityUnknown && IsUserReservedIdentity(id) {
			return LookupReservedIdentity(id)
		}
		// If a fixed identity was not found then we return nil to avoid
		// falling to a reserved identity.
		return nil
	}
	// If there is no reserved label, return nil.
	if !lbls.IsReserved() {
		return nil
	}
	var nid NumericIdentity
	if lbls.HasHostLabel() {
		nid = ReservedIdentityHost
	} else if lbls.HasRemoteNodeLabel() {
		// If selecting remote-nodes via CIDR policies is allowed, then
		// they no longer have a reserved identity.
		if option.Config.PolicyCIDRMatchesNodes() {
			return nil
		}
		// If selecting remote-nodes via node labels is allowed, then
		// they no longer have a reserved identity and are using
		// IdentityScopeRemoteNode.
		if option.Config.PerNodeLabelsEnabled() {
			return nil
		}
		nid = ReservedIdentityRemoteNode
		if lbls.HasKubeAPIServerLabel() {
			// If there's a kube-apiserver label, then we know this is
			// kube-apiserver reserved ID, so change it as such.
			// Only traffic from non-kube-apiserver nodes should be
			// considered as remote-node.
			nid = ReservedIdentityKubeAPIServer
		}
	}
	if nid != IdentityUnknown {
		return NewIdentity(nid, lbls)
	}
	// We have handled all the cases where multiple labels can be present.
	// So, we make sure the set of labels only contains a single label and
	// that label is of the reserved type. This is to prevent users from
	// adding cilium-reserved labels into the workloads.
	if len(lbls) != 1 {
		return nil
	}
	nid = GetReservedID(lbls.ToSlice()[0].Key)
	if nid != IdentityUnknown && !IsUserReservedIdentity(nid) {
		return LookupReservedIdentity(nid)
	}
	return nil
}
// IdentityAllocationIsLocal returns true if a call to AllocateIdentity with
// the given labels would not require accessing the KV store to allocate the
// identity.
// Currently, this function returns true only if the labels are those of a
// reserved identity, i.e. if the slice contains a single reserved
// "reserved:*" label.
func IdentityAllocationIsLocal(lbls labels.Labels) bool {
	// If there is only one label with the "reserved" source and a well-known
	// key, the well-known identity for it can be allocated locally.
	return LookupReservedIdentityByLabels(lbls) != nil
}
// UpdateIdentities is an interface to be called when identities change
type UpdateIdentities interface {
	// UpdateIdentities applies the given additions and deletions; wg tracks
	// completion of the update and mutated reports whether anything changed.
	UpdateIdentities(added, deleted IdentityMap, wg *sync.WaitGroup) (mutated bool)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package identitymanager
import (
"log/slog"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/model"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// IDManager tracks which identities are in use by local endpoints.
type IDManager interface {
	// Add increments the use count of the given identity.
	Add(identity *identity.Identity)
	// GetIdentityModels returns the API model of all tracked identities.
	GetIdentityModels() []*models.IdentityEndpoints
	// Remove decrements the use count of the given identity.
	Remove(identity *identity.Identity)
	// RemoveAll drops every tracked identity.
	RemoveAll()
	// RemoveOldAddNew atomically moves a reference from old to new.
	RemoveOldAddNew(old *identity.Identity, new *identity.Identity)
	// Subscribe registers an observer for local identity usage changes.
	Subscribe(o Observer)
}
// IdentityManager caches information about a set of identities, currently a
// reference count of how many users there are for each identity.
// All state is guarded by mutex.
type IdentityManager struct {
	logger     *slog.Logger
	mutex      lock.RWMutex
	identities map[identity.NumericIdentity]*identityMetadata
	observers  map[Observer]struct{}
}
// NewIDManager returns an initialized IdentityManager.
func NewIDManager(logger *slog.Logger) IDManager {
	return newIdentityManager(logger)
}
// identityMetadata pairs an identity with the number of local users
// referencing it.
type identityMetadata struct {
	identity *identity.Identity
	refCount uint
}
// newIdentityManager constructs an empty IdentityManager.
func newIdentityManager(logger *slog.Logger) *IdentityManager {
	idm := &IdentityManager{
		logger:     logger,
		identities: map[identity.NumericIdentity]*identityMetadata{},
		observers:  map[Observer]struct{}{},
	}
	return idm
}
// Add inserts the identity into the identity manager. If the identity is
// already in the identity manager, the reference count for the identity is
// incremented.
func (idm *IdentityManager) Add(identity *identity.Identity) {
	idm.logger.Debug(
		"Adding identity to identity manager",
		logfields.Identity, identity,
	)
	idm.mutex.Lock()
	defer idm.mutex.Unlock()
	idm.add(identity)
}
// add is the lock-free core of Add; idm.mutex must be held. A nil identity
// is ignored. Observers are notified only on first insertion.
func (idm *IdentityManager) add(identity *identity.Identity) {
	if identity == nil {
		return
	}
	if idMeta, exists := idm.identities[identity.ID]; exists {
		idMeta.refCount++
		return
	}
	idm.identities[identity.ID] = &identityMetadata{
		identity: identity,
		refCount: 1,
	}
	for o := range idm.observers {
		o.LocalEndpointIdentityAdded(identity)
	}
}
// RemoveOldAddNew removes old from the identity manager and inserts new
// into the IdentityManager.
// Caller must have previously added the old identity with Add().
// This is a no-op if both identities have the same numeric ID, except for
// the reserved host identity (see below).
func (idm *IdentityManager) RemoveOldAddNew(old, new *identity.Identity) {
	idm.mutex.Lock()
	defer idm.mutex.Unlock()
	if old == nil && new == nil {
		return
	}
	// The host endpoint will always retain its reserved ID, but its labels may
	// change so we need to update its identity.
	if old != nil && new != nil && old.ID == new.ID && new.ID != identity.ReservedIdentityHost {
		return
	}
	idm.logger.Debug(
		"removing old and adding new identity",
		logfields.Old, old,
		logfields.New, new,
	)
	idm.remove(old)
	idm.add(new)
}
// RemoveAll removes all identities.
// Deleting from the map while ranging over it is safe in Go.
func (idm *IdentityManager) RemoveAll() {
	idm.mutex.Lock()
	defer idm.mutex.Unlock()
	for id := range idm.identities {
		idm.remove(idm.identities[id].identity)
	}
}
// Remove deletes the identity from the identity manager. If the identity is
// already in the identity manager, the reference count for the identity is
// decremented. If the identity is not in the cache, this is a no-op. If the
// ref count becomes zero, the identity is removed from the cache.
func (idm *IdentityManager) Remove(identity *identity.Identity) {
	idm.logger.Debug(
		"Removing identity from identity manager",
		logfields.Identity, identity,
	)
	idm.mutex.Lock()
	defer idm.mutex.Unlock()
	idm.remove(identity)
}
// remove is the lock-free core of Remove; idm.mutex must be held. Observers
// are notified when the last reference is dropped.
func (idm *IdentityManager) remove(identity *identity.Identity) {
	if identity == nil {
		return
	}
	idMeta, exists := idm.identities[identity.ID]
	if !exists {
		idm.logger.Error(
			"removing identity not added to the identity manager!",
			logfields.Identity, identity,
		)
		return
	}
	idMeta.refCount--
	if idMeta.refCount != 0 {
		return
	}
	delete(idm.identities, identity.ID)
	for o := range idm.observers {
		o.LocalEndpointIdentityRemoved(identity)
	}
}
// GetIdentityModels returns the API representation of the IdentityManager.
func (idm *IdentityManager) GetIdentityModels() []*models.IdentityEndpoints {
	idm.mutex.RLock()
	defer idm.mutex.RUnlock()
	out := make([]*models.IdentityEndpoints, 0, len(idm.identities))
	for _, meta := range idm.identities {
		entry := &models.IdentityEndpoints{
			Identity: model.CreateModel(meta.identity),
			RefCount: int64(meta.refCount),
		}
		out = append(out, entry)
	}
	return out
}
// Subscribe adds the specified Observer to the global identity manager, to be
// notified upon changes to local identity usage.
func (idm *IdentityManager) Subscribe(o Observer) {
	idm.mutex.Lock()
	defer idm.mutex.Unlock()
	idm.observers[o] = struct{}{}
}
// IdentitiesModel is a wrapper so that we can implement the sort.Interface
// to sort the slice by ID
type IdentitiesModel []*models.IdentityEndpoints

// Less returns true if the element in index `i` is lower than the element
// in index `j`
func (s IdentitiesModel) Less(i, j int) bool {
	return s[i].Identity.ID < s[j].Identity.ID
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package key
import (
"maps"
"strings"
"github.com/cilium/cilium/pkg/labelsfilter"
"github.com/cilium/cilium/pkg/allocator"
"github.com/cilium/cilium/pkg/labels"
)
const (
	// MetadataKeyBackendKey is the key used to store the backend key.
	MetadataKeyBackendKey = iota
)
// GlobalIdentity is the structure used to store an identity
type GlobalIdentity struct {
labels.LabelArray
// metadata contains metadata that are stored for example by the backends.
metadata map[any]any
}
// GetKey encodes an Identity as string by concatenating the kvstore form of
// each label in order.
func (gi *GlobalIdentity) GetKey() string {
	var str strings.Builder
	for _, l := range gi.LabelArray {
		str.Write(l.FormatForKVStore())
	}
	return str.String()
}
// GetAsMap encodes a GlobalIdentity as a map of keys to values. The keys will
// include a source delimited by a ':'. This output is parseable by PutKeyFromMap.
func (gi *GlobalIdentity) GetAsMap() map[string]string {
	return gi.StringMap()
}
// PutKey decodes an Identity from its string representation
// (the format produced by GetKey). The metadata map is not restored.
func (gi *GlobalIdentity) PutKey(v string) allocator.AllocatorKey {
	return &GlobalIdentity{LabelArray: labels.NewLabelArrayFromSortedList(v)}
}

// PutKeyFromMap decodes an Identity from a map of key to value. Output
// from GetAsMap can be parsed.
// Note: NewLabelArrayFromMap will parse the ':' separated label source from
// the keys because the source parameter is ""
func (gi *GlobalIdentity) PutKeyFromMap(v map[string]string) allocator.AllocatorKey {
	return &GlobalIdentity{LabelArray: labels.Map2Labels(v, "").LabelArray()}
}
// PutValue puts metadata inside the global identity for the given 'key' with
// the given 'value'. The receiver is not mutated; a copy with the extended
// metadata map is returned.
func (gi *GlobalIdentity) PutValue(key, value any) allocator.AllocatorKey {
	md := make(map[any]any, len(gi.metadata)+1)
	maps.Copy(md, gi.metadata)
	md[key] = value
	return &GlobalIdentity{
		LabelArray: gi.LabelArray,
		metadata:   md,
	}
}
// Value returns the value stored in the metadata map.
// Reading from a nil map is safe and yields nil.
func (gi *GlobalIdentity) Value(key any) any {
	return gi.metadata[key]
}
// GetCIDKeyFromLabels builds a GlobalIdentity from raw string labels with the
// given source, keeping only the identity-relevant labels (the filtered-out
// remainder returned by labelsfilter.Filter is discarded).
func GetCIDKeyFromLabels(allLabels map[string]string, source string) *GlobalIdentity {
	lbs := labels.Map2Labels(allLabels, source)
	idLabels, _ := labelsfilter.Filter(lbs)
	return &GlobalIdentity{LabelArray: idLabels.LabelArray()}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package model
import (
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
)
// NewIdentityFromModel converts an API model identity into an internal
// identity.Identity. Returns nil for a nil model. The resulting identity is
// sanitized before being returned.
func NewIdentityFromModel(base *models.Identity) *identity.Identity {
	if base == nil {
		return nil
	}
	lbls := make(labels.Labels, len(base.Labels))
	for _, s := range base.Labels {
		l := labels.ParseLabel(s)
		lbls[l.Key] = l
	}
	id := &identity.Identity{
		ID:     identity.NumericIdentity(base.ID),
		Labels: lbls,
	}
	id.Sanitize()
	return id
}
// CreateModel converts an internal identity into its API model
// representation. Returns nil for a nil identity. Labels are emitted from the
// identity's sorted LabelArray so the model's label order is deterministic.
func CreateModel(id *identity.Identity) *models.Identity {
	if id == nil {
		return nil
	}
	lbls := make([]string, 0, len(id.Labels))
	for _, l := range id.LabelArray {
		lbls = append(lbls, l.String())
	}
	return &models.Identity{
		ID:     int64(id.ID),
		Labels: lbls,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package identity
import (
"errors"
"fmt"
"math"
"net/netip"
"sort"
"strconv"
"sync"
"unsafe"
cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
api "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
)
const (
	// Identities also have scopes, which is defined by the high 8 bits.
	// 0x00 -- Global and reserved identities. Reserved identities are
	//         not allocated like global identities, but are known
	//         because they are hardcoded in Cilium. Older versions of
	//         Cilium will not be aware of any "new" reserved identities
	//         that are added.
	// 0x01 -- local (CIDR) identities
	// 0x02 -- remote nodes

	// IdentityScopeMask is the top 8 bits of the 32 bit identity
	IdentityScopeMask = NumericIdentity(0xFF_00_00_00)
	// IdentityScopeGlobal is the identity scope used by global and reserved identities.
	IdentityScopeGlobal = NumericIdentity(0)
	// IdentityScopeLocal is the tag in the numeric identity that identifies
	// a numeric identity to have local (CIDR) scope.
	IdentityScopeLocal = NumericIdentity(1 << 24)
	// IdentityScopeRemoteNode is the tag in the numeric identity that identifies
	// an identity to be a remote in-cluster node.
	IdentityScopeRemoteNode = NumericIdentity(2 << 24)
	// MinAllocatorLocalIdentity represents the minimal numeric identity
	// that the localIdentityCache allocator can allocate for a local (CIDR)
	// identity.
	//
	// Note that this does not represent the minimal value for a local
	// identity, as the allocated ID will then be bitwise OR'ed with
	// IdentityScopeLocal.
	MinAllocatorLocalIdentity = 1
	// MinLocalIdentity represents the actual minimal numeric identity value
	// for a local (CIDR) identity.
	MinLocalIdentity = MinAllocatorLocalIdentity | IdentityScopeLocal
	// MaxAllocatorLocalIdentity represents the maximal numeric identity
	// that the localIdentityCache allocator can allocate for a local (CIDR)
	// identity.
	//
	// Note that this does not represent the maximal value for a local
	// identity, as the allocated ID will then be bitwise OR'ed with
	// IdentityScopeLocal.
	MaxAllocatorLocalIdentity = 0xFFFFFF
	// MaxLocalIdentity represents the actual maximal numeric identity value
	// for a local (CIDR) identity.
	MaxLocalIdentity = MaxAllocatorLocalIdentity | IdentityScopeLocal
	// MinimalNumericIdentity represents the minimal numeric identity not
	// used for reserved purposes.
	MinimalNumericIdentity = NumericIdentity(256)
	// UserReservedNumericIdentity represents the minimal numeric identity that
	// can be used by users for reserved purposes.
	UserReservedNumericIdentity = NumericIdentity(128)
	// InvalidIdentity is the identity assigned if the identity is invalid
	// or not determined yet
	InvalidIdentity = NumericIdentity(0)
)
var (
	// clusterIDInit ensures that clusterIDShift is computed exactly once,
	// and only on first use, via GetClusterIDShift.
	clusterIDInit sync.Once
	// clusterIDShift is the number of bits to shift a cluster ID in a numeric
	// identity and is equal to the number of bits that represent a cluster-local
	// identity. Written only by initClusterIDShift under clusterIDInit.
	clusterIDShift uint32
)
// Reserved numeric identities, allocated sequentially starting from 0.
// These values are hardcoded and shared cluster-wide; their order must
// never change.
const (
	// IdentityUnknown represents an unknown identity
	IdentityUnknown NumericIdentity = iota
	// ReservedIdentityHost represents the local host
	ReservedIdentityHost
	// ReservedIdentityWorld represents any endpoint outside of the cluster
	ReservedIdentityWorld
	// ReservedIdentityUnmanaged represents unmanaged endpoints.
	ReservedIdentityUnmanaged
	// ReservedIdentityHealth represents the local cilium-health endpoint
	ReservedIdentityHealth
	// ReservedIdentityInit is the identity given to endpoints that have not
	// received any labels yet.
	ReservedIdentityInit
	// ReservedIdentityRemoteNode is the identity given to all nodes in
	// local and remote clusters except for the local node.
	ReservedIdentityRemoteNode
	// ReservedIdentityKubeAPIServer is the identity given to remote node(s) which
	// have backend(s) serving the kube-apiserver running.
	ReservedIdentityKubeAPIServer
	// ReservedIdentityIngress is the identity given to the IP used as the source
	// address for connections from Ingress proxies.
	ReservedIdentityIngress
	// ReservedIdentityWorldIPv4 represents any endpoint outside of the cluster
	// for IPv4 address only.
	ReservedIdentityWorldIPv4
	// ReservedIdentityWorldIPv6 represents any endpoint outside of the cluster
	// for IPv6 address only.
	ReservedIdentityWorldIPv6
	// ReservedEncryptedOverlay represents overlay traffic which must be IPSec
	// encrypted before it leaves the host
	ReservedEncryptedOverlay
)
// Special identities for well-known cluster components
// Each component has two identities. The first one is used for Kubernetes <1.21
// or when the NamespaceDefaultLabelName feature gate is disabled. The second
// one is used for Kubernetes >= 1.21 and when the NamespaceDefaultLabelName is
// enabled.
// The numbering starts at 100 and must stay stable across releases; entries
// prefixed "Deprecated" are retained only to keep the iota sequence intact.
const (
	DeprecatedETCDOperator NumericIdentity = iota + 100
	DeprecatedCiliumKVStore
	// ReservedKubeDNS is the reserved identity used for kube-dns.
	ReservedKubeDNS
	// ReservedEKSKubeDNS is the reserved identity used for kube-dns on EKS
	ReservedEKSKubeDNS
	// ReservedCoreDNS is the reserved identity used for CoreDNS
	ReservedCoreDNS
	// ReservedCiliumOperator is the reserved identity used for the Cilium operator
	ReservedCiliumOperator
	// ReservedEKSCoreDNS is the reserved identity used for CoreDNS on EKS
	ReservedEKSCoreDNS
	DeprecatedCiliumEtcdOperator
	// Second identities for all above components
	DeprecatedETCDOperator2
	DeprecatedCiliumKVStore2
	ReservedKubeDNS2
	ReservedEKSKubeDNS2
	ReservedCoreDNS2
	ReservedCiliumOperator2
	ReservedEKSCoreDNS2
	DeprecatedCiliumEtcdOperator2
)
// wellKnownIdentities maps reserved numeric identities of well-known cluster
// components to their precomputed identity/label data.
type wellKnownIdentities map[NumericIdentity]wellKnownIdentity

// wellKnownIdentity is an identity for well-known security labels for which
// a well-known numeric identity is reserved to avoid requiring a cluster wide
// setup. Examples of this include kube-dns.
type wellKnownIdentity struct {
	// identity is the full identity for this entry.
	identity *Identity
	// labelArray caches the sorted label array form of the identity's labels.
	labelArray labels.LabelArray
}
// add registers the given string labels under the well-known numeric identity
// i, and mirrors the identity into the reserved identity cache so lookups by
// number succeed as well.
func (w wellKnownIdentities) add(i NumericIdentity, lbls []string) {
	labelMap := labels.NewLabelsFromModel(lbls)
	identity := NewIdentity(i, labelMap)
	// Reuse the single identity object for both the well-known table and the
	// reserved cache; the original code constructed a second, identical
	// Identity for no benefit.
	w[i] = wellKnownIdentity{
		identity:   identity,
		labelArray: labelMap.LabelArray(),
	}
	cacheMU.Lock()
	reservedIdentityCache[i] = identity
	cacheMU.Unlock()
}
// LookupByLabels returns the well-known identity whose label set equals lbls
// exactly, or nil when no entry matches.
func (w wellKnownIdentities) LookupByLabels(lbls labels.Labels) *Identity {
	for _, wki := range w {
		if lbls.Equals(wki.identity.Labels) {
			return wki.identity
		}
	}
	return nil
}
// ForEach invokes yield once for every well-known identity. Iteration order
// is unspecified (Go map iteration order is randomized).
func (w wellKnownIdentities) ForEach(yield func(*Identity)) {
	for _, id := range w {
		yield(id.identity)
	}
}
// lookupByNumericIdentity returns the well-known identity registered under
// the given numeric identity, or nil if none exists.
func (w wellKnownIdentities) lookupByNumericIdentity(identity NumericIdentity) *Identity {
	if wki, ok := w[identity]; ok {
		return wki.identity
	}
	return nil
}
// Configuration is the consumer-side interface for the settings
// InitWellKnownIdentities needs from the daemon configuration.
type Configuration interface {
	// CiliumNamespaceName returns the namespace the Cilium components run in.
	CiliumNamespaceName() string
}
// k8sLabel renders a key/value pair as a Kubernetes-sourced label string of
// the form "k8s:<key>=<value>".
func k8sLabel(key string, value string) string {
	return fmt.Sprintf("k8s:%s=%s", key, value)
}
// InitWellKnownIdentities establishes all well-known identities. Returns the
// number of well-known identities initialized.
//
// For each component two entries are registered: the base label set, and the
// same set extended with the namespace meta-name label (the "2" variant used
// on Kubernetes >= 1.21 with NamespaceDefaultLabelName enabled).
// Each label-slice literal has len == cap, so the append calls below allocate
// fresh backing arrays and do not mutate the base slices.
func InitWellKnownIdentities(c Configuration, cinfo cmtypes.ClusterInfo) int {
	// kube-dns labels
	//   k8s:io.cilium.k8s.policy.serviceaccount=kube-dns
	//   k8s:io.kubernetes.pod.namespace=kube-system
	//   k8s:k8s-app=kube-dns
	//   k8s:io.cilium.k8s.policy.cluster=default
	kubeDNSLabels := []string{
		"k8s:k8s-app=kube-dns",
		k8sLabel(api.PodNamespaceLabel, "kube-system"),
		k8sLabel(api.PolicyLabelServiceAccount, "kube-dns"),
		k8sLabel(api.PolicyLabelCluster, cinfo.Name),
	}
	WellKnown.add(ReservedKubeDNS, kubeDNSLabels)
	WellKnown.add(ReservedKubeDNS2, append(kubeDNSLabels,
		k8sLabel(api.PodNamespaceMetaNameLabel, "kube-system")))
	// kube-dns EKS labels
	//   k8s:io.cilium.k8s.policy.serviceaccount=kube-dns
	//   k8s:io.kubernetes.pod.namespace=kube-system
	//   k8s:k8s-app=kube-dns
	//   k8s:io.cilium.k8s.policy.cluster=default
	//   k8s:eks.amazonaws.com/component=kube-dns
	eksKubeDNSLabels := []string{
		"k8s:k8s-app=kube-dns",
		"k8s:eks.amazonaws.com/component=kube-dns",
		k8sLabel(api.PodNamespaceLabel, "kube-system"),
		k8sLabel(api.PolicyLabelServiceAccount, "kube-dns"),
		k8sLabel(api.PolicyLabelCluster, cinfo.Name),
	}
	WellKnown.add(ReservedEKSKubeDNS, eksKubeDNSLabels)
	WellKnown.add(ReservedEKSKubeDNS2, append(eksKubeDNSLabels,
		k8sLabel(api.PodNamespaceMetaNameLabel, "kube-system")))
	// CoreDNS EKS labels
	//   k8s:io.cilium.k8s.policy.serviceaccount=coredns
	//   k8s:io.kubernetes.pod.namespace=kube-system
	//   k8s:k8s-app=kube-dns
	//   k8s:io.cilium.k8s.policy.cluster=default
	//   k8s:eks.amazonaws.com/component=coredns
	eksCoreDNSLabels := []string{
		"k8s:k8s-app=kube-dns",
		"k8s:eks.amazonaws.com/component=coredns",
		k8sLabel(api.PodNamespaceLabel, "kube-system"),
		k8sLabel(api.PolicyLabelServiceAccount, "coredns"),
		k8sLabel(api.PolicyLabelCluster, cinfo.Name),
	}
	WellKnown.add(ReservedEKSCoreDNS, eksCoreDNSLabels)
	WellKnown.add(ReservedEKSCoreDNS2, append(eksCoreDNSLabels,
		k8sLabel(api.PodNamespaceMetaNameLabel, "kube-system")))
	// CoreDNS labels
	//   k8s:io.cilium.k8s.policy.serviceaccount=coredns
	//   k8s:io.kubernetes.pod.namespace=kube-system
	//   k8s:k8s-app=kube-dns
	//   k8s:io.cilium.k8s.policy.cluster=default
	coreDNSLabels := []string{
		"k8s:k8s-app=kube-dns",
		k8sLabel(api.PodNamespaceLabel, "kube-system"),
		k8sLabel(api.PolicyLabelServiceAccount, "coredns"),
		k8sLabel(api.PolicyLabelCluster, cinfo.Name),
	}
	WellKnown.add(ReservedCoreDNS, coreDNSLabels)
	WellKnown.add(ReservedCoreDNS2, append(coreDNSLabels,
		k8sLabel(api.PodNamespaceMetaNameLabel, "kube-system")))
	// CiliumOperator labels
	//   k8s:io.cilium.k8s.policy.serviceaccount=cilium-operator
	//   k8s:io.kubernetes.pod.namespace=<NAMESPACE>
	//   k8s:name=cilium-operator
	//   k8s:io.cilium/app=operator
	//   k8s:app.kubernetes.io/part-of=cilium
	//   k8s:app.kubernetes.io/name=cilium-operator
	//   k8s:io.cilium.k8s.policy.cluster=default
	ciliumOperatorLabels := []string{
		"k8s:name=cilium-operator",
		"k8s:io.cilium/app=operator",
		"k8s:app.kubernetes.io/part-of=cilium",
		"k8s:app.kubernetes.io/name=cilium-operator",
		k8sLabel(api.PodNamespaceLabel, c.CiliumNamespaceName()),
		k8sLabel(api.PolicyLabelServiceAccount, "cilium-operator"),
		k8sLabel(api.PolicyLabelCluster, cinfo.Name),
	}
	WellKnown.add(ReservedCiliumOperator, ciliumOperatorLabels)
	WellKnown.add(ReservedCiliumOperator2, append(ciliumOperatorLabels,
		k8sLabel(api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
	return len(WellKnown)
}
// GetClusterIDShift returns the number of bits to shift a cluster ID in a numeric
// identity and is equal to the number of bits that represent a cluster-local identity.
// A sync.Once is used to ensure we only initialize clusterIDShift once.
func GetClusterIDShift() uint32 {
	clusterIDInit.Do(initClusterIDShift)
	return clusterIDShift
}
// initClusterIDShift sets variables that control the bit allocation of cluster
// ID in a numeric identity. Runs exactly once, under clusterIDInit.
func initClusterIDShift() {
	// clusterIDLen is the number of bits that represent a cluster ID in a
	// numeric identity. ClusterIDMax+1 is assumed to be a power of two, so
	// the Log2 result is exact.
	clusterIDLen := uint32(math.Log2(float64(cmtypes.ClusterIDMax + 1)))
	// clusterIDShift is the number of bits to shift a cluster ID in a numeric identity
	clusterIDShift = NumericIdentityBitlength - clusterIDLen
}
// GetMinimalAllocationIdentity returns the minimal numeric identity that
// should be handed out by the identity allocator for the given cluster.
func GetMinimalAllocationIdentity(clusterID uint32) NumericIdentity {
	if clusterID > 0 {
		// For ClusterID > 0, the identity range just starts from cluster shift,
		// no well-known-identities need to be reserved from the range.
		return NumericIdentity((1 << GetClusterIDShift()) * clusterID)
	}
	// Cluster 0 must skip the reserved identities below 256.
	return MinimalNumericIdentity
}
// GetMaximumAllocationIdentity returns the maximum numeric identity that
// should be handed out by the identity allocator for the given cluster,
// i.e. the last identity before the next cluster's range begins.
func GetMaximumAllocationIdentity(clusterID uint32) NumericIdentity {
	return NumericIdentity((1<<GetClusterIDShift())*(clusterID+1) - 1)
}
var (
	// reservedIdentities maps a reserved label name to its numeric identity.
	// Mutated only by Add/DelUserDefinedNumericIdentity (not concurrency safe).
	reservedIdentities = map[string]NumericIdentity{
		labels.IDNameHost:             ReservedIdentityHost,
		labels.IDNameWorld:            ReservedIdentityWorld,
		labels.IDNameWorldIPv4:        ReservedIdentityWorldIPv4,
		labels.IDNameWorldIPv6:        ReservedIdentityWorldIPv6,
		labels.IDNameUnmanaged:        ReservedIdentityUnmanaged,
		labels.IDNameHealth:           ReservedIdentityHealth,
		labels.IDNameInit:             ReservedIdentityInit,
		labels.IDNameRemoteNode:       ReservedIdentityRemoteNode,
		labels.IDNameKubeAPIServer:    ReservedIdentityKubeAPIServer,
		labels.IDNameIngress:          ReservedIdentityIngress,
		labels.IDNameEncryptedOverlay: ReservedEncryptedOverlay,
	}
	// reservedIdentityNames is the reverse mapping of reservedIdentities,
	// plus the "unknown" name for IdentityUnknown. Note that
	// ReservedEncryptedOverlay intentionally has no entry here.
	reservedIdentityNames = map[NumericIdentity]string{
		IdentityUnknown:               "unknown",
		ReservedIdentityHost:          labels.IDNameHost,
		ReservedIdentityWorld:         labels.IDNameWorld,
		ReservedIdentityWorldIPv4:     labels.IDNameWorldIPv4,
		ReservedIdentityWorldIPv6:     labels.IDNameWorldIPv6,
		ReservedIdentityUnmanaged:     labels.IDNameUnmanaged,
		ReservedIdentityHealth:        labels.IDNameHealth,
		ReservedIdentityInit:          labels.IDNameInit,
		ReservedIdentityRemoteNode:    labels.IDNameRemoteNode,
		ReservedIdentityKubeAPIServer: labels.IDNameKubeAPIServer,
		ReservedIdentityIngress:       labels.IDNameIngress,
	}
	// reservedIdentityLabels maps reserved numeric identities to the label
	// sets they carry; consumed by iterateReservedIdentityLabels at init time.
	reservedIdentityLabels = map[NumericIdentity]labels.Labels{
		ReservedIdentityHost:      labels.LabelHost,
		ReservedIdentityWorld:     labels.LabelWorld,
		ReservedIdentityWorldIPv4: labels.LabelWorldIPv4,
		ReservedIdentityWorldIPv6: labels.LabelWorldIPv6,
		ReservedIdentityUnmanaged: labels.NewLabelsFromModel([]string{"reserved:" + labels.IDNameUnmanaged}),
		ReservedIdentityHealth:    labels.LabelHealth,
		ReservedIdentityInit:      labels.NewLabelsFromModel([]string{"reserved:" + labels.IDNameInit}),
		ReservedIdentityRemoteNode: labels.LabelRemoteNode,
		// kube-apiserver endpoints carry both the kube-apiserver and the
		// remote-node label.
		ReservedIdentityKubeAPIServer: labels.Map2Labels(map[string]string{
			labels.LabelKubeAPIServer.String(): "",
			labels.LabelRemoteNode.String():    "",
		}, ""),
		ReservedIdentityIngress: labels.LabelIngress,
	}
	// WellKnown identities stores global state of all well-known identities.
	WellKnown = wellKnownIdentities{}
	// ErrNotUserIdentity is an error returned for an identity that is not user
	// reserved.
	ErrNotUserIdentity = errors.New("not a user reserved identity")
)
// IsUserReservedIdentity returns true if the given NumericIdentity falls in
// the half-open range [UserReservedNumericIdentity, MinimalNumericIdentity)
// reserved for user-defined identities.
func IsUserReservedIdentity(id NumericIdentity) bool {
	v := id.Uint32()
	return v >= UserReservedNumericIdentity.Uint32() && v < MinimalNumericIdentity.Uint32()
}
// AddUserDefinedNumericIdentity registers the given numeric identity under
// the given label in the reserved identity tables. Returns ErrNotUserIdentity
// unless the identity lies in the user-reserved range
// [UserReservedNumericIdentity, MinimalNumericIdentity).
// Is not safe for concurrent use.
func AddUserDefinedNumericIdentity(id NumericIdentity, label string) error {
	if !IsUserReservedIdentity(id) {
		return ErrNotUserIdentity
	}
	reservedIdentities[label] = id
	reservedIdentityNames[id] = label
	return nil
}
// DelReservedNumericIdentity removes the given numeric identity (and its
// label) from the reserved identity tables. Returns ErrNotUserIdentity unless
// the identity lies in the user-reserved range; deleting an unregistered
// user-range identity is a no-op that still returns nil.
// Is not safe for concurrent use.
func DelReservedNumericIdentity(id NumericIdentity) error {
	if !IsUserReservedIdentity(id) {
		return ErrNotUserIdentity
	}
	if label, ok := reservedIdentityNames[id]; ok {
		delete(reservedIdentities, label)
		delete(reservedIdentityNames, id)
	}
	return nil
}
// NumericIdentity is the numeric representation of a security identity.
//
// Bits:
//
//	 0-15: identity identifier
//	16-23: cluster identifier
//	   24: LocalIdentityFlag: Indicates that the identity has a local scope
type NumericIdentity uint32
// NumericIdentityBitlength is the number of bits used on the wire for a
// NumericIdentity (the scope bits above bit 23 are not transmitted).
const NumericIdentityBitlength = 24

// MaxNumericIdentity is the maximum value of a NumericIdentity.
const MaxNumericIdentity = math.MaxUint32
// NumericIdentitySlice is a slice of NumericIdentity values.
type NumericIdentitySlice []NumericIdentity

// AsUint32Slice returns the NumericIdentitySlice as a slice of uint32 without copying any data.
// This is safe as long as the underlying type stays as uint32.
// The returned slice aliases the receiver's backing array; mutations are
// visible through both views.
func (nids NumericIdentitySlice) AsUint32Slice() []uint32 {
	if len(nids) == 0 {
		return nil
	}
	return unsafe.Slice((*uint32)(&nids[0]), len(nids))
}
// ParseNumericIdentity parses a numeric identity from its string form.
// The base is auto-detected (decimal, 0x hex, 0/0o octal, 0b binary) and the
// value must fit in 32 bits.
func ParseNumericIdentity(id string) (NumericIdentity, error) {
	nid, err := strconv.ParseUint(id, 0, 32)
	switch {
	case err != nil:
		return NumericIdentity(0), err
	case nid > MaxNumericIdentity:
		return NumericIdentity(0), fmt.Errorf("%s: numeric identity too large", id)
	default:
		return NumericIdentity(nid), nil
	}
}
// StringID returns the identity as a base-10 decimal string, ignoring any
// reserved-name mapping.
func (id NumericIdentity) StringID() string {
	return strconv.FormatUint(uint64(id), 10)
}
// String returns the reserved name of the identity if it has one (e.g.
// "host", "world"), and otherwise its decimal representation.
func (id NumericIdentity) String() string {
	name, reserved := reservedIdentityNames[id]
	if reserved {
		return name
	}
	return id.StringID()
}
// Uint32 normalizes the ID for use in BPF program.
func (id NumericIdentity) Uint32() uint32 {
	return uint32(id)
}
// GetReservedID returns the numeric identity reserved under the given label
// name, or IdentityUnknown if the name is not reserved.
func GetReservedID(name string) NumericIdentity {
	if id, ok := reservedIdentities[name]; ok {
		return id
	}
	return IdentityUnknown
}
// IsReservedIdentity returns whether id is one of the special reserved
// identities (i.e. has an entry in reservedIdentityNames).
func (id NumericIdentity) IsReservedIdentity() bool {
	_, isReservedIdentity := reservedIdentityNames[id]
	return isReservedIdentity
}
// ClusterID returns the cluster ID associated with the identity by shifting
// out the cluster-local identity bits and masking to the cluster ID width.
func (id NumericIdentity) ClusterID() uint32 {
	// GetClusterIDShift already returns uint32; the previous extra
	// uint32(...) conversion around it was redundant.
	return (uint32(id) >> GetClusterIDShift()) & cmtypes.ClusterIDMax
}
// GetAllReservedIdentities returns a list of all reserved numeric identities
// in ascending order.
// NOTE: While this func is unused from the cilium repository, is it imported
// and called by the hubble cli.
func GetAllReservedIdentities() []NumericIdentity {
	all := make([]NumericIdentity, 0, len(reservedIdentities))
	for _, id := range reservedIdentities {
		all = append(all, id)
	}
	// Go map iteration order is randomized, so sort for a stable result.
	sort.Slice(all, func(a, b int) bool {
		return all[a].Uint32() < all[b].Uint32()
	})
	return all
}
// GetWorldIdentityFromIP gets the correct world identity based
// on the IP address version. If Cilium is not in dual-stack mode
// then ReservedIdentityWorld will always be returned.
func GetWorldIdentityFromIP(addr netip.Addr) NumericIdentity {
	if !option.Config.IsDualStack() {
		return ReservedIdentityWorld
	}
	if addr.Is6() {
		return ReservedIdentityWorldIPv6
	}
	return ReservedIdentityWorldIPv4
}
// iterateReservedIdentityLabels iterates over all reservedIdentityLabels and
// executes the given function for each key, value pair in
// reservedIdentityLabels. Iteration order is unspecified.
func iterateReservedIdentityLabels(f func(_ NumericIdentity, _ labels.Labels)) {
	for ni, lbls := range reservedIdentityLabels {
		f(ni, lbls)
	}
}
// HasLocalScope returns true if the identity is in the Local (CIDR) scope.
func (id NumericIdentity) HasLocalScope() bool {
	return id.Scope() == IdentityScopeLocal
}
// HasRemoteNodeScope returns true if the identity is in the remote-node scope.
func (id NumericIdentity) HasRemoteNodeScope() bool {
	return id.Scope() == IdentityScopeRemoteNode
}
// Scope returns the identity scope of this given numeric ID (the top 8 bits).
func (id NumericIdentity) Scope() NumericIdentity {
	return id & IdentityScopeMask
}
// IsWorld returns true if the identity is one of the world identities. The
// IPv4/IPv6-specific world identities only count when running in dual-stack
// mode.
func (id NumericIdentity) IsWorld() bool {
	switch id {
	case ReservedIdentityWorld:
		return true
	case ReservedIdentityWorldIPv4, ReservedIdentityWorldIPv6:
		return option.Config.IsDualStack()
	}
	return false
}
// IsCluster returns true if the identity is a cluster identity by excluding all
// identities that are known to be non-cluster identities (world and
// local/CIDR scope).
// NOTE: keep this and bpf identity_is_cluster() in sync!
func (id NumericIdentity) IsCluster() bool {
	return !id.IsWorld() && !id.HasLocalScope()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package identity
import (
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
)
var (
	// cacheMU protects the following map.
	cacheMU lock.RWMutex
	// reservedIdentityCache maps all reserved identities from their
	// numeric identity to their corresponding identity. Populated at init
	// time and by AddReservedIdentity*/wellKnownIdentities.add.
	reservedIdentityCache = map[NumericIdentity]*Identity{}
)
// AddReservedIdentity adds the reserved numeric identity with the respective
// label into the reserved identity cache and returns the resulting Identity.
// This identity must not be mutated!
func AddReservedIdentity(ni NumericIdentity, lbl string) *Identity {
	id := NewIdentity(ni, labels.Labels{lbl: labels.NewLabel(lbl, "", labels.LabelSourceReserved)})
	cacheMU.Lock()
	defer cacheMU.Unlock()
	reservedIdentityCache[ni] = id
	return id
}
// AddReservedIdentityWithLabels is the same as AddReservedIdentity but accepts
// multiple labels. Returns the resulting Identity.
// This identity must not be mutated!
func AddReservedIdentityWithLabels(ni NumericIdentity, lbls labels.Labels) *Identity {
	id := NewIdentity(ni, lbls)
	cacheMU.Lock()
	defer cacheMU.Unlock()
	reservedIdentityCache[ni] = id
	return id
}
// LookupReservedIdentity looks up a reserved identity by its NumericIdentity
// and returns it if found. Returns nil if not found.
// This identity must not be mutated!
func LookupReservedIdentity(ni NumericIdentity) *Identity {
	cacheMU.RLock()
	defer cacheMU.RUnlock()
	return reservedIdentityCache[ni]
}
// init seeds the reserved identity cache with every entry of
// reservedIdentityLabels at package load time.
func init() {
	iterateReservedIdentityLabels(func(ni NumericIdentity, lbls labels.Labels) {
		AddReservedIdentityWithLabels(ni, lbls)
	})
}
// IterateReservedIdentities iterates over all reserved identities and
// executes the given function for each identity while holding the cache read
// lock; f must not call back into functions that take cacheMU.
func IterateReservedIdentities(f func(_ NumericIdentity, _ *Identity)) {
	cacheMU.RLock()
	defer cacheMU.RUnlock()
	for ni, id := range reservedIdentityCache {
		f(ni, id)
	}
}
// ListReservedIdentities returns a snapshot of the reserved identity cache as
// an IdentityMap from numeric identity to label array.
func ListReservedIdentities() IdentityMap {
	cacheMU.RLock()
	defer cacheMU.RUnlock()
	out := make(IdentityMap, len(reservedIdentityCache))
	for ni, identity := range reservedIdentityCache {
		out[ni] = identity.LabelArray
	}
	return out
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package idpool
import (
"strconv"
"github.com/cilium/cilium/pkg/lock"
)
// ID is a numeric identifier handed out by an IDPool.
type ID uint64

// NoID is a special ID that represents "no ID available".
const NoID ID = 0

// String returns the base-10 string representation of an allocated ID.
func (id ID) String() string {
	return strconv.FormatUint(uint64(id), 10)
}
// IDPool represents a pool of IDs that can be managed concurrently
// via local usage and external events.
//
// An intermediate state (leased) is introduced to the life cycle
// of an ID in the pool, in order to prevent lost updates to the
// pool that can occur as a result of employing both management schemes
// simultaneously.
// Local usage of an ID becomes a two stage process of leasing
// the ID from the pool, and later, Use()ing or Release()ing the ID on
// the pool upon successful or unsuccessful usage respectively,
//
// The table below shows the state transitions in the ID's life cycle.
// In the case of LeaseAvailableID() the ID is returned rather
// than provided as an input to the operation.
// All ID's begin in the available state.
/*
---------------------------------------------------------------------
|state\event   | LeaseAvailableID | Release | Use | Insert | Remove |
---------------------------------------------------------------------
|1 available   |        2         |    *    |  *  |   *    |   3    |
---------------------------------------------------------------------
|2 leased      |        **        |    1    |  3  |   *    |   3    |
---------------------------------------------------------------------
|3 unavailable |        **        |    *    |  *  |   1    |   *    |
---------------------------------------------------------------------
*  The event has no effect.
** This is guaranteed never to occur.
*/
type IDPool struct {
	// mutex protects all IDPool data structures
	mutex lock.Mutex
	// min is the lower limit when leasing IDs. The pool will never
	// return an ID lesser than this value.
	minID ID
	// max is the upper limit when leasing IDs. The pool will never
	// return an ID greater than this value.
	maxID ID
	// idCache is a cache of IDs backing the pool.
	idCache *idCache
}
// NewIDPool returns a new ID pool covering the inclusive range
// [minID, maxID]; every ID in the range starts out available.
func NewIDPool(minID ID, maxID ID) *IDPool {
	return &IDPool{
		minID:   minID,
		maxID:   maxID,
		idCache: newIDCache(minID, maxID),
	}
}
// LeaseAvailableID returns an available ID at random from the pool.
// Returns an ID or NoID if no there is no available ID in the pool.
// The leased ID must later be confirmed with Use() or returned with Release().
func (p *IDPool) LeaseAvailableID() ID {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	return p.idCache.leaseAvailableID()
}
// AllocateID returns a random available ID. Unlike LeaseAvailableID, the ID is
// immediately marked for use and there is no need to call Use().
// Returns NoID if the pool is exhausted.
func (p *IDPool) AllocateID() ID {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	return p.idCache.allocateID()
}
// Release returns a leased ID back to the pool.
// This operation accounts for IDs that were previously leased
// from the pool but were unused, e.g if allocation was unsuccessful.
// Thus, it has no effect if the ID is not currently leased in the
// pool, or the pool has since been refreshed.
//
// Returns true if the ID was returned back to the pool as
// a result of this call.
func (p *IDPool) Release(id ID) bool {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	return p.idCache.release(id)
}
// Use makes a leased ID unavailable in the pool and has no effect
// otherwise. Returns true if the ID was made unavailable
// as a result of this call.
func (p *IDPool) Use(id ID) bool {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	return p.idCache.use(id)
}
// Insert makes an unavailable ID available in the pool
// and has no effect otherwise. Returns true if the ID
// was added back to the pool.
func (p *IDPool) Insert(id ID) bool {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	return p.idCache.insert(id)
}
// Remove makes an ID unavailable in the pool, regardless of whether it was
// available or leased. Returns true if the ID was previously available.
func (p *IDPool) Remove(id ID) bool {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	return p.idCache.remove(id)
}
// idCache holds the available and leased ID sets backing an IDPool.
// It performs no locking itself; IDPool serializes all access.
type idCache struct {
	// ids is the set of IDs currently available in this idCache.
	ids map[ID]struct{}
	// leased is the set of IDs that are leased in this idCache.
	leased map[ID]struct{}
}
// newIDCache creates an idCache with every ID in [minID, maxID] available
// and nothing leased.
func newIDCache(minID ID, maxID ID) *idCache {
	// An inverted range (maxID < minID) yields an empty cache; the max(...)
	// guards the map size hint against the negative value that the wrapped
	// unsigned subtraction produces in that case.
	capacity := max(int(maxID-minID+1), 0)
	c := &idCache{
		ids:    make(map[ID]struct{}, capacity),
		leased: make(map[ID]struct{}),
	}
	// The `id < maxID+1` form (rather than `id <= maxID`) is deliberate:
	// if maxID is the maximum uint64, maxID+1 wraps to 0 and the loop
	// terminates instead of running forever.
	for id := minID; id < maxID+1; id++ {
		c.ids[id] = struct{}{}
	}
	return c
}
// allocateID returns a random available ID without leasing it, removing it
// from the available set. Relies on Go's randomized map iteration order to
// pick an arbitrary entry. Returns NoID when the cache is empty.
func (c *idCache) allocateID() ID {
	for id := range c.ids {
		delete(c.ids, id)
		return id
	}
	return NoID
}
// leaseAvailableID returns a random available ID, moving it from the
// available set to the leased set. Returns NoID when the cache is empty.
func (c *idCache) leaseAvailableID() ID {
	id := c.allocateID()
	if id == NoID {
		return NoID
	}
	// Mark as leased
	c.leased[id] = struct{}{}
	return id
}
// release makes the ID available again if it is currently
// leased and has no effect otherwise. Returns true if the
// ID was made available as a result of this call.
func (c *idCache) release(id ID) bool {
	_, leased := c.leased[id]
	if !leased {
		return false
	}
	delete(c.leased, id)
	c.insert(id)
	return true
}
// use makes the ID unavailable if it is currently
// leased and has no effect otherwise. Returns true if the
// ID was made unavailable as a result of this call.
func (c *idCache) use(id ID) bool {
	if _, leased := c.leased[id]; leased {
		delete(c.leased, id)
		return true
	}
	return false
}
// insert adds the ID into the available set unless it is already available
// or currently leased. Returns true if the ID was added to the cache.
func (c *idCache) insert(id ID) bool {
	_, available := c.ids[id]
	_, leased := c.leased[id]
	if available || leased {
		return false
	}
	c.ids[id] = struct{}{}
	return true
}
// remove deletes the ID from both the leased and the available set.
// Returns true if the ID was in the available set.
func (c *idCache) remove(id ID) bool {
	// Deleting an absent key is a no-op, so both deletes are unconditional.
	delete(c.leased, id)
	_, wasAvailable := c.ids[id]
	delete(c.ids, id)
	return wasAvailable
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"net"
"net/netip"
"go4.org/netipx"
)
// ParseCIDRs fetches all CIDRs referred to by the specified slice and returns
// them as regular golang CIDR objects. Entries that are bare IP addresses are
// converted to full-length prefixes; entries that parse as neither are
// collected in invalid.
//
// Deprecated. Consider using ParsePrefixes() instead.
func ParseCIDRs(cidrs []string) (valid []*net.IPNet, invalid []string) {
	valid = make([]*net.IPNet, 0, len(cidrs))
	invalid = make([]string, 0, len(cidrs))
	for _, cidr := range cidrs {
		_, prefix, err := net.ParseCIDR(cidr)
		if err != nil {
			// Likely the CIDR is specified in host format.
			ip := net.ParseIP(cidr)
			if ip == nil {
				invalid = append(invalid, cidr)
				continue
			}
			// (The former `else` branch was redundant after the
			// terminating `continue` above.)
			prefix = IPToPrefix(ip)
		}
		if prefix != nil {
			valid = append(valid, prefix)
		}
	}
	return valid, invalid
}
// ParsePrefixes parses all CIDRs referred to by the specified slice and
// returns them as regular golang netip.Prefix objects. Bare addresses are
// accepted and widened to full-length prefixes; everything else lands in
// invalid with the corresponding parse error in errors. Returned prefixes
// are masked to their network address.
func ParsePrefixes(cidrs []string) (valid []netip.Prefix, invalid []string, errors []error) {
	valid = make([]netip.Prefix, 0, len(cidrs))
	invalid = make([]string, 0, len(cidrs))
	errors = make([]error, 0, len(cidrs))
	for _, c := range cidrs {
		prefix, err := netip.ParsePrefix(c)
		if err != nil {
			// Not CIDR notation; retry as a bare address with a
			// full-length mask.
			addr, addrErr := netip.ParseAddr(c)
			if addrErr != nil {
				invalid = append(invalid, c)
				errors = append(errors, addrErr)
				continue
			}
			prefix = netip.PrefixFrom(addr, addr.BitLen())
		}
		valid = append(valid, prefix.Masked())
	}
	return valid, invalid, errors
}
// IPToNetPrefix is a convenience helper for migrating from the older 'net'
// standard library types to the newer 'netip' types. Use this to plug the new
// types in newer code into older types in older code during the migration.
// Returns the zero netip.Prefix when the input cannot be converted.
//
// Note: This function assumes given ip is not an IPv4 mapped IPv6 address.
//
// The problem behind this is that when we convert the IPv4 net.IP address with
// netip.AddrFromSlice, the address is interpreted as an IPv4 mapped IPv6 address in some
// cases.
//
// For example, when we do netip.AddrFromSlice(net.ParseIP("1.1.1.1")), it is interpreted
// as an IPv6 address "::ffff:1.1.1.1". This is because 1) net.IP created with
// net.ParseIP(IPv4 string) holds IPv4 address as an IPv4 mapped IPv6 address internally
// and 2) netip.AddrFromSlice recognizes address family with length of the slice (4-byte =
// IPv4 and 16-byte = IPv6).
//
// By using netipx.FromStdIP, we can preserve the address family, but since we cannot distinguish
// IPv4 and IPv4 mapped IPv6 address only from net.IP value (see #37921 on golang/go) we
// need an assumption that given net.IP is not an IPv4 mapped IPv6 address.
func IPToNetPrefix(ip net.IP) netip.Prefix {
	a, ok := netipx.FromStdIP(ip)
	if !ok {
		return netip.Prefix{}
	}
	// Widen the single address to a full-length (/32 or /128) prefix.
	return netip.PrefixFrom(a, a.BitLen())
}
// PrefixesContains checks that any prefix in prefix *fully* contains addr.
func PrefixesContains(prefixes []netip.Prefix, addr netip.Addr) bool {
for _, pfx := range prefixes {
if pfx.Contains(addr) {
return true
}
}
return false
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"bytes"
"encoding/binary"
"math/big"
"net"
"net/netip"
"slices"
"sort"
"go4.org/netipx"
)
const (
	// ipv4BitLen is the number of bits in an IPv4 address (32).
	ipv4BitLen = 8 * net.IPv4len
	// ipv6BitLen is the number of bits in an IPv6 address (128).
	ipv6BitLen = 8 * net.IPv6len
)
// CountIPsInCIDR takes a RFC4632/RFC4291-formatted IPv4/IPv6 CIDR and
// determines how many IP addresses reside within that CIDR.
// The first and the last (base and broadcast) IPs are excluded.
//
// Returns 0 if the input CIDR cannot be parsed.
func CountIPsInCIDR(ipnet *net.IPNet) *big.Int {
subnet, size := ipnet.Mask.Size()
if subnet == size {
return big.NewInt(0)
}
return big.NewInt(0).
Sub(
big.NewInt(2).Exp(big.NewInt(2),
big.NewInt(int64(size-subnet)), nil),
big.NewInt(2),
)
}
var (
	// v4Mappedv6Prefix is the 12-byte prefix of an IPv4-mapped IPv6 address
	// (::ffff:0:0/96); appending the 4 IPv4 bytes yields the 16-byte mapped
	// form. NOTE(review): the original comment cited RFC2765; the mapped
	// format is defined in RFC4291 section 2.5.5.2 — confirm the intended
	// reference.
	v4Mappedv6Prefix = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff}
	// ipv4LeadingZeroes pads big.Int byte output (which drops leading zero
	// bytes) back to the 16-byte IPv4-mapped layout; the 0xffff marker and
	// the IPv4 bytes come from the big.Int value itself.
	ipv4LeadingZeroes = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
	// defaultIPv4 is 0.0.0.0 in IPv4-mapped IPv6 form.
	defaultIPv4 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}
	// defaultIPv6 is the IPv6 unspecified address (::).
	defaultIPv6 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
	// upperIPv4 is 255.255.255.255 in IPv4-mapped IPv6 form.
	upperIPv4 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 255, 255, 255, 255}
	// upperIPv6 is the maximum IPv6 address (ffff:...:ffff).
	upperIPv6 = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
)
// NetsByMask is used to sort a list of IP networks by the size of their masks.
// Implements sort.Interface.
type NetsByMask []*net.IPNet
func (s NetsByMask) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s NetsByMask) Less(i, j int) bool {
iPrefixSize, _ := s[i].Mask.Size()
jPrefixSize, _ := s[j].Mask.Size()
if iPrefixSize == jPrefixSize {
return bytes.Compare(s[i].IP, s[j].IP) < 0
}
return iPrefixSize < jPrefixSize
}
func (s NetsByMask) Len() int {
return len(s)
}
// Assert that NetsByMask implements sort.Interface.
var _ sort.Interface = NetsByMask{}
// Assert that NetsByRange implements sort.Interface.
var _ sort.Interface = NetsByRange{}

// NetsByRange is used to sort a list of ranges, first by their last IPs, then
// by their first IPs.
// Implements sort.Interface.
type NetsByRange []*netWithRange

func (s NetsByRange) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less orders ranges by ascending last IP; ranges with equal last IPs are
// ordered by ascending first IP.
func (s NetsByRange) Less(i, j int) bool {
	// First compare by last IP.
	lastComparison := bytes.Compare(*s[i].Last, *s[j].Last)
	if lastComparison < 0 {
		return true
	} else if lastComparison > 0 {
		return false
	}

	// Then compare by first IP.
	// Bug fix: this previously compared s[i].First against itself, so ties on
	// the last IP were never broken by the first IP.
	firstComparison := bytes.Compare(*s[i].First, *s[j].First)
	if firstComparison < 0 {
		return true
	} else if firstComparison > 0 {
		return false
	}

	// First and last IPs are the same, so the ranges are equal, and s[i]
	// is not less than s[j].
	return false
}

func (s NetsByRange) Len() int {
	return len(s)
}
// removeRedundantCIDRs removes CIDRs which are contained within other given CIDRs.
func removeRedundantCIDRs(CIDRs []*net.IPNet) []*net.IPNet {
redundant := make(map[int]bool)
for j, CIDR := range CIDRs {
if redundant[j] {
continue // Skip redundant CIDRs
}
for i, CIDR2 := range CIDRs {
// Skip checking CIDR aganst itself or if CIDR has already been deemed redundant.
if i == j || redundant[i] {
continue
}
if CIDR.Contains(CIDR2.IP) {
redundant[i] = true
}
}
}
if len(redundant) == 0 {
return CIDRs
}
if len(redundant) == 1 {
for i := range redundant {
return slices.Delete(CIDRs, i, i+1)
}
}
newCIDRs := make([]*net.IPNet, 0, len(CIDRs)-len(redundant))
for i := range CIDRs {
if redundant[i] {
continue
}
newCIDRs = append(newCIDRs, CIDRs[i])
}
return newCIDRs
}
// RemoveCIDRs removes the specified CIDRs from another set of CIDRs. If a CIDR
// to remove is not contained within the CIDR, the CIDR to remove is ignored. A
// slice of CIDRs is returned which contains the set of CIDRs provided minus
// the set of CIDRs which were removed. Both input slices may be modified by
// calling this function.
func RemoveCIDRs(allowCIDRs, removeCIDRs []*net.IPNet) []*net.IPNet {
	// Ensure that we iterate through the provided CIDRs in order of largest
	// subnet first.
	sort.Sort(NetsByMask(removeCIDRs))

	// Remove CIDRs which are contained within CIDRs that we want to remove;
	// such CIDRs are redundant.
	removeCIDRs = removeRedundantCIDRs(removeCIDRs)

	// Remove redundant allowCIDR so that all allowCIDRs are disjoint
	allowCIDRs = removeRedundantCIDRs(allowCIDRs)

	for _, remove := range removeCIDRs {
		i := 0
		// Manual index loop because the slice is edited while iterating:
		// deletions keep 'i' in place so the element that slid into slot 'i'
		// (or the first appended replacement) is examined next.
		for i < len(allowCIDRs) {
			allowCIDR := allowCIDRs[i]

			// Only remove CIDR if it is contained in the subnet we are allowing.
			if allowCIDR.Contains(remove.IP.Mask(remove.Mask)) {
				nets := excludeContainedCIDR(allowCIDR, remove)

				// Remove CIDR that we have just processed and append new CIDRs
				// that we computed from removing the CIDR to remove.
				allowCIDRs = slices.Delete(allowCIDRs, i, i+1)
				allowCIDRs = append(allowCIDRs, nets...)
			} else if remove.Contains(allowCIDR.IP.Mask(allowCIDR.Mask)) {
				// If a CIDR that we want to remove contains a CIDR in the list
				// that is allowed, then we can just remove the CIDR to allow.
				allowCIDRs = slices.Delete(allowCIDRs, i, i+1)
			} else {
				// Advance only if CIDR at index 'i' was not removed
				i++
			}
		}
	}

	return allowCIDRs
}
// getNetworkPrefix computes, byte by byte, address AND NOT mask — i.e. the
// host-identifier portion of ipNet's address.
//
// NOTE(review): the indexing looks suspect and should be verified against
// callers:
//   - the IPv4 branch reads ipNet.IP[net.IPv6len-i-1], which assumes a
//     16-byte (IPv4-mapped) representation and would panic on the 4-byte
//     net.IP that net.ParseCIDR returns for IPv4 CIDRs;
//   - both branches pair the low-order address byte with the high-order mask
//     byte (i counts from the top of the mask but from the bottom of the
//     address) — confirm this asymmetry is intentional.
func getNetworkPrefix(ipNet *net.IPNet) *net.IP {
	var mask net.IP

	if ipNet.IP.To4() == nil {
		// IPv6: produce a 16-byte result.
		mask = make(net.IP, net.IPv6len)
		for i := range ipNet.Mask {
			mask[net.IPv6len-i-1] = ipNet.IP[net.IPv6len-i-1] & ^ipNet.Mask[i]
		}
	} else {
		// IPv4: produce a 4-byte result.
		mask = make(net.IP, net.IPv4len)
		for i := range net.IPv4len {
			mask[net.IPv4len-i-1] = ipNet.IP[net.IPv6len-i-1] & ^ipNet.Mask[i]
		}
	}

	return &mask
}
// excludeContainedCIDR returns a set of CIDRs that is equivalent to 'allowCIDR'
// except for 'removeCIDR', which must be a subset of 'allowCIDR'.
// Caller is responsible for only passing CIDRs of the same address family.
func excludeContainedCIDR(allowCIDR, removeCIDR *net.IPNet) []*net.IPNet {
// Get size of each CIDR mask.
allowSize, addrSize := allowCIDR.Mask.Size()
removeSize, _ := removeCIDR.Mask.Size()
// Removing a CIDR from itself should result into an empty set
if allowSize == removeSize && allowCIDR.IP.Equal(removeCIDR.IP) {
return nil
}
removeIPMasked := removeCIDR.IP.Mask(removeCIDR.Mask)
// Create CIDR prefixes with mask size of Y+1, Y+2 ... X where Y is the mask
// length of the CIDR prefix of allowCIDR from which we are excluding the CIDR
// prefix removeCIDR with mask length X.
allows := make([]*net.IPNet, 0, removeSize-allowSize)
// Scan bits from high to low, where 0th bit is the highest.
// For example, an allowCIDR of size 16 covers bits 0..15,
// so the new bit in the first new mask is 16th bit, for a mask size 17.
for bit := allowSize; bit < removeSize; bit++ {
newMaskSize := bit + 1 // bit numbering starts from 0, 0th bit needs mask of size 1
// The mask for each CIDR prefix is simply the masked removeCIDR with the lowest bit
// within the new mask size flipped.
newMask := net.CIDRMask(newMaskSize, addrSize)
newIPMasked := removeIPMasked.Mask(newMask)
flipNthHighestBit(newIPMasked, uint(bit))
newIPNet := net.IPNet{IP: newIPMasked, Mask: newMask}
allows = append(allows, &newIPNet)
}
return allows
}
// Flip the 'n'th highest bit in 'ip'. 'ip' is modified in place. 'n' is zero indexed.
func flipNthHighestBit(ip net.IP, n uint) {
i := n / 8
ip[i] = ip[i] ^ 0x80>>(n%8)
}
// ipNetToRange converts a CIDR into its [first IP, last IP] range. IPv4
// addresses are widened to the 16-byte IPv4-mapped IPv6 form so that callers
// can compare both families byte-wise.
func ipNetToRange(ipNet net.IPNet) netWithRange {
	// Work on copies so the caller's IPNet is not mutated.
	firstIP := make(net.IP, len(ipNet.IP))
	lastIP := make(net.IP, len(ipNet.IP))

	copy(firstIP, ipNet.IP)
	copy(lastIP, ipNet.IP)

	firstIP = firstIP.Mask(ipNet.Mask)
	lastIP = lastIP.Mask(ipNet.Mask)

	if firstIP.To4() != nil {
		// Prepend ::ffff: to obtain the 16-byte mapped representation. The
		// appends allocate fresh backing arrays (the literal's cap equals its
		// length, 12), so the shared prefix variable is not clobbered.
		firstIP = append(v4Mappedv6Prefix, firstIP...)
		lastIP = append(v4Mappedv6Prefix, lastIP...)
	}

	// Invert the mask and OR it into the masked address to set all host bits,
	// yielding the last address of the range. Indexing from the tail keeps a
	// 4-byte IPv4 mask aligned with the last 4 bytes of the 16-byte address.
	lastIPMask := make(net.IPMask, len(ipNet.Mask))
	copy(lastIPMask, ipNet.Mask)
	for i := range lastIPMask {
		lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1]
		lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1]
	}

	return netWithRange{First: &firstIP, Last: &lastIP, Network: &ipNet}
}
// PrefixCeil converts the given number of IPs to the minimum number of
// prefixes needed to host those IPs. 'multiple' indicates the number of IPs
// in a single prefix. Zero IPs require zero prefixes.
func PrefixCeil(numIPs int, multiple int) int {
	if numIPs == 0 {
		return 0
	}
	quot, rem := numIPs/multiple, numIPs%multiple
	// A positive remainder means one extra, partially-filled prefix.
	if rem > 0 {
		return quot + 1
	}
	return quot
}
// PrefixToIps converts the given prefix/CIDR block into the list of IPs it
// covers, as strings. When maxIPs is 0 all IPs in the prefix are returned;
// otherwise the result is limited to maxIPs addresses starting at the first
// IP of the CIDR. For example, 192.168.1.0/28 with 4 maxIPs yields
// 192.168.1.0, 192.168.1.1, 192.168.1.2 and 192.168.1.3.
// A parse failure returns a nil slice together with the error.
func PrefixToIps(prefixCidr string, maxIPs int) ([]string, error) {
	var ips []string

	_, network, err := net.ParseCIDR(prefixCidr)
	if err != nil {
		return ips, err
	}

	r := ipNetToRange(*network)
	// The limit is checked before each append, and the loop also stops once
	// the last IP of the prefix has been emitted, so the final address is
	// never skipped.
	for ip := *r.First; maxIPs == 0 || len(ips) < maxIPs; ip = getNextIP(ip) {
		ips = append(ips, ip.String())
		if ip.Equal(*r.Last) {
			break
		}
	}
	return ips, nil
}
// GetIPAtIndex gets the IP at the given index within the range of ipNet,
// starting at 0 for the first address. A negative index counts back from the
// end of the range: -1 is the last IP, -2 the one before it, and so on.
// Returns nil when the computed address falls outside ipNet.
func GetIPAtIndex(ipNet net.IPNet, index int64) net.IP {
	netRange := ipNetToRange(ipNet)
	val := big.NewInt(0)
	var ip net.IP
	if index >= 0 {
		ip = *netRange.First
	} else {
		// Start from the last IP and bump the (negative) offset so that
		// index -1 resolves to the last address itself.
		ip = *netRange.Last
		index++
	}
	if ip.To4() != nil {
		val.SetBytes(ip.To4())
	} else {
		val.SetBytes(ip)
	}
	val.Add(val, big.NewInt(index))
	// NOTE(review): big.Int.Bytes() drops leading zero bytes, so addresses
	// with high-order zero bytes (e.g. inside 0.0.0.0/8 or low IPv6 ranges)
	// may come back shorter than expected and fail Contains — confirm whether
	// callers can hit this.
	if ipNet.Contains(val.Bytes()) {
		return val.Bytes()
	}
	return nil
}
// getPreviousIP returns the IP immediately preceding the given IP. The
// all-zero IPv4 (0.0.0.0) and IPv6 (::) addresses act as floors and are
// returned unchanged.
//
// NOTE(review): for a raw 4-byte IPv4 input the loop's lower bound (12)
// exceeds the last index (3), so the input would be returned un-decremented;
// callers in this file appear to always pass 16-byte (IPv4-mapped) addresses
// produced by ipNetToRange — confirm.
func getPreviousIP(ip net.IP) net.IP {
	// Cannot go lower than zero!
	if ip.Equal(defaultIPv4) || ip.Equal(defaultIPv6) {
		return ip
	}

	previousIP := make(net.IP, len(ip))
	copy(previousIP, ip)

	var overflow bool
	var lowerByteBound int
	if ip.To4() != nil {
		// For (mapped) IPv4 only the trailing 4 bytes may change.
		lowerByteBound = net.IPv6len - net.IPv4len
	} else {
		lowerByteBound = 0
	}
	// Decrement from the least significant byte upwards, borrowing while a
	// byte underflows (0 -> 255).
	for i := len(ip) - 1; i >= lowerByteBound; i-- {
		if overflow || i == len(ip)-1 {
			previousIP[i]--
		}
		// Track if we have overflowed and thus need to continue subtracting.
		if ip[i] == 0 && previousIP[i] == 255 {
			overflow = true
		} else {
			overflow = false
		}
	}
	return previousIP
}
// getNextIP returns the next IP from the given IP address. If the given IP is
// the last IP of a v4 or v6 range, the same IP is returned. Inputs that are
// neither 4 nor 16 bytes long are returned unchanged.
func getNextIP(ip net.IP) net.IP {
	if ip.Equal(upperIPv4) || ip.Equal(upperIPv6) {
		return ip
	}

	nextIP := make(net.IP, len(ip))
	switch len(ip) {
	case net.IPv4len:
		// Increment the address as a single big-endian uint32.
		ipU32 := binary.BigEndian.Uint32(ip)
		ipU32++
		binary.BigEndian.PutUint32(nextIP, ipU32)
		return nextIP
	case net.IPv6len:
		// Increment the low 64 bits; on wrap-around carry into the high 64
		// bits, otherwise copy the high half through unchanged.
		ipU64 := binary.BigEndian.Uint64(ip[net.IPv6len/2:])
		ipU64++
		binary.BigEndian.PutUint64(nextIP[net.IPv6len/2:], ipU64)
		if ipU64 == 0 {
			ipU64 = binary.BigEndian.Uint64(ip[:net.IPv6len/2])
			ipU64++
			binary.BigEndian.PutUint64(nextIP[:net.IPv6len/2], ipU64)
		} else {
			copy(nextIP[:net.IPv6len/2], ip[:net.IPv6len/2])
		}
		return nextIP
	default:
		return ip
	}
}
// createSpanningCIDR returns the smallest single CIDR that covers the whole
// range [r.First, r.Last]. First/Last are expected in the representation
// produced by ipNetToRange (IPv4 as 16-byte IPv4-mapped IPv6).
func createSpanningCIDR(r netWithRange) net.IPNet {
	// Don't want to modify the values of the provided range, so make copies.
	lowest := *r.First
	highest := *r.Last

	var isIPv4 bool
	var spanningMaskSize, bitLen, byteLen int
	if lowest.To4() != nil {
		isIPv4 = true
		bitLen = ipv4BitLen
		byteLen = net.IPv4len
	} else {
		bitLen = ipv6BitLen
		byteLen = net.IPv6len
	}

	if isIPv4 {
		spanningMaskSize = ipv4BitLen
	} else {
		spanningMaskSize = ipv6BitLen
	}

	// Convert to big Int so we can easily do bitshifting on the IP addresses,
	// since golang only provides up to 64-bit unsigned integers.
	lowestBig := big.NewInt(0).SetBytes(lowest)
	highestBig := big.NewInt(0).SetBytes(highest)

	// Starting from largest mask / smallest range possible, apply a mask one bit
	// larger in each iteration to the upper bound in the range until we have
	// masked enough to pass the lower bound in the range. This
	// gives us the size of the prefix for the spanning CIDR to return as
	// well as the IP for the CIDR prefix of the spanning CIDR.
	for spanningMaskSize > 0 && lowestBig.Cmp(highestBig) < 0 {
		spanningMaskSize--
		mask := big.NewInt(1)
		mask = mask.Lsh(mask, uint(bitLen-spanningMaskSize))
		// Negating the power of two gives -2^k; big.Int.And treats negative
		// operands as infinite-precision two's complement, so ANDing clears
		// the low (bitLen-spanningMaskSize) bits of highestBig.
		mask = mask.Mul(mask, big.NewInt(-1))
		highestBig = highestBig.And(highestBig, mask)
	}

	// If ipv4, need to append 0s because math.Big gets rid of preceding zeroes.
	// (The 0xffff marker of the mapped form survives inside the big.Int, so
	// 10 zero bytes restore the full 16-byte layout.)
	if isIPv4 {
		highest = append(ipv4LeadingZeroes, highestBig.Bytes()...)
	} else {
		highest = highestBig.Bytes()
	}

	// Int does not store leading zeroes.
	if len(highest) == 0 {
		highest = make([]byte, byteLen)
	}

	newNet := net.IPNet{IP: highest, Mask: net.CIDRMask(spanningMaskSize, bitLen)}
	return newNet
}
// netWithRange bundles a CIDR with its precomputed [First, Last] IP range as
// produced by ipNetToRange (IPv4 addresses in 16-byte IPv4-mapped form).
// Network is set to nil by mergeAdjacentCIDRs when two ranges are combined
// and no longer correspond to a single CIDR.
type netWithRange struct {
	First   *net.IP
	Last    *net.IP
	Network *net.IPNet
}
// mergeAdjacentCIDRs merges overlapping or directly adjacent ranges into
// single, wider ranges. Merged entries get Network == nil because they may no
// longer map to one CIDR. The input slice is sorted and modified in place.
func mergeAdjacentCIDRs(ranges []*netWithRange) []*netWithRange {
	// Sort the ranges. This sorts first by the last IP, then first IP, then by
	// the IP network in the list itself
	sort.Sort(NetsByRange(ranges))

	// Merge adjacent CIDRs if possible.
	for i := len(ranges) - 1; i > 0; i-- {
		first1 := getPreviousIP(*ranges[i].First)

		// Since the networks are sorted, we know that if a network in the list
		// is adjacent to another one in the list, it will be the network next
		// to it in the list. If the previous IP of the current network we are
		// processing overlaps with the last IP of the previous network in the
		// list, then we can merge the two ranges together.
		if bytes.Compare(first1, *ranges[i-1].Last) <= 0 {
			// Pick the minimum of the first two IPs to represent the start
			// of the new range.
			var minFirstIP *net.IP
			if bytes.Compare(*ranges[i-1].First, *ranges[i].First) < 0 {
				minFirstIP = ranges[i-1].First
			} else {
				minFirstIP = ranges[i].First
			}

			// Always take the last IP of the ith IP.
			newRangeLast := make(net.IP, len(*ranges[i].Last))
			copy(newRangeLast, *ranges[i].Last)

			newRangeFirst := make(net.IP, len(*minFirstIP))
			copy(newRangeFirst, *minFirstIP)

			// Can't set the network field because since we are combining a
			// range of IPs, and we don't yet know what CIDR prefix(es) represent
			// the new range.
			ranges[i-1] = &netWithRange{First: &newRangeFirst, Last: &newRangeLast, Network: nil}

			// Since we have combined ranges[i] with the preceding item in the
			// ranges list, we can delete ranges[i] from the slice.
			ranges = slices.Delete(ranges, i, i+1)
		}
	}
	return ranges
}
// coalesceRanges converts ranges into an equivalent list of net.IPNets.
// All IPs in ranges should be of the same address family (IPv4 or IPv6).
func coalesceRanges(ranges []*netWithRange) []*net.IPNet {
	cidrs := []*net.IPNet{}
	for _, r := range ranges {
		if r.Network == nil {
			// The range was produced by joining two CIDRs, so compute the
			// set of CIDR prefixes that represents it.
			cidrs = append(cidrs, rangeToCIDRs(*r.First, *r.Last)...)
			continue
		}
		// Untouched ranges still map one-to-one onto their original CIDR, so
		// that CIDR can be emitted directly.
		cidrs = append(cidrs, r.Network)
	}
	return cidrs
}
// CoalesceCIDRs transforms the provided list of CIDRs into the most-minimal
// equivalent set of IPv4 and IPv6 CIDRs: CIDRs that are subnets of others are
// dropped, and adjacent or overlapping CIDRs are merged. The IPv4 result is
// returned first, the IPv6 result second.
//
// Note: this algorithm was ported from the Python library netaddr.
// https://github.com/drkjam/netaddr .
func CoalesceCIDRs(cidrs []*net.IPNet) ([]*net.IPNet, []*net.IPNet) {
	v4Ranges := []*netWithRange{}
	v6Ranges := []*netWithRange{}
	// Split the input by address family, converting each CIDR to its range.
	for _, network := range cidrs {
		r := ipNetToRange(*network)
		if network.IP.To4() != nil {
			v4Ranges = append(v4Ranges, &r)
		} else {
			v6Ranges = append(v6Ranges, &r)
		}
	}
	return coalesceRanges(mergeAdjacentCIDRs(v4Ranges)), coalesceRanges(mergeAdjacentCIDRs(v6Ranges))
}
// rangeToCIDRs converts the range of IPs covered by firstIP and lastIP to
// a list of CIDRs that contains all of the IPs covered by the range.
func rangeToCIDRs(firstIP, lastIP net.IP) []*net.IPNet {
	// First, create a CIDR that spans both IPs.
	spanningCIDR := createSpanningCIDR(netWithRange{&firstIP, &lastIP, nil})
	spanningRange := ipNetToRange(spanningCIDR)
	firstIPSpanning := spanningRange.First
	lastIPSpanning := spanningRange.Last

	cidrList := []*net.IPNet{}

	// If the first IP of the spanning CIDR passes the lower bound (firstIP),
	// we need to split the spanning CIDR and only take the IPs that are
	// greater than the value which we split on, as we do not want the lesser
	// values since they are less than the lower-bound (firstIP).
	if bytes.Compare(*firstIPSpanning, firstIP) < 0 {
		// Split on the previous IP of the first IP so that the right list of IPs
		// of the partition includes the firstIP.
		prevFirstRangeIP := getPreviousIP(firstIP)
		var bitLen int
		if prevFirstRangeIP.To4() != nil {
			bitLen = ipv4BitLen
		} else {
			bitLen = ipv6BitLen
		}
		_, _, right := PartitionCIDR(spanningCIDR, net.IPNet{IP: prevFirstRangeIP, Mask: net.CIDRMask(bitLen, bitLen)})

		// Append all CIDRs but the first, as this CIDR includes the upper
		// bound of the spanning CIDR, which we still need to partition on.
		// NOTE(review): right[0] assumes the partition produced at least one
		// right-hand CIDR; this holds because the split point lies strictly
		// inside the spanning CIDR here — confirm for edge ranges.
		cidrList = append(cidrList, right...)
		spanningCIDR = *right[0]
		cidrList = cidrList[1:]
	}

	// Conversely, if the last IP of the spanning CIDR passes the upper bound
	// (lastIP), we need to split the spanning CIDR and only take the IPs that
	// are greater than the value which we split on, as we do not want the greater
	// values since they are greater than the upper-bound (lastIP).
	if bytes.Compare(*lastIPSpanning, lastIP) > 0 {
		// Split on the next IP of the last IP so that the left list of IPs
		// of the partition include the lastIP.
		nextFirstRangeIP := getNextIP(lastIP)
		var bitLen int
		if nextFirstRangeIP.To4() != nil {
			bitLen = ipv4BitLen
		} else {
			bitLen = ipv6BitLen
		}
		left, _, _ := PartitionCIDR(spanningCIDR, net.IPNet{IP: nextFirstRangeIP, Mask: net.CIDRMask(bitLen, bitLen)})
		cidrList = append(cidrList, left...)
	} else {
		// Otherwise, there is no need to partition; just use add the spanning
		// CIDR to the list of networks.
		cidrList = append(cidrList, &spanningCIDR)
	}
	return cidrList
}
// PartitionCIDR returns a list of IP Networks partitioned upon excludeCIDR.
// The first list contains the networks to the left of the excludeCIDR in the
// partition, the second is a list containing the excludeCIDR itself if it is
// contained within the targetCIDR (nil otherwise), and the
// third is a list containing the networks to the right of the excludeCIDR in
// the partition.
func PartitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, []*net.IPNet, []*net.IPNet) {
	var targetIsIPv4 bool
	if targetCIDR.IP.To4() != nil {
		targetIsIPv4 = true
	}

	targetIPRange := ipNetToRange(targetCIDR)
	excludeIPRange := ipNetToRange(excludeCIDR)

	targetFirstIP := *targetIPRange.First
	targetLastIP := *targetIPRange.Last

	excludeFirstIP := *excludeIPRange.First
	excludeLastIP := *excludeIPRange.Last

	targetMaskSize, _ := targetCIDR.Mask.Size()
	excludeMaskSize, _ := excludeCIDR.Mask.Size()

	// Disjoint ranges: everything lands on one side of the partition.
	if bytes.Compare(excludeLastIP, targetFirstIP) < 0 {
		return nil, nil, []*net.IPNet{&targetCIDR}
	} else if bytes.Compare(targetLastIP, excludeFirstIP) < 0 {
		return []*net.IPNet{&targetCIDR}, nil, nil
	}

	// A coarser-or-equal exclude prefix swallows the whole target.
	if targetMaskSize >= excludeMaskSize {
		return nil, []*net.IPNet{&targetCIDR}, nil
	}

	left := []*net.IPNet{}
	right := []*net.IPNet{}

	newPrefixLen := targetMaskSize + 1

	targetFirstCopy := make(net.IP, len(targetFirstIP))
	copy(targetFirstCopy, targetFirstIP)

	// NOTE(review): iLowerOld is assigned but never read afterwards — it
	// appears to be dead code.
	iLowerOld := make(net.IP, len(targetFirstCopy))
	copy(iLowerOld, targetFirstCopy)

	// Since golang only supports up to unsigned 64-bit integers, and we need
	// to perform addition on addresses, use math/big library, which allows
	// for manipulation of large integers.

	// Used to track the current lower and upper bounds of the ranges to compare
	// to excludeCIDR.
	iLower := big.NewInt(0)
	iUpper := big.NewInt(0)
	iLower = iLower.SetBytes(targetFirstCopy)

	var bitLen int

	if targetIsIPv4 {
		bitLen = ipv4BitLen
	} else {
		bitLen = ipv6BitLen
	}
	shiftAmount := (uint)(bitLen - newPrefixLen)

	targetIPInt := big.NewInt(0)
	targetIPInt.SetBytes(targetFirstIP.To16())

	exp := big.NewInt(0)

	// Use left shift for exponentiation
	exp = exp.Lsh(big.NewInt(1), shiftAmount)
	iUpper = iUpper.Add(targetIPInt, exp)

	matched := big.NewInt(0)

	// Descend the prefix tree one bit at a time: at each level emit the half
	// that does not contain excludeCIDR and recurse (iteratively) into the
	// half that does, until the exclude prefix length is reached.
	for excludeMaskSize >= newPrefixLen {
		// Append leading zeros to IPv4 addresses, as math.Big.Int does not
		// append them when the IP address is copied from a byte array to
		// math.Big.Int. Leading zeroes are required for parsing IPv4 addresses
		// for use with net.IP / net.IPNet.
		var iUpperBytes, iLowerBytes []byte
		if targetIsIPv4 {
			iUpperBytes = append(ipv4LeadingZeroes, iUpper.Bytes()...)
			iLowerBytes = append(ipv4LeadingZeroes, iLower.Bytes()...)
		} else {
			iUpperBytesLen := len(iUpper.Bytes())
			// Make sure that the number of bytes in the array matches what net
			// package expects, as big package doesn't append leading zeroes.
			if iUpperBytesLen != net.IPv6len {
				numZeroesToAppend := net.IPv6len - iUpperBytesLen
				zeroBytes := make([]byte, numZeroesToAppend)
				iUpperBytes = append(zeroBytes, iUpper.Bytes()...)
			} else {
				iUpperBytes = iUpper.Bytes()
			}

			iLowerBytesLen := len(iLower.Bytes())
			if iLowerBytesLen != net.IPv6len {
				numZeroesToAppend := net.IPv6len - iLowerBytesLen
				zeroBytes := make([]byte, numZeroesToAppend)
				iLowerBytes = append(zeroBytes, iLower.Bytes()...)
			} else {
				iLowerBytes = iLower.Bytes()
			}
		}
		// If the IP we are excluding over is of a higher value than the current
		// CIDR prefix we are generating, add the CIDR prefix to the set of IPs
		// to the left of the exclude CIDR
		if bytes.Compare(excludeFirstIP, iUpperBytes) >= 0 {
			left = append(left, &net.IPNet{IP: iLowerBytes, Mask: net.CIDRMask(newPrefixLen, bitLen)})
			matched = matched.Set(iUpper)
		} else {
			// Same as above, but opposite.
			right = append(right, &net.IPNet{IP: iUpperBytes, Mask: net.CIDRMask(newPrefixLen, bitLen)})
			matched = matched.Set(iLower)
		}

		newPrefixLen++

		if newPrefixLen > bitLen {
			break
		}

		iLower = iLower.Set(matched)
		iUpper = iUpper.Add(matched, big.NewInt(0).Lsh(big.NewInt(1), uint(bitLen-newPrefixLen)))
	}
	excludeList := []*net.IPNet{&excludeCIDR}

	return left, excludeList, right
}
// KeepUniqueAddrs transforms the provided multiset of IP addresses into a
// single set, lexicographically sorted via comparison of the addresses using
// netip.Addr.Compare (i.e. IPv4 addresses show up before IPv6).
// The slice is manipulated in-place destructively; it does not create a new slice.
func KeepUniqueAddrs(addrs []netip.Addr) []netip.Addr {
	SortAddrList(addrs)
	deduped := slices.Compact(addrs)
	return deduped
}
var privateIPBlocks []*net.IPNet
func initPrivatePrefixes() {
// We only care about global scope prefixes here.
for _, cidr := range []string{
"0.0.0.0/8", // RFC1122 - IPv4 Host on this network
"10.0.0.0/8", // RFC1918 - IPv4 Private-Use Networks
"100.64.0.0/10", // RFC6598 - IPv4 Shared address space
"127.0.0.0/8", // RFC1122 - IPv4 Loopback
"169.254.0.0/16", // RFC3927 - IPv4 Link-Local
"172.16.0.0/12", // RFC1918 - IPv4 Private-Use Networks
"192.0.0.0/24", // RFC6890 - IPv4 IETF Assignments
"192.0.2.0/24", // RFC5737 - IPv4 TEST-NET-1
"192.168.0.0/16", // RFC1918 - IPv4 Private-Use Networks
"198.18.0.0/15", // RFC2544 - IPv4 Interconnect Benchmarks
"198.51.100.0/24", // RFC5737 - IPv4 TEST-NET-2
"203.0.113.0/24", // RFC5737 - IPv4 TEST-NET-3
"224.0.0.0/4", // RFC5771 - IPv4 Multicast
"::/128", // RFC4291 - IPv6 Unspecified
"::1/128", // RFC4291 - IPv6 Loopback
"100::/64", // RFC6666 - IPv6 Discard-Only Prefix
"2001:2::/48", // RFC5180 - IPv6 Benchmarking
"2001:db8::/48", // RFC3849 - IPv6 Documentation
"fc00::/7", // RFC4193 - IPv6 Unique-Local
"fe80::/10", // RFC4291 - IPv6 Link-Local
"ff00::/8", // RFC4291 - IPv6 Multicast
} {
_, block, _ := net.ParseCIDR(cidr)
privateIPBlocks = append(privateIPBlocks, block)
}
}
func init() {
initPrivatePrefixes()
}
// IsPublicAddr returns whether a given global IP is from
// a public range.
func IsPublicAddr(ip net.IP) bool {
for _, block := range privateIPBlocks {
if block.Contains(ip) {
return false
}
}
return true
}
// IPToPrefix returns the corresponding IPNet for the given IP.
func IPToPrefix(ip net.IP) *net.IPNet {
bits := net.IPv6len * 8
if ip.To4() != nil {
ip = ip.To4()
bits = net.IPv4len * 8
}
prefix := &net.IPNet{
IP: ip,
Mask: net.CIDRMask(bits, bits),
}
return prefix
}
// IsIPv4 returns true if the given IP is an IPv4
func IsIPv4(ip net.IP) bool {
return ip.To4() != nil
}
// IsIPv6 returns if netIP is IPv6.
func IsIPv6(ip net.IP) bool {
return ip != nil && ip.To4() == nil
}
// ListContainsIP returns whether a list of IPs contains a given IP.
func ListContainsIP(ipList []net.IP, ip net.IP) bool {
for _, e := range ipList {
if e.Equal(ip) {
return true
}
}
return false
}
// SortIPList sorts the provided net.IP slice in place.
func SortIPList(ipList []net.IP) {
slices.SortFunc(ipList, func(a, b net.IP) int { return bytes.Compare(a, b) })
}
func SortAddrList(ipList []netip.Addr) {
slices.SortFunc(ipList, netip.Addr.Compare)
}
// getSortedIPList returns a new net.IP slice containing the same IPs in
// sorted order; the input slice is left untouched.
func getSortedIPList(ipList []net.IP) []net.IP {
	sorted := make([]net.IP, len(ipList))
	copy(sorted, ipList)
	SortIPList(sorted)
	return sorted
}
// UnsortedIPListsAreEqual returns true if the two lists contain the same IPs
// regardless of order. The comparison works on sorted copies, so neither
// input is modified.
func UnsortedIPListsAreEqual(ipList1, ipList2 []net.IP) bool {
	// Different lengths can never describe the same multiset.
	if len(ipList1) != len(ipList2) {
		return false
	}

	// With equal lengths and both sides sorted, the lists are equal exactly
	// when every position matches under net.IP.Equal.
	return slices.EqualFunc(getSortedIPList(ipList1), getSortedIPList(ipList2), net.IP.Equal)
}
// GetIPFromListByFamily returns the first IP in ipList matching the requested
// family: IPv4 when v4Family is true, IPv6 otherwise. Returns nil when no
// address of the requested family is present.
func GetIPFromListByFamily(ipList []net.IP, v4Family bool) net.IP {
	for _, addr := range ipList {
		matches := v4Family == IsIPv4(addr) || (!v4Family && IsIPv6(addr))
		if matches {
			return addr
		}
	}
	return nil
}
// MustAddrsFromIPs converts a slice of net.IP to a slice of netip.Addr. It
// assumes every input is a valid IP address; an invalid entry panics via
// netipx.MustFromStdIP.
func MustAddrsFromIPs(ips []net.IP) []netip.Addr {
	out := make([]netip.Addr, len(ips))
	for i, ip := range ips {
		out[i] = netipx.MustFromStdIP(ip)
	}
	return out
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"strconv"
"github.com/vishvananda/netlink"
)
// ParseScope parses a route scope name ("global", "nowhere", "host", "link"
// or "site") into the corresponding numeric netlink scope. Any other string
// is interpreted as a numeric scope via strconv.Atoi.
func ParseScope(scope string) (int, error) {
	var s netlink.Scope
	switch scope {
	case "global":
		s = netlink.SCOPE_UNIVERSE
	case "nowhere":
		s = netlink.SCOPE_NOWHERE
	case "host":
		s = netlink.SCOPE_HOST
	case "link":
		s = netlink.SCOPE_LINK
	case "site":
		s = netlink.SCOPE_SITE
	default:
		return strconv.Atoi(scope)
	}
	return int(s), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"fmt"
"net/netip"
"github.com/cilium/cilium/pkg/lock"
)
// Limits specifies the IPAM relevant instance limits.
type Limits struct {
	// Adapters specifies the maximum number of interfaces that can be
	// attached to the instance
	Adapters int

	// IPv4 is the maximum number of IPv4 addresses per adapter/interface
	IPv4 int

	// IPv6 is the maximum number of IPv6 addresses per adapter/interface
	IPv6 int

	// HypervisorType tracks the instance's hypervisor type if available. Used to determine if features like prefix
	// delegation are supported on an instance. Bare metal instances would have empty string.
	HypervisorType string

	// IsBareMetal tracks whether an instance is a bare metal instance or not
	IsBareMetal bool
}
// AllocationIP is an IP which is available for allocation, or already
// has been allocated.
type AllocationIP struct {
	// Owner is the owner of the IP. This field is set if the IP has been
	// allocated. It will be set to the pod name or another identifier
	// representing the usage of the IP
	//
	// The owner field is left blank for an entry in Spec.IPAM.Pool and
	// filled out as the IP is used and also added to Status.IPAM.Used.
	//
	// +optional
	Owner string `json:"owner,omitempty"`

	// Resource is set for both available and allocated IPs, it represents
	// what resource the IP is associated with, e.g. in combination with
	// AWS ENI, this will refer to the ID of the ENI
	//
	// +optional
	Resource string `json:"resource,omitempty"`
}
// AllocationMap is a map of allocated IPs indexed by IP (in its string
// representation).
type AllocationMap map[string]AllocationIP
// IPAMPodCIDR is a pod CIDR
//
// +kubebuilder:validation:Format=cidr
type IPAMPodCIDR string

// ToPrefix parses the pod CIDR string into a netip.Prefix. It returns an
// error for a nil receiver or an unparsable CIDR.
func (c *IPAMPodCIDR) ToPrefix() (*netip.Prefix, error) {
	if c == nil {
		return nil, fmt.Errorf("nil ipam cidr")
	}

	prefix, err := netip.ParsePrefix(string(*c))
	if err != nil {
		// Bug fix: format the CIDR string itself (*c) rather than the
		// pointer, which %v would render as a memory address.
		return nil, fmt.Errorf("failed to parse ipam cidr %v: %w", *c, err)
	}

	return &prefix, nil
}
// IPAMPoolAllocation describes an allocation of an IPAM pool from the operator to the
// node. It contains the assigned PodCIDRs allocated from this pool.
type IPAMPoolAllocation struct {
	// Pool is the name of the IPAM pool backing this allocation
	//
	// +kubebuilder:validation:MinLength=1
	Pool string `json:"pool"`

	// CIDRs contains a list of pod CIDRs currently allocated from this pool
	//
	// +optional
	CIDRs []IPAMPodCIDR `json:"cidrs,omitempty"`
}
// IPAMPoolRequest is a node's request for addresses out of a single IPAM
// pool.
type IPAMPoolRequest struct {
	// Pool is the name of the IPAM pool backing this request
	//
	// +kubebuilder:validation:MinLength=1
	Pool string `json:"pool"`

	// Needed indicates how many IPs out of the above Pool this node requests
	// from the operator. The operator runs a reconciliation loop to ensure each
	// node always has enough PodCIDRs allocated in each pool to fulfill the
	// requested number of IPs here.
	//
	// +optional
	Needed IPAMPoolDemand `json:"needed,omitempty"`
}
// IPAMPoolSpec describes the per-pool IPAM state of a node: what the agent
// requests and what the operator has allocated.
type IPAMPoolSpec struct {
	// Requested contains a list of IPAM pool requests, i.e. indicates how many
	// addresses this node requests out of each pool listed here. This field
	// is owned and written to by cilium-agent and read by the operator.
	//
	// +optional
	Requested []IPAMPoolRequest `json:"requested,omitempty"`

	// Allocated contains the list of pooled CIDR assigned to this node. The
	// operator will add new pod CIDRs to this field, whereas the agent will
	// remove CIDRs it has released.
	//
	// +optional
	Allocated []IPAMPoolAllocation `json:"allocated,omitempty"`
}
// IPAMSpec is the IPAM specification of the node
//
// This structure is embedded into v2.CiliumNode
type IPAMSpec struct {
	// Pool is the list of IPv4 addresses available to the node for allocation.
	// When an IPv4 address is used, it will remain on this list but will be added to
	// Status.IPAM.Used
	//
	// +optional
	Pool AllocationMap `json:"pool,omitempty"`

	// IPv6Pool is the list of IPv6 addresses available to the node for allocation.
	// When an IPv6 address is used, it will remain on this list but will be added to
	// Status.IPAM.IPv6Used
	//
	// +optional
	IPv6Pool AllocationMap `json:"ipv6-pool,omitempty"`

	// Pools contains the list of assigned IPAM pools for this node.
	//
	// +optional
	Pools IPAMPoolSpec `json:"pools,omitempty"`

	// PodCIDRs is the list of CIDRs available to the node for allocation.
	// When an IP is used, the IP will be added to Status.IPAM.Used
	//
	// +optional
	PodCIDRs []string `json:"podCIDRs,omitempty"`

	// MinAllocate is the minimum number of IPs that must be allocated when
	// the node is first bootstrapped. It defines the minimum base set
	// of addresses that must be available. After reaching this watermark,
	// the PreAllocate and MaxAboveWatermark logic takes over to continue
	// allocating IPs.
	//
	// +kubebuilder:validation:Minimum=0
	MinAllocate int `json:"min-allocate,omitempty"`

	// MaxAllocate is the maximum number of IPs that can be allocated to the
	// node. When the current amount of allocated IPs will approach this value,
	// the considered value for PreAllocate will decrease down to 0 in order to
	// not attempt to allocate more addresses than defined.
	//
	// +kubebuilder:validation:Minimum=0
	MaxAllocate int `json:"max-allocate,omitempty"`

	// PreAllocate defines the number of IP addresses that must be
	// available for allocation in the IPAMspec. It defines the buffer of
	// addresses available immediately without requiring cilium-operator to
	// get involved.
	//
	// +kubebuilder:validation:Minimum=0
	PreAllocate int `json:"pre-allocate,omitempty"`

	// MaxAboveWatermark is the maximum number of addresses to allocate
	// beyond the addresses needed to reach the PreAllocate watermark.
	// Going above the watermark can help reduce the number of API calls to
	// allocate IPs, e.g. when a new ENI is allocated, as many secondary
	// IPs as possible are allocated. Limiting the amount can help reduce
	// waste of IPs.
	//
	// +kubebuilder:validation:Minimum=0
	MaxAboveWatermark int `json:"max-above-watermark,omitempty"`

	// StaticIPTags are used to determine the pool of IPs from which to
	// attribute a static IP to the node. For example in AWS this is used to
	// filter Elastic IP Addresses.
	//
	// +optional
	StaticIPTags map[string]string `json:"static-ip-tags,omitempty"`
}
// IPReleaseStatus defines the valid states in IP release handshake
//
// +kubebuilder:validation:Enum=marked-for-release;ready-for-release;do-not-release;released
type IPReleaseStatus string

// IPAMStatus is the IPAM status of a node
//
// This structure is embedded into v2.CiliumNode
type IPAMStatus struct {
	// Used lists all IPv4 addresses out of Spec.IPAM.Pool which have been allocated
	// and are in use.
	//
	// +optional
	Used AllocationMap `json:"used,omitempty"`

	// IPv6Used lists all IPv6 addresses out of Spec.IPAM.IPv6Pool which have been
	// allocated and are in use.
	//
	// +optional
	IPv6Used AllocationMap `json:"ipv6-used,omitempty"`

	// PodCIDRs lists the status of each pod CIDR allocated to this node.
	//
	// +optional
	PodCIDRs PodCIDRMap `json:"pod-cidrs,omitempty"`

	// Operator is the Operator status of the node
	//
	// +optional
	OperatorStatus OperatorStatus `json:"operator-status,omitempty"`

	// ReleaseIPs tracks the state for every IPv4 address considered for release.
	// The value can be one of the following strings:
	// * marked-for-release : Set by operator as possible candidate for IP
	// * ready-for-release  : Acknowledged as safe to release by agent
	// * do-not-release     : IP already in use / not owned by the node. Set by agent
	// * released           : IP successfully released. Set by operator
	//
	// +optional
	ReleaseIPs map[string]IPReleaseStatus `json:"release-ips,omitempty"`

	// ReleaseIPv6s tracks the state for every IPv6 address considered for release.
	// The value can be one of the following strings:
	// * marked-for-release : Set by operator as possible candidate for IP
	// * ready-for-release  : Acknowledged as safe to release by agent
	// * do-not-release     : IP already in use / not owned by the node. Set by agent
	// * released           : IP successfully released. Set by operator
	//
	// +optional
	ReleaseIPv6s map[string]IPReleaseStatus `json:"release-ipv6s,omitempty"`

	// AssignedStaticIP is the static IP assigned to the node (ex: public Elastic IP address in AWS)
	//
	// +optional
	AssignedStaticIP string `json:"assigned-static-ip,omitempty"`
}
// IPAMPoolDemand is a request from the agent to the operator, indicating how
// many IPs it requires from a given pool
type IPAMPoolDemand struct {
	// IPv4Addrs contains the number of requested IPv4 addresses out of a given
	// pool
	//
	// +optional
	IPv4Addrs int `json:"ipv4-addrs,omitempty"`

	// IPv6Addrs contains the number of requested IPv6 addresses out of a given
	// pool
	//
	// +optional
	IPv6Addrs int `json:"ipv6-addrs,omitempty"`
}
// PodCIDRMap indexes pod CIDR status entries by the CIDR string.
type PodCIDRMap map[string]PodCIDRMapEntry

// PodCIDRStatus denotes the lifecycle state of a pod CIDR on a node.
//
// +kubebuilder:validation:Enum=released;depleted;in-use
type PodCIDRStatus string

const (
	// PodCIDRStatusReleased indicates the pod CIDR has been released.
	PodCIDRStatusReleased PodCIDRStatus = "released"
	// PodCIDRStatusDepleted indicates the pod CIDR has no addresses left.
	PodCIDRStatusDepleted PodCIDRStatus = "depleted"
	// PodCIDRStatusInUse indicates the pod CIDR is currently in use.
	PodCIDRStatusInUse PodCIDRStatus = "in-use"
)

// PodCIDRMapEntry is the status associated with a single pod CIDR.
type PodCIDRMapEntry struct {
	// Status describes the status of a pod CIDR
	//
	// +optional
	Status PodCIDRStatus `json:"status,omitempty"`
}
// OperatorStatus is the status used by cilium-operator to report
// errors in case the allocation CIDR failed.
type OperatorStatus struct {
	// Error is the error message set by cilium-operator.
	//
	// +optional
	Error string `json:"error,omitempty"`
}

// Tags implements generic key value tags
type Tags map[string]string
// Match returns true if every key/value pair in required is present in t
// with an identical value. An empty (or nil) required set matches any Tags.
func (t Tags) Match(required Tags) bool {
	for k, neededValue := range required {
		// Missing key or differing value means the requirement is not met.
		// (The original `!ok || (ok && ...)` form carried a redundant `ok &&`;
		// short-circuit evaluation already guarantees ok is true there.)
		haveValue, ok := t[k]
		if !ok || haveValue != neededValue {
			return false
		}
	}
	return true
}
// Subnet is a representation of a subnet
// +k8s:deepcopy-gen=false
// +deepequal-gen=false
type Subnet struct {
	// ID is the subnet ID
	ID string

	// Name is the subnet name
	Name string

	// CIDR is the IPv4 CIDR associated with the subnet
	CIDR netip.Prefix

	// IPv6CIDR is the IPv6 CIDR associated with the subnet
	IPv6CIDR netip.Prefix

	// AvailabilityZone is the availability zone of the subnet
	AvailabilityZone string

	// VirtualNetworkID is the virtual network the subnet is in
	VirtualNetworkID string

	// AvailableAddresses is the number of IPv4 addresses available for
	// allocation
	AvailableAddresses int

	// AvailableIPv6Addresses is the number of IPv6 addresses available for
	// allocation
	AvailableIPv6Addresses int

	// Tags is the tags of the subnet
	Tags Tags
}
// DeepEqual is a deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Subnet) DeepEqual(other *Subnet) bool {
	if other == nil {
		return false
	}

	if in.ID != other.ID {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	// netip.Prefix values are comparable with ==.
	if in.CIDR != other.CIDR {
		return false
	}
	if in.IPv6CIDR != other.IPv6CIDR {
		return false
	}
	if in.AvailabilityZone != other.AvailabilityZone {
		return false
	}
	if in.VirtualNetworkID != other.VirtualNetworkID {
		return false
	}
	if in.AvailableAddresses != other.AvailableAddresses {
		return false
	}
	if in.AvailableIPv6Addresses != other.AvailableIPv6Addresses {
		return false
	}
	// The guard enters the element-wise comparison either when both Tags maps
	// are non-nil, or when exactly one side is nil (nil-ness differs). In the
	// latter case Tags.DeepEqual decides the outcome; note this means a nil
	// map and an empty non-nil map may compare as equal — this mirrors the
	// deepequal-gen pattern used elsewhere in this package.
	if ((in.Tags != nil) && (other.Tags != nil)) || ((in.Tags == nil) != (other.Tags == nil)) {
		in, other := &in.Tags, &other.Tags
		if !in.DeepEqual(other) {
			return false
		}
	}

	return true
}
// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subnet) DeepCopyInto(out *Subnet) {
	// Copy all scalar fields in one assignment, then replace the shared
	// Tags map with a private copy if one exists.
	*out = *in
	if in.Tags == nil {
		return
	}
	out.Tags = make(Tags, len(in.Tags))
	for k, v := range in.Tags {
		out.Tags[k] = v
	}
}

// DeepCopy is a deepcopy function, copying the receiver, creating a new Subnet.
func (in *Subnet) DeepCopy() *Subnet {
	if in == nil {
		return nil
	}
	out := &Subnet{}
	in.DeepCopyInto(out)
	return out
}
// SubnetMap indexes subnets by subnet ID
type SubnetMap map[string]*Subnet

// FirstSubnetWithAvailableAddresses returns the first pool ID in the list of
// subnets with available addresses. If any of the preferred pool IDs have
// available addresses, the first pool ID with available addresses is returned.
func (m SubnetMap) FirstSubnetWithAvailableAddresses(preferredPoolIDs []PoolID) (PoolID, int) {
	// Preferred pools are checked first, in the caller-supplied order.
	for _, preferred := range preferredPoolIDs {
		subnet, ok := m[string(preferred)]
		if ok && subnet != nil && subnet.AvailableAddresses > 0 {
			return preferred, subnet.AvailableAddresses
		}
	}

	// Fall back to any subnet with capacity. Map iteration order is
	// unspecified, so "first" here is arbitrary among candidates.
	for id, subnet := range m {
		if subnet.AvailableAddresses > 0 {
			return PoolID(id), subnet.AvailableAddresses
		}
	}

	return PoolNotExists, 0
}
// VirtualNetwork is the representation of a virtual network
type VirtualNetwork struct {
	// ID is the ID of the virtual network
	ID string

	// PrimaryCIDR is the primary IPv4 CIDR
	PrimaryCIDR string

	// CIDRs is the list of secondary IPv4 CIDR ranges associated with the VPC
	CIDRs []string

	// IPv6CIDRs is the list of IPv6 CIDR ranges associated with the VPC
	IPv6CIDRs []string
}

// VirtualNetworkMap indexes virtual networks by their ID
type VirtualNetworkMap map[string]*VirtualNetwork

// RouteTable is a representation of a route table but only for the purpose
// of checking whether subnets are in the same route table. It is not a full
// representation of a route table.
type RouteTable struct {
	// ID is the ID of the route table
	ID string

	// VirtualNetworkID is the virtual network the route table is in
	VirtualNetworkID string

	// Subnets maps subnet IDs to their presence in this route table
	// +deepequal-gen=false
	Subnets map[string]struct{}
}

// RouteTableMap indexes route tables by their ID
type RouteTableMap map[string]*RouteTable

// PoolNotExists indicate that no such pool ID exists
const PoolNotExists = PoolID("")

// PoolUnspec indicates that the pool ID is unspecified
const PoolUnspec = PoolNotExists

// PoolID is the type used to identify an IPAM pool
type PoolID string

// PoolQuota defines the limits of an IPAM pool
type PoolQuota struct {
	// AvailabilityZone is the availability zone in which the IPAM pool resides in
	AvailabilityZone string

	// AvailableIPs is the number of available IPs in the pool
	AvailableIPs int

	// AvailableIPv6s is the number of available IPv6 addresses in the pool
	AvailableIPv6s int
}

// PoolQuotaMap is a map of pool quotas indexes by pool identifier
type PoolQuotaMap map[PoolID]PoolQuota
// Interface is the implementation of a IPAM relevant network interface
// +k8s:deepcopy-gen=false
// +deepequal-gen=false
type Interface interface {
	// InterfaceID must return the identifier of the interface
	InterfaceID() string

	// ForeachAddress must iterate over all addresses of the interface and
	// call fn for each address
	ForeachAddress(instanceID string, fn AddressIterator) error

	// DeepCopyInterface returns a deep copy of the underlying interface type.
	DeepCopyInterface() Interface
}

// InterfaceRevision is the configuration revision of a network interface. It
// consists of a revision hash representing the current configuration version
// and the resource itself.
//
// +k8s:deepcopy-gen=false
// +deepequal-gen=false
type InterfaceRevision struct {
	// Resource is the interface resource
	Resource Interface

	// Fingerprint is the fingerprint representing the network interface
	// configuration. It is typically implemented as the result of a hash
	// function calculated off the resource. This field is optional, not
	// all IPAM backends make use of fingerprints.
	Fingerprint string
}
// DeepCopy returns a deep copy of the interface revision, delegating the
// copy of the interface resource to its DeepCopyInterface implementation.
func (i *InterfaceRevision) DeepCopy() *InterfaceRevision {
	if i == nil {
		return nil
	}
	c := &InterfaceRevision{Fingerprint: i.Fingerprint}
	c.Resource = i.Resource.DeepCopyInterface()
	return c
}
// Instance is the representation of an instance, typically a VM, subject to
// per-node IPAM logic
//
// +k8s:deepcopy-gen=false
// +deepequal-gen=false
type Instance struct {
	// Interfaces is a map of all interfaces attached to the instance
	// indexed by the interface ID
	Interfaces map[string]InterfaceRevision
}
// DeepCopy returns a deep copy of the instance, deep copying every attached
// interface revision.
func (i *Instance) DeepCopy() *Instance {
	if i == nil {
		return nil
	}
	out := &Instance{
		Interfaces: make(map[string]InterfaceRevision, len(i.Interfaces)),
	}
	for id, rev := range i.Interfaces {
		out.Interfaces[id] = *rev.DeepCopy()
	}
	return out
}
// InstanceMap is the list of all instances indexed by instance ID
//
// +k8s:deepcopy-gen=false
// +deepequal-gen=false
type InstanceMap struct {
	// mutex guards all access to data; read paths take RLock, mutating
	// paths take the write lock.
	mutex lock.RWMutex
	data  map[string]*Instance
}

// NewInstanceMap returns a new InstanceMap with an initialized (empty)
// underlying map, ready for use.
func NewInstanceMap() *InstanceMap {
	return &InstanceMap{data: map[string]*Instance{}}
}
// UpdateInstance updates the interfaces map for a particular instance.
func (m *InstanceMap) UpdateInstance(instanceID string, instance *Instance) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.data[instanceID] = instance
}

// Update updates the definition of an interface for a particular instance. If
// the interface is already known, the definition is updated, otherwise the
// interface is added to the instance.
func (m *InstanceMap) Update(instanceID string, iface InterfaceRevision) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.updateLocked(instanceID, iface)
}
// updateLocked stores iface under its interface ID for the given instance,
// creating the instance entry and its interface map on demand. The caller
// must hold m.mutex for writing. A revision without a Resource is ignored.
func (m *InstanceMap) updateLocked(instanceID string, iface InterfaceRevision) {
	if iface.Resource == nil {
		return
	}

	instance, ok := m.data[instanceID]
	if !ok {
		instance = &Instance{}
		m.data[instanceID] = instance
	}
	if instance.Interfaces == nil {
		instance.Interfaces = map[string]InterfaceRevision{}
	}

	instance.Interfaces[iface.Resource.InterfaceID()] = iface
}
// Address is an opaque address value; the concrete type is supplied by the
// IPAM backend implementing Interface.ForeachAddress.
type Address any

// AddressIterator is the function called by the ForeachAddress iterator
type AddressIterator func(instanceID, interfaceID, ip, poolID string, address Address) error
// foreachAddress walks every interface of instance and invokes fn for each of
// its addresses, stopping at the first error returned.
func foreachAddress(instanceID string, instance *Instance, fn AddressIterator) error {
	for _, iface := range instance.Interfaces {
		err := iface.Resource.ForeachAddress(instanceID, fn)
		if err != nil {
			return err
		}
	}
	return nil
}
// ForeachAddress calls fn for each address on each interface attached to each
// instance. If an instanceID is specified, only the interfaces and addresses
// of the specified instance are considered; an error is returned if that
// instance is unknown.
//
// The InstanceMap is read-locked throughout the iteration process, i.e., no
// updates will occur. However, the address object given to the AddressIterator
// will point to live data and must be deep copied if used outside of the
// context of the iterator function.
func (m *InstanceMap) ForeachAddress(instanceID string, fn AddressIterator) error {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	if instanceID == "" {
		// Walk all known instances.
		for id, instance := range m.data {
			if err := foreachAddress(id, instance, fn); err != nil {
				return err
			}
		}
		return nil
	}

	instance := m.data[instanceID]
	if instance == nil {
		return fmt.Errorf("instance does not exist: %q", instanceID)
	}
	return foreachAddress(instanceID, instance, fn)
}
// InterfaceIterator is the function called by the ForeachInterface iterator
type InterfaceIterator func(instanceID, interfaceID string, iface InterfaceRevision) error

// foreachInterface invokes fn once per interface attached to instance,
// stopping at the first error returned.
func foreachInterface(instanceID string, instance *Instance, fn InterfaceIterator) error {
	for _, rev := range instance.Interfaces {
		err := fn(instanceID, rev.Resource.InterfaceID(), rev)
		if err != nil {
			return err
		}
	}
	return nil
}
// ForeachInterface calls fn for each interface attached to each instance. If
// an instanceID is specified, only the interfaces of the specified instance
// are considered; an error is returned if that instance is unknown.
//
// The InstanceMap is read-locked throughout the iteration process, i.e., no
// updates will occur. However, the interface revision given to the
// InterfaceIterator will point to live data and must be deep copied if used
// outside of the context of the iterator function.
func (m *InstanceMap) ForeachInterface(instanceID string, fn InterfaceIterator) error {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	if instanceID == "" {
		// Walk all known instances.
		for id, instance := range m.data {
			if err := foreachInterface(id, instance, fn); err != nil {
				return err
			}
		}
		return nil
	}

	instance := m.data[instanceID]
	if instance == nil {
		return fmt.Errorf("instance does not exist: %q", instanceID)
	}
	return foreachInterface(instanceID, instance, fn)
}
// GetInterface returns a particular interface of an instance. The boolean
// indicates whether the interface was found or not.
func (m *InstanceMap) GetInterface(instanceID, interfaceID string) (InterfaceRevision, bool) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	instance := m.data[instanceID]
	if instance == nil {
		return InterfaceRevision{}, false
	}
	// A failed lookup yields the zero InterfaceRevision and ok == false,
	// matching the not-found return above.
	rev, ok := instance.Interfaces[interfaceID]
	return rev, ok
}
// DeepCopy returns a deep copy
func (m *InstanceMap) DeepCopy() *InstanceMap {
	c := NewInstanceMap()
	// The error return of ForeachInterface is safely ignored: the iterator
	// below always returns nil, and with an empty instanceID the
	// "instance does not exist" error path cannot be taken.
	m.ForeachInterface("", func(instanceID, interfaceID string, rev InterfaceRevision) error {
		// c is not exposed yet, we can access it without locking it
		rev.Resource = rev.Resource.DeepCopyInterface()
		c.updateLocked(instanceID, rev)
		return nil
	})
	return c
}
// NumInstances returns the number of instances in the instance map
func (m *InstanceMap) NumInstances() (size int) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	return len(m.data)
}

// Exists returns whether the instance ID is in the instanceMap
func (m *InstanceMap) Exists(instanceID string) (exists bool) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	// A missing key yields a nil *Instance, so a single lookup suffices.
	return m.data[instanceID] != nil
}

// Delete instance from m.data
func (m *InstanceMap) Delete(instanceID string) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	delete(m.data, instanceID)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package types
// NOTE(review): the functions below are emitted by deepcopy-gen (see the
// "DO NOT EDIT" header of this file); their exact statement patterns are
// owned by the generator and will be overwritten on regeneration.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllocationIP) DeepCopyInto(out *AllocationIP) {
	// The generator determined plain struct assignment is a full copy here.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationIP.
func (in *AllocationIP) DeepCopy() *AllocationIP {
	if in == nil {
		return nil
	}
	out := new(AllocationIP)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in AllocationMap) DeepCopyInto(out *AllocationMap) {
	{
		in := &in
		*out = make(AllocationMap, len(*in))
		// Map values are copied by assignment.
		for key, val := range *in {
			(*out)[key] = val
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationMap.
func (in AllocationMap) DeepCopy() AllocationMap {
	if in == nil {
		return nil
	}
	out := new(AllocationMap)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMPoolAllocation) DeepCopyInto(out *IPAMPoolAllocation) {
	*out = *in
	// The CIDRs slice gets its own backing array; elements are copied by value.
	if in.CIDRs != nil {
		in, out := &in.CIDRs, &out.CIDRs
		*out = make([]IPAMPodCIDR, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolAllocation.
func (in *IPAMPoolAllocation) DeepCopy() *IPAMPoolAllocation {
	if in == nil {
		return nil
	}
	out := new(IPAMPoolAllocation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMPoolDemand) DeepCopyInto(out *IPAMPoolDemand) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolDemand.
func (in *IPAMPoolDemand) DeepCopy() *IPAMPoolDemand {
	if in == nil {
		return nil
	}
	out := new(IPAMPoolDemand)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMPoolRequest) DeepCopyInto(out *IPAMPoolRequest) {
	*out = *in
	// Needed is re-assigned explicitly by the generator (already covered by
	// the struct assignment above).
	out.Needed = in.Needed
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolRequest.
func (in *IPAMPoolRequest) DeepCopy() *IPAMPoolRequest {
	if in == nil {
		return nil
	}
	out := new(IPAMPoolRequest)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMPoolSpec) DeepCopyInto(out *IPAMPoolSpec) {
	*out = *in
	// Requested elements are plain values; a slice copy suffices.
	if in.Requested != nil {
		in, out := &in.Requested, &out.Requested
		*out = make([]IPAMPoolRequest, len(*in))
		copy(*out, *in)
	}
	// Allocated elements contain a slice themselves, so each element is
	// deep copied individually.
	if in.Allocated != nil {
		in, out := &in.Allocated, &out.Allocated
		*out = make([]IPAMPoolAllocation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolSpec.
func (in *IPAMPoolSpec) DeepCopy() *IPAMPoolSpec {
	if in == nil {
		return nil
	}
	out := new(IPAMPoolSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMSpec) DeepCopyInto(out *IPAMSpec) {
	*out = *in
	if in.Pool != nil {
		in, out := &in.Pool, &out.Pool
		*out = make(AllocationMap, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.IPv6Pool != nil {
		in, out := &in.IPv6Pool, &out.IPv6Pool
		*out = make(AllocationMap, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	in.Pools.DeepCopyInto(&out.Pools)
	if in.PodCIDRs != nil {
		in, out := &in.PodCIDRs, &out.PodCIDRs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.StaticIPTags != nil {
		in, out := &in.StaticIPTags, &out.StaticIPTags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMSpec.
func (in *IPAMSpec) DeepCopy() *IPAMSpec {
	if in == nil {
		return nil
	}
	out := new(IPAMSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMStatus) DeepCopyInto(out *IPAMStatus) {
	*out = *in
	// Every map field is re-created so the copy shares no storage with the
	// receiver; values are copied by assignment.
	if in.Used != nil {
		in, out := &in.Used, &out.Used
		*out = make(AllocationMap, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.IPv6Used != nil {
		in, out := &in.IPv6Used, &out.IPv6Used
		*out = make(AllocationMap, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.PodCIDRs != nil {
		in, out := &in.PodCIDRs, &out.PodCIDRs
		*out = make(PodCIDRMap, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	out.OperatorStatus = in.OperatorStatus
	if in.ReleaseIPs != nil {
		in, out := &in.ReleaseIPs, &out.ReleaseIPs
		*out = make(map[string]IPReleaseStatus, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.ReleaseIPv6s != nil {
		in, out := &in.ReleaseIPv6s, &out.ReleaseIPv6s
		*out = make(map[string]IPReleaseStatus, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMStatus.
func (in *IPAMStatus) DeepCopy() *IPAMStatus {
	if in == nil {
		return nil
	}
	out := new(IPAMStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Limits) DeepCopyInto(out *Limits) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Limits.
func (in *Limits) DeepCopy() *Limits {
	if in == nil {
		return nil
	}
	out := new(Limits)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus.
func (in *OperatorStatus) DeepCopy() *OperatorStatus {
	if in == nil {
		return nil
	}
	out := new(OperatorStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in PodCIDRMap) DeepCopyInto(out *PodCIDRMap) {
	{
		in := &in
		*out = make(PodCIDRMap, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCIDRMap.
func (in PodCIDRMap) DeepCopy() PodCIDRMap {
	if in == nil {
		return nil
	}
	out := new(PodCIDRMap)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCIDRMapEntry) DeepCopyInto(out *PodCIDRMapEntry) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCIDRMapEntry.
func (in *PodCIDRMapEntry) DeepCopy() *PodCIDRMapEntry {
	if in == nil {
		return nil
	}
	out := new(PodCIDRMapEntry)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PoolQuota) DeepCopyInto(out *PoolQuota) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolQuota.
func (in *PoolQuota) DeepCopy() *PoolQuota {
	if in == nil {
		return nil
	}
	out := new(PoolQuota)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in PoolQuotaMap) DeepCopyInto(out *PoolQuotaMap) {
	{
		in := &in
		*out = make(PoolQuotaMap, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolQuotaMap.
func (in PoolQuotaMap) DeepCopy() PoolQuotaMap {
	if in == nil {
		return nil
	}
	out := new(PoolQuotaMap)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RouteTable) DeepCopyInto(out *RouteTable) {
	*out = *in
	if in.Subnets != nil {
		in, out := &in.Subnets, &out.Subnets
		*out = make(map[string]struct{}, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTable.
func (in *RouteTable) DeepCopy() *RouteTable {
	if in == nil {
		return nil
	}
	out := new(RouteTable)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in RouteTableMap) DeepCopyInto(out *RouteTableMap) {
	{
		in := &in
		*out = make(RouteTableMap, len(*in))
		// Pointer values are deep copied; nil entries are preserved as nil.
		for key, val := range *in {
			var outVal *RouteTable
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = new(RouteTable)
				(*in).DeepCopyInto(*out)
			}
			(*out)[key] = outVal
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableMap.
func (in RouteTableMap) DeepCopy() RouteTableMap {
	if in == nil {
		return nil
	}
	out := new(RouteTableMap)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in SubnetMap) DeepCopyInto(out *SubnetMap) {
	{
		in := &in
		*out = make(SubnetMap, len(*in))
		// Subnet carries its own hand-written DeepCopy (it is marked
		// +k8s:deepcopy-gen=false), which the generator calls here.
		for key, val := range *in {
			var outVal *Subnet
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = (*in).DeepCopy()
			}
			(*out)[key] = outVal
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetMap.
func (in SubnetMap) DeepCopy() SubnetMap {
	if in == nil {
		return nil
	}
	out := new(SubnetMap)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Tags) DeepCopyInto(out *Tags) {
	{
		in := &in
		*out = make(Tags, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tags.
func (in Tags) DeepCopy() Tags {
	if in == nil {
		return nil
	}
	out := new(Tags)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNetwork) DeepCopyInto(out *VirtualNetwork) {
	*out = *in
	// Both string slices get fresh backing arrays.
	if in.CIDRs != nil {
		in, out := &in.CIDRs, &out.CIDRs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.IPv6CIDRs != nil {
		in, out := &in.IPv6CIDRs, &out.IPv6CIDRs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetwork.
func (in *VirtualNetwork) DeepCopy() *VirtualNetwork {
	if in == nil {
		return nil
	}
	out := new(VirtualNetwork)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in VirtualNetworkMap) DeepCopyInto(out *VirtualNetworkMap) {
	{
		in := &in
		*out = make(VirtualNetworkMap, len(*in))
		// Pointer values are deep copied; nil entries are preserved as nil.
		for key, val := range *in {
			var outVal *VirtualNetwork
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = new(VirtualNetwork)
				(*in).DeepCopyInto(*out)
			}
			(*out)[key] = outVal
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkMap.
func (in VirtualNetworkMap) DeepCopy() VirtualNetworkMap {
	if in == nil {
		return nil
	}
	out := new(VirtualNetworkMap)
	in.DeepCopyInto(out)
	return *out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package types
// NOTE(review): the functions below are emitted by deepequal-gen (see the
// "DO NOT EDIT" header of this file); their guard patterns are owned by the
// generator and will be overwritten on regeneration.

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AllocationIP) DeepEqual(other *AllocationIP) bool {
	if other == nil {
		return false
	}

	if in.Owner != other.Owner {
		return false
	}
	if in.Resource != other.Resource {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AllocationMap) DeepEqual(other *AllocationMap) bool {
	if other == nil {
		return false
	}

	if len(*in) != len(*other) {
		return false
	} else {
		// Same length: every key in *in must be present in *other with a
		// deep-equal value.
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if !inValue.DeepEqual(&otherValue) {
					return false
				}
			}
		}
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPAMPoolAllocation) DeepEqual(other *IPAMPoolAllocation) bool {
	if other == nil {
		return false
	}

	if in.Pool != other.Pool {
		return false
	}
	// The guard enters the element comparison when both slices are non-nil
	// or when exactly one side is nil; in the latter case the length check
	// decides, so a nil slice and an empty non-nil slice compare as equal.
	if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
		in, other := &in.CIDRs, &other.CIDRs
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPAMPoolDemand) DeepEqual(other *IPAMPoolDemand) bool {
	if other == nil {
		return false
	}

	if in.IPv4Addrs != other.IPv4Addrs {
		return false
	}
	if in.IPv6Addrs != other.IPv6Addrs {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPAMPoolRequest) DeepEqual(other *IPAMPoolRequest) bool {
	if other == nil {
		return false
	}

	if in.Pool != other.Pool {
		return false
	}
	if in.Needed != other.Needed {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPAMPoolSpec) DeepEqual(other *IPAMPoolSpec) bool {
	if other == nil {
		return false
	}

	// For each slice field, the guard enters the element comparison when
	// both sides are non-nil or when their nil-ness differs; the length
	// check then decides, so a nil slice and an empty non-nil slice compare
	// as equal (standard deepequal-gen semantics).
	if ((in.Requested != nil) && (other.Requested != nil)) || ((in.Requested == nil) != (other.Requested == nil)) {
		in, other := &in.Requested, &other.Requested
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	if ((in.Allocated != nil) && (other.Allocated != nil)) || ((in.Allocated == nil) != (other.Allocated == nil)) {
		in, other := &in.Allocated, &other.Allocated
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPAMSpec) DeepEqual(other *IPAMSpec) bool {
	if other == nil {
		return false
	}

	if ((in.Pool != nil) && (other.Pool != nil)) || ((in.Pool == nil) != (other.Pool == nil)) {
		in, other := &in.Pool, &other.Pool
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}

	if ((in.IPv6Pool != nil) && (other.IPv6Pool != nil)) || ((in.IPv6Pool == nil) != (other.IPv6Pool == nil)) {
		in, other := &in.IPv6Pool, &other.IPv6Pool
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}

	if !in.Pools.DeepEqual(&other.Pools) {
		return false
	}

	if ((in.PodCIDRs != nil) && (other.PodCIDRs != nil)) || ((in.PodCIDRs == nil) != (other.PodCIDRs == nil)) {
		in, other := &in.PodCIDRs, &other.PodCIDRs
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if in.MinAllocate != other.MinAllocate {
		return false
	}
	if in.MaxAllocate != other.MaxAllocate {
		return false
	}
	if in.PreAllocate != other.PreAllocate {
		return false
	}
	if in.MaxAboveWatermark != other.MaxAboveWatermark {
		return false
	}

	if ((in.StaticIPTags != nil) && (other.StaticIPTags != nil)) || ((in.StaticIPTags == nil) != (other.StaticIPTags == nil)) {
		in, other := &in.StaticIPTags, &other.StaticIPTags
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPAMStatus) DeepEqual(other *IPAMStatus) bool {
	if other == nil {
		return false
	}
	// Map fields match only when both are nil or both non-nil with deeply
	// equal contents; nil vs. empty are considered different.
	if ((in.Used != nil) && (other.Used != nil)) || ((in.Used == nil) != (other.Used == nil)) {
		in, other := &in.Used, &other.Used
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.IPv6Used != nil) && (other.IPv6Used != nil)) || ((in.IPv6Used == nil) != (other.IPv6Used == nil)) {
		in, other := &in.IPv6Used, &other.IPv6Used
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.PodCIDRs != nil) && (other.PodCIDRs != nil)) || ((in.PodCIDRs == nil) != (other.PodCIDRs == nil)) {
		in, other := &in.PodCIDRs, &other.PodCIDRs
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if in.OperatorStatus != other.OperatorStatus {
		return false
	}
	if ((in.ReleaseIPs != nil) && (other.ReleaseIPs != nil)) || ((in.ReleaseIPs == nil) != (other.ReleaseIPs == nil)) {
		in, other := &in.ReleaseIPs, &other.ReleaseIPs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if ((in.ReleaseIPv6s != nil) && (other.ReleaseIPv6s != nil)) || ((in.ReleaseIPv6s == nil) != (other.ReleaseIPv6s == nil)) {
		in, other := &in.ReleaseIPv6s, &other.ReleaseIPv6s
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if in.AssignedStaticIP != other.AssignedStaticIP {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Limits) DeepEqual(other *Limits) bool {
	if other == nil {
		return false
	}
	// All fields are scalars; compare directly with ==.
	if in.Adapters != other.Adapters {
		return false
	}
	if in.IPv4 != other.IPv4 {
		return false
	}
	if in.IPv6 != other.IPv6 {
		return false
	}
	if in.HypervisorType != other.HypervisorType {
		return false
	}
	if in.IsBareMetal != other.IsBareMetal {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *OperatorStatus) DeepEqual(other *OperatorStatus) bool {
	if other == nil {
		return false
	}
	// Single scalar field; compare directly with ==.
	if in.Error != other.Error {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PodCIDRMap) DeepEqual(other *PodCIDRMap) bool {
	if other == nil {
		return false
	}
	// Maps are equal when they have the same key set and each value pair is
	// DeepEqual.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if !inValue.DeepEqual(&otherValue) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PodCIDRMapEntry) DeepEqual(other *PodCIDRMapEntry) bool {
	if other == nil {
		return false
	}
	// Single scalar field; compare directly with ==.
	if in.Status != other.Status {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PoolQuota) DeepEqual(other *PoolQuota) bool {
	if other == nil {
		return false
	}
	// All fields are scalars; compare directly with ==.
	if in.AvailabilityZone != other.AvailabilityZone {
		return false
	}
	if in.AvailableIPs != other.AvailableIPs {
		return false
	}
	if in.AvailableIPv6s != other.AvailableIPv6s {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PoolQuotaMap) DeepEqual(other *PoolQuotaMap) bool {
	if other == nil {
		return false
	}
	// Maps are equal when they have the same key set and each value pair is
	// DeepEqual.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if !inValue.DeepEqual(&otherValue) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *RouteTable) DeepEqual(other *RouteTable) bool {
	if other == nil {
		return false
	}
	// All fields are scalars; compare directly with ==.
	if in.ID != other.ID {
		return false
	}
	if in.VirtualNetworkID != other.VirtualNetworkID {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *RouteTableMap) DeepEqual(other *RouteTableMap) bool {
	if other == nil {
		return false
	}
	// Map values here are pointers, so they are passed to DeepEqual directly
	// (no &otherValue as in the value-typed map variants).
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if !inValue.DeepEqual(otherValue) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *SubnetMap) DeepEqual(other *SubnetMap) bool {
	if other == nil {
		return false
	}
	// Map values here are pointers, so they are passed to DeepEqual directly.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if !inValue.DeepEqual(otherValue) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Tags) DeepEqual(other *Tags) bool {
	if other == nil {
		return false
	}
	// Scalar-valued map; keys and values compare directly with ==.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if inValue != otherValue {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *VirtualNetwork) DeepEqual(other *VirtualNetwork) bool {
	if other == nil {
		return false
	}
	if in.ID != other.ID {
		return false
	}
	if in.PrimaryCIDR != other.PrimaryCIDR {
		return false
	}
	// Slices match only when both are nil or both non-nil with element-wise
	// equal contents; nil vs. empty are considered different.
	if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
		in, other := &in.CIDRs, &other.CIDRs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.IPv6CIDRs != nil) && (other.IPv6CIDRs != nil)) || ((in.IPv6CIDRs == nil) != (other.IPv6CIDRs == nil)) {
		in, other := &in.IPv6CIDRs, &other.IPv6CIDRs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *VirtualNetworkMap) DeepEqual(other *VirtualNetworkMap) bool {
	if other == nil {
		return false
	}
	// Map values here are pointers, so they are passed to DeepEqual directly.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if !inValue.DeepEqual(otherValue) {
					return false
				}
			}
		}
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ipcache
import (
"fmt"
"github.com/cilium/cilium/pkg/source"
)
// ErrOverwrite represents an overwrite error where functions return the error
// to indicate the new source can't overwrite existing source.
type ErrOverwrite struct {
	// ExistingSrc is the source that currently owns the entry.
	ExistingSrc source.Source
	// NewSrc is the source that attempted (and failed) to overwrite it.
	NewSrc source.Source
}
// NewErrOverwrite returns a new ErrOverwrite describing a rejected attempt by
// newSrc to overwrite an entry owned by existing.
// (The second parameter was renamed from "new" to avoid shadowing the
// predeclared identifier new.)
func NewErrOverwrite(existing, newSrc source.Source) *ErrOverwrite {
	return &ErrOverwrite{
		ExistingSrc: existing,
		NewSrc:      newSrc,
	}
}
// Error implements the error interface, describing both the existing and the
// rejected source.
func (e ErrOverwrite) Error() string {
	return fmt.Sprintf("unable to overwrite source %q with source %q", e.ExistingSrc, e.NewSrc)
}
// Is supports errors.Is matching against *ErrOverwrite targets. An empty
// source in the target acts as a wildcard that matches any source.
func (e *ErrOverwrite) Is(target error) bool {
	other, ok := target.(*ErrOverwrite)
	if !ok {
		return false
	}
	existingMatches := other.ExistingSrc == "" || e.ExistingSrc == other.ExistingSrc
	newMatches := other.NewSrc == "" || e.NewSrc == other.NewSrc
	return existingMatches && newMatches
}
// ErrInvalidIP represents an error of an invalid IP.
type ErrInvalidIP struct {
	// ip is the string that failed to parse as an IP or prefix.
	ip string
}
// NewErrInvalidIP returns a new ErrInvalidIP wrapping the given IP string.
func NewErrInvalidIP(ip string) *ErrInvalidIP {
	return &ErrInvalidIP{ip: ip}
}
// Error implements the error interface, quoting the offending IP string.
func (e ErrInvalidIP) Error() string {
	return fmt.Sprintf("attempt to upsert invalid IP %q into ipcache layer", e.ip)
}
// Is supports errors.Is matching: two *ErrInvalidIP values match when they
// refer to the same IP string.
func (e *ErrInvalidIP) Is(target error) bool {
	other, ok := target.(*ErrInvalidIP)
	return ok && e.ip == other.ip
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ipcache
import (
"context"
"log/slog"
"net"
"net/netip"
"sync"
"sync/atomic"
cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/counter"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
iputil "github.com/cilium/cilium/pkg/ip"
ipcacheTypes "github.com/cilium/cilium/pkg/ipcache/types"
"github.com/cilium/cilium/pkg/k8s/synced"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/types"
)
// Identity is the identity representation of an IP<->Identity cache.
type Identity struct {
	// Note: The ordering of these fields is optimized to reduce padding
	// Source is the source of the identity in the cache
	Source source.Source
	// overwrittenLegacySource contains the source of the original entry created
	// via legacy API. This is preserved so that original source can be restored
	// if the metadata API stops managing the entry.
	overwrittenLegacySource source.Source
	// ID is the numeric identity
	ID identity.NumericIdentity
	// This blank field ensures that the == operator cannot be used on this
	// type, to avoid external packages accidentally comparing the private
	// values below
	_ []struct{}
	// modifiedByLegacyAPI indicates that this entry was touched by the legacy
	// Upsert API.
	// This informs the metadata subsystem to retain the original source and
	// to clean up the entry if the legacy API already dropped its reference.
	// upsertLocked will ensure that this field remains true once set. In other
	// words, there is no way to unset this field once set.
	// This field is intended to be removed once cilium/cilium#21142 has been
	// fully implemented and all entries are created via the new metadata API
	modifiedByLegacyAPI bool
	// shadowed determines if another entry overlaps with this one.
	// Shadowed identities are not propagated to listeners by default.
	// Most commonly set for Identity with Source = source.Generated when
	// a pod IP (other source) has the same IP.
	shadowed bool
}
// equals reports whether i and o carry exactly the same identity state,
// including the unexported bookkeeping fields (which is why == cannot be
// used on this type).
func (i Identity) equals(o Identity) bool {
	if i.ID != o.ID || i.Source != o.Source {
		return false
	}
	if i.shadowed != o.shadowed {
		return false
	}
	return i.modifiedByLegacyAPI == o.modifiedByLegacyAPI &&
		i.overwrittenLegacySource == o.overwrittenLegacySource
}
// exclusivelyOwnedByLegacyAPI returns true if the entry was created via the
// legacy Upsert API and has not been taken over by the metadata API.
func (i Identity) exclusivelyOwnedByLegacyAPI() bool {
	return i.modifiedByLegacyAPI && i.overwrittenLegacySource == ""
}
// exclusivelyOwnedByMetadataAPI returns true if the entry was never touched
// by the legacy Upsert API.
func (i Identity) exclusivelyOwnedByMetadataAPI() bool {
	return !i.modifiedByLegacyAPI
}
// ownedByLegacyAndMetadataAPI returns true if the entry was created via the
// legacy API and subsequently overwritten by the metadata API (which recorded
// the original legacy source in overwrittenLegacySource).
func (i Identity) ownedByLegacyAndMetadataAPI() bool {
	return i.modifiedByLegacyAPI && i.overwrittenLegacySource != ""
}
// IPKeyPair is the (IP, key) pair used of the identity
type IPKeyPair struct {
	// IP is the host IP associated with the cached entry.
	IP net.IP
	// Key is the encryption key associated with the host IP.
	Key uint8
}
// K8sMetadata contains Kubernetes pod information of the IP
type K8sMetadata struct {
	// Namespace is the Kubernetes namespace of the pod behind the IP
	Namespace string
	// PodName is the Kubernetes pod name behind the IP
	PodName string
	// NamedPorts is the set of named ports for the pod
	NamedPorts types.NamedPortMap
}
// Configuration is init-time configuration for the IPCache.
type Configuration struct {
	context.Context
	// Logger is the logger used by the IPCache and its scoped sub-loggers.
	Logger *slog.Logger
	// Accessors to other subsystems, provided by the daemon.
	cache.IdentityAllocator
	ipcacheTypes.IdentityUpdater
	synced.CacheStatus
}
// IPCache is a collection of mappings:
//   - mapping of endpoint IP or CIDR to security identities of all endpoints
//     which are part of the same cluster, and vice-versa
//   - mapping of endpoint IP or CIDR to host IP (maybe nil)
type IPCache struct {
	logger *slog.Logger
	// mutex guards all of the maps below.
	mutex lock.SemaphoredMutex
	// ipToIdentityCache maps an IP/prefix string to its cached Identity.
	ipToIdentityCache map[string]Identity
	// identityToIPCache is the reverse index: numeric identity -> set of IPs.
	identityToIPCache map[identity.NumericIdentity]map[string]struct{}
	// ipToHostIPCache maps an IP/prefix string to its host IP and encrypt key.
	ipToHostIPCache map[string]IPKeyPair
	// ipToK8sMetadata maps an IP/prefix string to Kubernetes pod metadata.
	ipToK8sMetadata map[string]K8sMetadata
	// ipToEndpointFlags maps an IP/prefix string to its endpoint flags.
	ipToEndpointFlags map[string]uint8
	// listeners are notified on every non-shadowed upsert/delete.
	listeners []IPIdentityMappingListener
	// controllers manages the async controllers for this IPCache
	controllers *controller.Manager
	// needNamedPorts is initially 'false', but will atomically be changed to 'true'
	// when the clusterwide named port mappings are needed for network policy
	// computation for the first time. This avoids the overhead of unnecessarily
	// triggering policy updates when it is known not to be needed.
	needNamedPorts atomic.Bool
	// namedPorts is a collection of all named ports in the cluster. This is needed
	// only if an egress policy refers to a port by name.
	// This map is returned (read-only, as a NamedPortMultiMap) to users.
	// Therefore, all updates must be made atomically, which is guaranteed by the
	// interface.
	namedPorts namedPortMultiMapUpdater
	// Configuration provides pointers towards other agent components that
	// the IPCache relies upon at runtime.
	*Configuration
	// metadata is the ipcache identity metadata map, which maps IPs to labels.
	metadata *metadata
	// prefixLengths tracks the unique set of prefix lengths for IPv4 and
	// IPv6 addresses in order to optimize longest prefix match lookups.
	prefixLengths *counter.PrefixLengthCounter
	// injectionStarted is a sync.Once so we can lazily start the prefix injection controller,
	// but only once
	injectionStarted sync.Once
}
// NewIPCache returns a new IPCache with the mappings of endpoint IP to security
// identity (and vice-versa) initialized.
func NewIPCache(c *Configuration) *IPCache {
	ipc := &IPCache{
		logger:            c.Logger,
		mutex:             lock.NewSemaphoredMutex(),
		ipToIdentityCache: map[string]Identity{},
		identityToIPCache: map[identity.NumericIdentity]map[string]struct{}{},
		ipToHostIPCache:   map[string]IPKeyPair{},
		ipToK8sMetadata:   map[string]K8sMetadata{},
		ipToEndpointFlags: map[string]uint8{},
		controllers:       controller.NewManager(),
		namedPorts:        types.NewNamedPortMultiMap(),
		metadata:          newMetadata(c.Logger),
		prefixLengths:     counter.DefaultPrefixLengthCounter(),
		Configuration:     c,
	}
	return ipc
}
// Shutdown cleans up asynchronous routines associated with the IPCache,
// waiting for the label-injection controller to terminate.
func (ipc *IPCache) Shutdown() error {
	return ipc.controllers.RemoveControllerAndWait(LabelInjectorName)
}
// RLock RLocks the IPCache's mutex.
func (ipc *IPCache) RLock() {
	ipc.mutex.RLock()
}
// RUnlock RUnlocks the IPCache's mutex.
func (ipc *IPCache) RUnlock() {
	ipc.mutex.RUnlock()
}
// AddListener adds a listener for this IPCache and synchronously replays the
// current cache contents to it.
func (ipc *IPCache) AddListener(listener IPIdentityMappingListener) {
	// We need to acquire the semaphored mutex as we Write Lock as we are
	// modifying the listeners slice.
	ipc.mutex.Lock()
	ipc.listeners = append(ipc.listeners, listener)
	// We will release the semaphore mutex with UnlockToRLock, *and not Unlock*
	// because we want to prevent a race across an Upsert or Delete. By doing
	// this we are sure no other writers are performing any operation while we
	// are still reading.
	ipc.mutex.UnlockToRLock()
	defer ipc.mutex.RUnlock()
	// Initialize new listener with the current mappings
	ipc.dumpToListenerLocked(listener)
}
// UpdateController updates or creates a controller with the given name and
// parameters for this IPCache.
func (ipc *IPCache) UpdateController(
	name string,
	params controller.ControllerParams,
) {
	ipc.controllers.UpdateController(name, params)
}
// getHostIPCache returns the host IP and encryption key for the given IP
// string, taking the read lock.
func (ipc *IPCache) getHostIPCache(ip string) (net.IP, uint8) {
	ipc.mutex.RLock()
	defer ipc.mutex.RUnlock()
	return ipc.getHostIPCacheRLocked(ip)
}
// getHostIPCacheRLocked returns the host IP and encryption key for the given
// IP string. Missing entries yield the zero IPKeyPair (nil IP, key 0).
// The caller must hold ipc.mutex for reading.
func (ipc *IPCache) getHostIPCacheRLocked(ip string) (net.IP, uint8) {
	ipKeyPair := ipc.ipToHostIPCache[ip]
	return ipKeyPair.IP, ipKeyPair.Key
}
// GetK8sMetadata returns Kubernetes metadata for the given IP address, or nil
// if the address is invalid or has no metadata.
// The returned pointer should *never* be modified.
func (ipc *IPCache) GetK8sMetadata(ip netip.Addr) *K8sMetadata {
	if !ip.IsValid() {
		return nil
	}
	ipc.mutex.RLock()
	defer ipc.mutex.RUnlock()
	return ipc.getK8sMetadata(ip.String())
}
// getK8sMetadata returns Kubernetes metadata for the given IP address, or nil
// if there is none. The returned pointer refers to a copy of the map value.
// The caller must hold ipc.mutex for reading.
func (ipc *IPCache) getK8sMetadata(ip string) *K8sMetadata {
	meta, ok := ipc.ipToK8sMetadata[ip]
	if !ok {
		return nil
	}
	return &meta
}
// getEndpointFlags returns endpoint flags for the given IP address, taking
// the read lock.
func (ipc *IPCache) getEndpointFlags(ip string) uint8 {
	ipc.mutex.RLock()
	defer ipc.mutex.RUnlock()
	return ipc.getEndpointFlagsRLocked(ip)
}
// getEndpointFlagsRLocked returns endpoint flags for the given IP address
// (zero for unknown IPs). The caller must hold ipc.mutex for reading.
func (ipc *IPCache) getEndpointFlagsRLocked(ip string) uint8 {
	return ipc.ipToEndpointFlags[ip]
}
// Upsert adds / updates the provided IP (endpoint or CIDR prefix) and identity
// into the IPCache.
//
// Returns an error if the entry is not owned by the self declared source, i.e.
// returns error if the kubernetes layer is trying to upsert an entry now
// managed by the kvstore layer or if 'ip' is invalid. See
// source.AllowOverwrite() for rules on ownership. hostIP is the location of the
// given IP. It is optional (may be nil) and is propagated to the listeners.
// k8sMeta contains Kubernetes-specific metadata such as pod namespace and pod
// name belonging to the IP (may be nil).
//
// When deleting ipcache entries that were previously inserted via this
// function, ensure that the corresponding delete occurs via Delete().
//
// Deprecated: Prefer UpsertMetadata() instead.
func (ipc *IPCache) Upsert(ip string, hostIP net.IP, hostKey uint8, k8sMeta *K8sMetadata, newIdentity Identity) (namedPortsChanged bool, err error) {
	ipc.mutex.Lock()
	defer ipc.mutex.Unlock()
	// Marked as fromLegacyAPI so the metadata subsystem can track ownership.
	return ipc.upsertLocked(ip, hostIP, hostKey, k8sMeta, newIdentity, 0 /* endpointFlags unused in legacy */, false /* !force */, true /* fromLegacyAPI */)
}
// upsertLocked adds / updates the provided IP and identity into the IPCache,
// assuming that the IPCache lock has been taken. Warning, do not use force
// unless you know exactly what you're doing. Forcing adding / updating the
// IPCache will not take into account the source of the identity and bypasses
// the overwrite logic! Once GH-18301 is addressed, there will be no need for
// any force logic.
//
// The ip argument is a string, and the format is one of
// - Prefix (e.g., 10.0.0.0/24)
// - Host IP (e.g., 10.0.0.1)
// - Prefix with ClusterID (e.g., 10.0.0.0/24@1)
// - Host IP with ClusterID (e.g., 10.0.0.1@1)
//
// The formats with ClusterID are only used by Cluster Mesh for overlapping IP
// range support which identifies prefix or host IPs using prefix/ip + ClusterID.
func (ipc *IPCache) upsertLocked(
	ip string,
	hostIP net.IP,
	hostKey uint8,
	k8sMeta *K8sMetadata,
	newIdentity Identity,
	endpointFlags uint8,
	force bool,
	fromLegacyAPI bool,
) (namedPortsChanged bool, err error) {
	var newNamedPorts types.NamedPortMap
	if k8sMeta != nil {
		newNamedPorts = k8sMeta.NamedPorts
	}
	// Only pay for the scoped logger fields in debug mode.
	scopedLog := ipc.logger
	if option.Config.Debug {
		scopedLog = ipc.logger.With(
			logfields.IPAddr, ip,
			logfields.Identity, newIdentity,
			logfields.Key, hostKey,
		)
		if k8sMeta != nil {
			scopedLog = scopedLog.With(
				logfields.K8sPodName, k8sMeta.PodName,
				logfields.K8sNamespace, k8sMeta.Namespace,
				logfields.NamedPorts, k8sMeta.NamedPorts,
			)
		}
	}
	var cidrCluster cmtypes.PrefixCluster
	var oldIdentity *Identity
	callbackListeners := true
	// Snapshot the current state for this key so we can detect no-op updates
	// and report the previous values to listeners.
	oldHostIP, oldHostKey := ipc.getHostIPCacheRLocked(ip)
	oldEndpointFlags := ipc.getEndpointFlagsRLocked(ip)
	oldK8sMeta := ipc.ipToK8sMetadata[ip]
	metaEqual := oldK8sMeta.Equal(k8sMeta)
	cachedIdentity, found := ipc.ipToIdentityCache[ip]
	if found {
		// If this is a legacy upsert call, then we need to make sure that the
		// overwrittenLegacySource is updated
		if fromLegacyAPI {
			switch {
			case cachedIdentity.exclusivelyOwnedByMetadataAPI():
				// If the entry was exclusively owned by the metadata API, then we
				// want to preserve our source as the legacy source (so it can be
				// restored by the metadata API later) - even if our upsert call is
				// later rejected due to it being a low-precedence upsert.
				newIdentity.overwrittenLegacySource = newIdentity.Source
			case cachedIdentity.ownedByLegacyAndMetadataAPI():
				// If the entry is owned by both APIs, then it will have overwrittenLegacySource
				// already set. Thus, check if we should update it, otherwise just preserve it as is.
				if source.AllowOverwrite(cachedIdentity.overwrittenLegacySource, newIdentity.Source) {
					newIdentity.overwrittenLegacySource = newIdentity.Source
				} else {
					newIdentity.overwrittenLegacySource = cachedIdentity.overwrittenLegacySource
				}
			}
		}
		// Here we track if an entry was previously created via new asynchronous
		// UpsertMetadata API or the old synchronous Upsert call and preserve that bit
		if fromLegacyAPI || cachedIdentity.modifiedByLegacyAPI {
			newIdentity.modifiedByLegacyAPI = true
		}
		if !force && !source.AllowOverwrite(cachedIdentity.Source, newIdentity.Source) {
			metrics.IPCacheErrorsTotal.WithLabelValues(
				metricTypeUpsert, metricErrorOverwrite,
			).Inc()
			// Even if this update is rejected, we want to update the modifiedByLegacyAPI
			// and overwrittenLegacySource fields in case the low precedence source here
			// needs to be restored later
			cachedIdentity.modifiedByLegacyAPI = newIdentity.modifiedByLegacyAPI
			cachedIdentity.overwrittenLegacySource = newIdentity.overwrittenLegacySource
			ipc.ipToIdentityCache[ip] = cachedIdentity
			return false, NewErrOverwrite(cachedIdentity.Source, newIdentity.Source)
		}
		// Skip update if IP is already mapped to the given identity
		// and the host IP hasn't changed.
		if cachedIdentity.equals(newIdentity) && oldHostIP.Equal(hostIP) &&
			hostKey == oldHostKey && metaEqual && oldEndpointFlags == endpointFlags {
			return false, nil
		}
		oldIdentity = &cachedIdentity
	} else if fromLegacyAPI {
		// If this is a new entry, inserted via legacy API, then track it as such
		newIdentity.modifiedByLegacyAPI = true
	}
	// Endpoint IP identities take precedence over CIDR identities, so if the
	// IP is a full CIDR prefix and there's an existing equivalent endpoint IP,
	// don't notify the listeners.
	if cidrCluster, err = cmtypes.ParsePrefixCluster(ip); err == nil {
		if cidrCluster.IsSingleIP() {
			if _, endpointIPFound := ipc.ipToIdentityCache[cidrCluster.AddrCluster().String()]; endpointIPFound {
				scopedLog.Debug("Ignoring CIDR to identity mapping as it is shadowed by an endpoint IP")
				// Skip calling back the listeners, since the endpoint IP has
				// precedence over the new CIDR.
				newIdentity.shadowed = true
			}
		}
	} else if addrCluster, err := cmtypes.ParseAddrCluster(ip); err == nil { // Endpoint IP or Endpoint IP with ClusterID
		cidrCluster = addrCluster.AsPrefixCluster()
		// Check whether the upserted endpoint IP will shadow that CIDR, and
		// replace its mapping with the listeners if that was the case.
		if !found {
			cidrClusterStr := cidrCluster.String()
			if cidrIdentity, cidrFound := ipc.ipToIdentityCache[cidrClusterStr]; cidrFound {
				oldHostIP, _ = ipc.getHostIPCacheRLocked(cidrClusterStr)
				if cidrIdentity.ID != newIdentity.ID || !oldHostIP.Equal(hostIP) {
					scopedLog.Debug("New endpoint IP started shadowing existing CIDR to identity mapping")
					cidrIdentity.shadowed = true
					ipc.ipToIdentityCache[cidrClusterStr] = cidrIdentity
					oldIdentity = &cidrIdentity
				} else {
					// The endpoint IP and the CIDR are associated with the
					// same identity and host IP. Nothing changes for the
					// listeners.
					callbackListeners = false
				}
			}
		}
	} else {
		// Neither a prefix nor an address: reject the upsert.
		scopedLog.Error(
			"Attempt to upsert invalid IP into ipcache layer",
			logfields.AddrCluster, ip,
			logfields.Identity, newIdentity,
			logfields.Key, hostKey,
		)
		metrics.IPCacheErrorsTotal.WithLabelValues(
			metricTypeUpsert, metricErrorInvalid,
		).Inc()
		return false, NewErrInvalidIP(ip)
	}
	scopedLog.Debug("Upserting IP into ipcache layer")
	// Update both maps.
	ipc.ipToIdentityCache[ip] = newIdentity
	// Delete the old identity, if any.
	if found {
		delete(ipc.identityToIPCache[cachedIdentity.ID], ip)
		if len(ipc.identityToIPCache[cachedIdentity.ID]) == 0 {
			delete(ipc.identityToIPCache, cachedIdentity.ID)
		}
	}
	if _, ok := ipc.identityToIPCache[newIdentity.ID]; !ok {
		ipc.identityToIPCache[newIdentity.ID] = map[string]struct{}{}
	}
	ipc.identityToIPCache[newIdentity.ID][ip] = struct{}{}
	ipc.prefixLengths.Add([]netip.Prefix{cidrCluster.AsPrefix()})
	if hostIP == nil {
		delete(ipc.ipToHostIPCache, ip)
	} else {
		ipc.ipToHostIPCache[ip] = IPKeyPair{IP: hostIP, Key: hostKey}
	}
	ipc.ipToEndpointFlags[ip] = endpointFlags
	if !metaEqual {
		if k8sMeta == nil {
			delete(ipc.ipToK8sMetadata, ip)
		} else {
			ipc.ipToK8sMetadata[ip] = *k8sMeta
		}
		// Update the named ports reference counting, but don't cause policy
		// updates if no policy uses named ports.
		namedPortsChanged = ipc.namedPorts.Update(oldK8sMeta.NamedPorts, newNamedPorts)
		namedPortsChanged = namedPortsChanged && ipc.needNamedPorts.Load()
	}
	// Shadowed entries are kept in the cache but not announced to listeners.
	if callbackListeners && !newIdentity.shadowed {
		for _, listener := range ipc.listeners {
			listener.OnIPIdentityCacheChange(Upsert, cidrCluster, oldHostIP, hostIP, oldIdentity, newIdentity, hostKey, k8sMeta, endpointFlags)
		}
	}
	metrics.IPCacheEventsTotal.WithLabelValues(
		metricTypeUpsert,
	).Inc()
	return namedPortsChanged, nil
}
// DumpToListener dumps the entire contents of the IPCache by triggering
// the listener's "OnIPIdentityCacheChange" method for each entry in the cache.
func (ipc *IPCache) DumpToListener(listener IPIdentityMappingListener) {
	ipc.mutex.RLock()
	defer ipc.mutex.RUnlock()
	ipc.dumpToListenerLocked(listener)
}
// MU is a batched metadata update, the short name is to cut down on visual clutter.
type MU struct {
	// Prefix is the prefix (plus optional ClusterID) the update applies to.
	Prefix cmtypes.PrefixCluster
	// Source is the source claiming ownership of the metadata.
	Source source.Source
	// Resource identifies the resource providing the metadata.
	Resource ipcacheTypes.ResourceID
	// Metadata is the set of metadata items to upsert or remove.
	Metadata []IPMetadata
	// IsCIDR marks updates whose prefix is reference-counted as a CIDR.
	IsCIDR bool
}
// UpsertMetadata upserts a given IP and some corresponding information into
// the ipcache metadata map. See IPMetadata for a list of types that are valid
// to pass into this function. This will trigger asynchronous calculation of
// any datapath updates necessary to implement the logic associated with the
// specified metadata.
func (ipc *IPCache) UpsertMetadata(prefix cmtypes.PrefixCluster, src source.Source, resource ipcacheTypes.ResourceID, aux ...IPMetadata) {
	// Single-update convenience wrapper around the batched API.
	ipc.UpsertMetadataBatch(MU{Prefix: prefix, Source: src, Resource: resource, Metadata: aux})
}
// UpsertMetadataBatch applies updates to multiple prefixes in a single transaction,
// reducing potential lock contention.
//
// Returns a revision number that can be passed to WaitForRevision().
func (ipc *IPCache) UpsertMetadataBatch(updates ...MU) (revision uint64) {
	affected := make([]cmtypes.PrefixCluster, 0, len(updates))
	ipc.metadata.Lock()
	for _, u := range updates {
		// CIDR updates are reference-counted: skip the upsert unless this is
		// the first reference for the prefix.
		if u.IsCIDR && !ipc.metadata.prefixRefCounter.Add(u.Prefix) {
			continue
		}
		affected = append(affected, ipc.metadata.upsertLocked(u.Prefix, u.Source, u.Resource, u.Metadata...)...)
	}
	ipc.metadata.Unlock()
	revision = ipc.metadata.enqueuePrefixUpdates(affected...)
	ipc.TriggerLabelInjection()
	return revision
}
// RemoveMetadata removes metadata associated with a specific resource from the
// supplied prefix. Individual metadata types must be supplied for removal, but the
// data need not match.
//
// This removes nothing:
//
//	RemoveMetadata(pfx, resource)
//
// This removes all labels from the given resource:
//
//	RemoveMetadata(pfx, resource, Labels{})
func (ipc *IPCache) RemoveMetadata(prefix cmtypes.PrefixCluster, resource ipcacheTypes.ResourceID, aux ...IPMetadata) {
	// Single-update convenience wrapper around the batched API.
	ipc.RemoveMetadataBatch(MU{Prefix: prefix, Resource: resource, Metadata: aux})
}
// RemoveMetadataBatch is a batched version of RemoveMetadata.
// Returns a revision number that can be passed to WaitForRevision().
func (ipc *IPCache) RemoveMetadataBatch(updates ...MU) (revision uint64) {
	affected := make([]cmtypes.PrefixCluster, 0, len(updates))
	ipc.metadata.Lock()
	for _, u := range updates {
		// CIDR updates are reference-counted: only actually remove the
		// metadata once the last reference for the prefix is dropped.
		if u.IsCIDR && !ipc.metadata.prefixRefCounter.Delete(u.Prefix) {
			continue
		}
		affected = append(affected, ipc.metadata.remove(u.Prefix, u.Resource, u.Metadata...)...)
	}
	ipc.metadata.Unlock()
	revision = ipc.metadata.enqueuePrefixUpdates(affected...)
	ipc.TriggerLabelInjection()
	return revision
}
// OverrideIdentity overrides the identity for a given prefix in the IPCache metadata
// map. This is used when a resource indicates that this prefix already has a
// defined identity, and where any additional labels associated with the prefix
// are to be ignored.
// If multiple resources override the identity, a warning is emitted and only
// one of the override identities is used.
// This will trigger asynchronous calculation of any local identity changes
// that must occur to associate the specified labels with the prefix, and push
// any datapath updates necessary to implement the logic associated with the
// metadata currently associated with the 'prefix'.
//
// Callers must arrange for RemoveIdentityOverride() to eventually be called
// to reverse this operation if the underlying resource is removed.
//
// Use with caution: For most use cases, UpsertMetadata() is a better API to
// allow metadata to be associated with the prefix. This will delegate identity
// resolution to the IPCache internally, which provides better compatibility
// between various features that may use the IPCache to associate metadata with
// the same netip prefixes. Using this API may cause feature incompatibilities
// with users of other APIs such as UpsertMetadata() and other variations on
// inserting metadata into the IPCache.
func (ipc *IPCache) OverrideIdentity(prefix cmtypes.PrefixCluster, identityLabels labels.Labels, src source.Source, resource ipcacheTypes.ResourceID) {
	ipc.UpsertMetadata(prefix, src, resource, overrideIdentity(true), identityLabels)
}
// RemoveIdentityOverride reverses a previous OverrideIdentity() call for the
// given prefix, labels and resource.
func (ipc *IPCache) RemoveIdentityOverride(cidr cmtypes.PrefixCluster, identityLabels labels.Labels, resource ipcacheTypes.ResourceID) {
	ipc.RemoveMetadata(cidr, resource, overrideIdentity(true), identityLabels)
}
// WaitForRevision will block until the desired revision has been reached (or passed).
// It can be used in concert with the revision number returned by Upsert* calls to
// ensure that an update has been applied.
//
// The revision is updated every time the ipcache successfully applies all queued
// metadata updates. Thus, the sequence
//
//	rev := UpsertMetadataBatch(prefix1, metadata, ...)
//	WaitForRevision(ctx, rev)
//
// means that prefix1 has had at least one call to InjectLabels with the supplied
// metadata. It does not guarantee that the metadata matches exactly what was
// passed to UpsertMetadata, as other callers may have also queued modifications.
//
// Note that the revision number should be treated as an opaque identifier.
// Returns a non-nil error if the provided context was cancelled before the
// desired revision was reached.
func (ipc *IPCache) WaitForRevision(ctx context.Context, desired uint64) error {
	return ipc.metadata.waitForRevision(ctx, desired)
}
// dumpToListenerLocked replays the entire contents of the IPCache to the
// given listener by invoking its "OnIPIdentityCacheChange" method once per
// non-shadowed entry. The caller *MUST* grab the IPCache.Lock for reading
// before calling this function.
func (ipc *IPCache) dumpToListenerLocked(listener IPIdentityMappingListener) {
	for addr, id := range ipc.ipToIdentityCache {
		if id.shadowed {
			// Shadowed entries are invisible to listeners; skip them.
			continue
		}
		hostIP, encryptKey := ipc.getHostIPCacheRLocked(addr)
		k8sMeta := ipc.getK8sMetadata(addr)
		endpointFlags := ipc.getEndpointFlagsRLocked(addr)
		prefixCluster, err := cmtypes.ParsePrefixCluster(addr)
		if err != nil {
			// Not a CIDR key, so it must be a bare (cluster-scoped) address;
			// convert it to the equivalent fully-masked prefix.
			prefixCluster = cmtypes.MustParseAddrCluster(addr).AsPrefixCluster()
		}
		listener.OnIPIdentityCacheChange(Upsert, prefixCluster, nil, hostIP, nil, id, encryptKey, k8sMeta, endpointFlags)
	}
}
// deleteLocked removes the provided IP-to-security-identity mapping
// from ipc with the assumption that the IPCache's mutex is held.
//
// The deletion is refused (returning false) if the entry does not exist or if
// it is owned by a different source than the one requesting the deletion.
// Registered listeners are notified of the change unless the deleted entry
// was never visible to them (e.g. a CIDR shadowed by an endpoint IP).
//
// The returned namedPortsChanged is true only if the deletion changed the
// named port map AND named ports are currently in use (see GetNamedPorts).
func (ipc *IPCache) deleteLocked(ip string, source source.Source) (namedPortsChanged bool) {
	logAttr := ipc.logger.With(logfields.IPAddr, ip)
	cachedIdentity, found := ipc.ipToIdentityCache[ip]
	if !found {
		logAttr.Warn("Attempt to remove non-existing IP from ipcache layer")
		metrics.IPCacheErrorsTotal.WithLabelValues(
			metricTypeDelete, metricErrorNoExist,
		).Inc()
		return false
	}

	// Only the source that owns the cached entry may delete it.
	if cachedIdentity.Source != source {
		logAttr.Debug(
			"Skipping delete of identity from source",
			logfields.CachedSource, cachedIdentity.Source,
			logfields.Source, source,
		)
		metrics.IPCacheErrorsTotal.WithLabelValues(
			metricTypeDelete, metricErrorOverwrite,
		).Inc()
		return false
	}

	// Capture the state associated with the entry before it is removed, so
	// that listeners can be informed of the old values below.
	var cidrCluster cmtypes.PrefixCluster
	cacheModification := Delete
	oldHostIP, encryptKey := ipc.getHostIPCacheRLocked(ip)
	oldK8sMeta := ipc.getK8sMetadata(ip)
	oldEndpointFlags := ipc.getEndpointFlagsRLocked(ip)
	var newHostIP net.IP
	var oldIdentity *Identity
	newIdentity := cachedIdentity
	callbackListeners := true

	var err error
	if cidrCluster, err = cmtypes.ParsePrefixCluster(ip); err == nil {
		// Check whether the deleted CIDR was shadowed by an endpoint IP. In
		// this case, skip calling back the listeners since they don't know
		// about its mapping.
		if _, endpointIPFound := ipc.ipToIdentityCache[cidrCluster.AddrCluster().String()]; endpointIPFound {
			ipc.logger.Debug(
				"Deleting CIDR shadowed by endpoint IP",
			)
			callbackListeners = false
		}
	} else if addrCluster, err := cmtypes.ParseAddrCluster(ip); err == nil { // Endpoint IP or Endpoint IP with ClusterID
		// Convert the endpoint IP into an equivalent full CIDR.
		cidrCluster = addrCluster.AsPrefixCluster()

		// Check whether the deleted endpoint IP was shadowing that CIDR, and
		// restore its mapping with the listeners if that was the case.
		cidrClusterStr := cidrCluster.String()
		if cidrIdentity, cidrFound := ipc.ipToIdentityCache[cidrClusterStr]; cidrFound {
			newHostIP, _ = ipc.getHostIPCacheRLocked(cidrClusterStr)
			if cidrIdentity.ID != cachedIdentity.ID || !oldHostIP.Equal(newHostIP) {
				ipc.logger.Debug(
					"Removal of endpoint IP revives shadowed CIDR to identity mapping",
				)
				// Notify listeners with an Upsert for the revived CIDR
				// mapping instead of a Delete for the endpoint IP.
				cacheModification = Upsert
				cidrIdentity.shadowed = false
				ipc.ipToIdentityCache[cidrClusterStr] = cidrIdentity
				oldIdentity = &cachedIdentity
				newIdentity = cidrIdentity
			} else {
				// The endpoint IP and the CIDR were associated with the same
				// identity and host IP. Nothing changes for the listeners.
				callbackListeners = false
			}
		}
	} else {
		ipc.logger.Error(
			"Attempt to delete invalid IP from ipcache layer",
		)
		metrics.IPCacheErrorsTotal.WithLabelValues(
			metricTypeDelete, metricErrorInvalid,
		).Inc()
		return false
	}

	ipc.logger.Debug(
		"Deleting IP from ipcache layer",
	)

	// Remove the entry from the forward map and all auxiliary maps, dropping
	// the reverse (identity -> IPs) bucket once it becomes empty.
	delete(ipc.ipToIdentityCache, ip)
	delete(ipc.identityToIPCache[cachedIdentity.ID], ip)
	if len(ipc.identityToIPCache[cachedIdentity.ID]) == 0 {
		delete(ipc.identityToIPCache, cachedIdentity.ID)
	}
	delete(ipc.ipToHostIPCache, ip)
	delete(ipc.ipToK8sMetadata, ip)
	delete(ipc.ipToEndpointFlags, ip)
	ipc.prefixLengths.Delete([]netip.Prefix{cidrCluster.AsPrefix()})

	// Update named ports
	namedPortsChanged = false
	if oldK8sMeta != nil && len(oldK8sMeta.NamedPorts) > 0 {
		namedPortsChanged = ipc.namedPorts.Update(oldK8sMeta.NamedPorts, nil)
		// Only trigger policy updates if named ports are used in policy.
		namedPortsChanged = namedPortsChanged && ipc.needNamedPorts.Load()
	}

	if callbackListeners {
		for _, listener := range ipc.listeners {
			listener.OnIPIdentityCacheChange(cacheModification, cidrCluster, oldHostIP, newHostIP,
				oldIdentity, newIdentity, encryptKey, oldK8sMeta, oldEndpointFlags)
		}
	}

	metrics.IPCacheEventsTotal.WithLabelValues(
		metricTypeDelete,
	).Inc()
	return namedPortsChanged
}
// GetNamedPorts returns the named ports map. May return nil.
//
// Calling this also marks named ports as "in use": subsequent Upsert/Delete
// calls will start reporting namedPortsChanged=true when they modify a named
// port, enabling the caller to trigger policy updates.
func (ipc *IPCache) GetNamedPorts() (npm types.NamedPortMultiMap) {
	// We must not acquire the IPCache mutex here, as that would establish a lock ordering of
	// Endpoint > IPCache (as endpoint.mutex can be held while calling GetNamedPorts)
	// Since InjectLabels requires IPCache > Endpoint, a deadlock can occur otherwise.

	// needNamedPorts is initially set to 'false'. This means that we will not trigger
	// policy updates upon changes to named ports. Once this is set to 'true' though,
	// Upsert and Delete will start to return 'namedPortsChanged = true' if the upsert
	// or delete changed a named port, enabling the caller to trigger a policy update.
	// Note that at the moment, this will never be set back to false, even if no policy
	// uses named ports anymore.
	ipc.needNamedPorts.Store(true)

	// Caller can keep using the map, operations on it are protected by its mutex.
	return ipc.namedPorts
}
// DeleteOnMetadataMatch removes the provided IP to security identity mapping
// from the IPCache, but only if the cached Kubernetes metadata (namespace and
// pod name) matches the triggering pod event.
func (ipc *IPCache) DeleteOnMetadataMatch(IP string, source source.Source, namespace, name string) (namedPortsChanged bool) {
	ipc.mutex.Lock()
	defer ipc.mutex.Unlock()

	meta := ipc.getK8sMetadata(IP)
	if meta == nil || meta.Namespace != namespace || meta.PodName != name {
		// Metadata mismatch: the entry is owned by a different pod; leave it.
		return false
	}
	return ipc.deleteLocked(IP, source)
}
// Delete removes the provided IP-to-security-identity mapping from the IPCache.
//
// Deprecated: Prefer RemoveLabels() or RemoveIdentity() instead.
func (ipc *IPCache) Delete(IP string, source source.Source) (namedPortsChanged bool) {
	ipc.mutex.Lock()
	defer ipc.mutex.Unlock()
	namedPortsChanged = ipc.deleteLocked(IP, source)
	return namedPortsChanged
}
// LookupByIP returns the corresponding security identity that endpoint IP maps
// to within the provided IPCache, as well as if the corresponding entry exists
// in the IPCache.
func (ipc *IPCache) LookupByIP(IP string) (Identity, bool) {
	ipc.mutex.RLock()
	defer ipc.mutex.RUnlock()
	id, ok := ipc.lookupByIPRLocked(IP)
	return id, ok
}
// lookupByIPRLocked returns the corresponding security identity that endpoint
// IP maps to within the provided IPCache, as well as if the corresponding
// entry exists in the IPCache. Caller must hold the read lock.
func (ipc *IPCache) lookupByIPRLocked(IP string) (Identity, bool) {
	id, ok := ipc.ipToIdentityCache[IP]
	return id, ok
}
// LookupByPrefixRLocked looks for either the specified CIDR prefix, or if the
// prefix is fully specified (ie, w.x.y.z/32 for IPv4), the host for the
// identity in the provided IPCache, and returns the corresponding security
// identity as well as whether the entry exists in the IPCache.
func (ipc *IPCache) LookupByPrefixRLocked(prefix string) (identity Identity, exists bool) {
	_, cidr, err := net.ParseCIDR(prefix)
	if err == nil {
		// A fully-masked prefix may be stored under its bare host address;
		// try that key first.
		if ones, bits := cidr.Mask.Size(); ones == bits {
			if identity, exists = ipc.ipToIdentityCache[cidr.IP.String()]; exists {
				return identity, exists
			}
		}
	}
	identity, exists = ipc.ipToIdentityCache[prefix]
	return identity, exists
}
// LookupByPrefix returns the corresponding security identity that endpoint IP
// maps to within the provided IPCache, as well as if the corresponding entry
// exists in the IPCache.
func (ipc *IPCache) LookupByPrefix(IP string) (Identity, bool) {
	ipc.mutex.RLock()
	defer ipc.mutex.RUnlock()
	id, ok := ipc.LookupByPrefixRLocked(IP)
	return id, ok
}
// LookupSecIDByIP performs a longest prefix match lookup in the IPCache for
// the identity corresponding to the specified address (or, in the case of no
// direct match, any shorter prefix). Returns the corresponding identity and
// whether a match was found.
func (ipc *IPCache) LookupSecIDByIP(ip netip.Addr) (id Identity, ok bool) {
	if !ip.IsValid() {
		return Identity{}, false
	}

	ipc.mutex.RLock()
	defer ipc.mutex.RUnlock()

	// Fast path: exact address match.
	id, ok = ipc.lookupByIPRLocked(ip.String())
	if ok {
		return id, ok
	}

	// Slow path: probe every prefix length known to the cache for the
	// relevant address family.
	v6Lens, v4Lens := ipc.prefixLengths.ToBPFData()
	lens := v4Lens
	if ip.Is6() {
		lens = v6Lens
	}
	for _, plen := range lens {
		// note: we perform a lookup even when `plen == bits`, as some
		// entries derived by a single address cidr-range will not have been
		// found by the above lookup
		prefix, _ := ip.Prefix(plen)
		if id, ok = ipc.LookupByPrefixRLocked(prefix.String()); ok {
			return id, ok
		}
	}

	return id, false
}
// LookupByIdentity returns the set of IPs (endpoint or CIDR prefix) that have
// security identity ID, or nil if the entry does not exist.
func (ipc *IPCache) LookupByIdentity(id identity.NumericIdentity) (ips []string) {
	ipc.mutex.RLock()
	defer ipc.mutex.RUnlock()

	// Can't return the internal map as it may be modified at any time when the
	// lock is not held, so return a slice of strings instead.
	entries := ipc.identityToIPCache[id]
	if len(entries) == 0 {
		return nil
	}
	ips = make([]string, 0, len(entries))
	for ip := range entries {
		ips = append(ips, ip)
	}
	return ips
}
// LookupByHostRLocked returns the set of IPs (endpoint or CIDR prefix) that
// have hostIPv4 or hostIPv6 associated as the host of the entry. Requires the
// caller to hold the RLock.
func (ipc *IPCache) LookupByHostRLocked(hostIPv4, hostIPv6 net.IP) (cidrs []net.IPNet) {
	for key, host := range ipc.ipToHostIPCache {
		matchesV4 := hostIPv4 != nil && host.IP.Equal(hostIPv4)
		matchesV6 := hostIPv6 != nil && host.IP.Equal(hostIPv6)
		if !matchesV4 && !matchesV6 {
			continue
		}
		_, cidr, err := net.ParseCIDR(key)
		if err != nil {
			// Bare endpoint IP: convert it into the equivalent full prefix.
			cidr = iputil.IPToPrefix(net.ParseIP(key))
		}
		cidrs = append(cidrs, *cidr)
	}
	return cidrs
}
// Equal returns true if two K8sMetadata pointers contain the same data or are
// both nil.
func (m *K8sMetadata) Equal(o *K8sMetadata) bool {
	switch {
	case m == o:
		// Same pointer (including both nil).
		return true
	case m == nil || o == nil:
		return false
	case len(m.NamedPorts) != len(o.NamedPorts):
		return false
	}
	for name, port := range m.NamedPorts {
		if other, ok := o.NamedPorts[name]; !ok || port != other {
			return false
		}
	}
	return m.Namespace == o.Namespace && m.PodName == o.PodName
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ipcache
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"net"
"net/netip"
"path"
"sort"
"github.com/cilium/hive/cell"
cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/kvstore"
storepkg "github.com/cilium/cilium/pkg/kvstore/store"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/types"
"github.com/cilium/cilium/pkg/u8proto"
)
const (
	// DefaultAddressSpace is the address space used if none is provided.
	// TODO - once pkg/node adds this to clusterConfiguration, remove.
	DefaultAddressSpace = "default"
)

var (
	// IPIdentitiesPath is the path to where endpoint IPs are stored in the key-value
	// store.
	IPIdentitiesPath = path.Join(kvstore.BaseKeyPrefix, "state", "ip", "v1")

	// AddressSpace is the address space (cluster, etc.) in which policy is
	// computed. It is determined by the orchestration system / runtime.
	AddressSpace = DefaultAddressSpace
)
// kvstoreClient abstracts the subset of kvstore operations needed to
// synchronize ipcache entries, decoupling the synchronizer from the full
// kvstore client interface.
type kvstoreClient interface {
	// IsEnabled returns true if KVStore support is enabled.
	IsEnabled() bool

	// UpdateIfDifferent updates a key if the value is different.
	UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (bool, error)

	// Delete deletes a key. It does not return an error if the key does not exist.
	Delete(ctx context.Context, key string) error
}

// IPIdentitySynchronizer handles the synchronization of ipcache entries into the kvstore.
type IPIdentitySynchronizer struct {
	logger *slog.Logger
	client kvstoreClient

	// tracker remembers the marshaled value of every key written via Upsert
	// (and not yet removed via Delete). It is consulted by the watcher's
	// self-deletion protection to re-create entries that unexpectedly
	// disappear from the kvstore.
	tracker lock.Map[string, []byte]
}
// NewIPIdentitySynchronizer returns a synchronizer that writes ipcache
// entries into the given kvstore client.
func NewIPIdentitySynchronizer(logger *slog.Logger, client kvstore.Client) *IPIdentitySynchronizer {
	return &IPIdentitySynchronizer{
		logger: logger,
		client: client,
	}
}
// Upsert updates / inserts the provided IP->Identity mapping into the kvstore.
func (s *IPIdentitySynchronizer) Upsert(ctx context.Context, IP, hostIP netip.Addr, ID identity.NumericIdentity, key uint8,
	metadata, k8sNamespace, k8sPodName string, npm types.NamedPortMap) error {

	// Flatten the named-port map into a slice with a deterministic order.
	namedPorts := make([]identity.NamedPort, 0, len(npm))
	for name, value := range npm {
		namedPorts = append(namedPorts, identity.NamedPort{
			Name:     name,
			Port:     value.Port,
			Protocol: u8proto.U8proto(value.Proto).String(),
		})
	}
	sort.Slice(namedPorts, func(i, j int) bool {
		return namedPorts[i].Name < namedPorts[j].Name
	})

	pair := identity.IPIdentityPair{
		IP:           IP.AsSlice(),
		ID:           ID,
		Metadata:     metadata,
		HostIP:       hostIP.AsSlice(),
		Key:          key,
		K8sNamespace: k8sNamespace,
		K8sPodName:   k8sPodName,
		NamedPorts:   namedPorts,
	}

	marshaled, err := json.Marshal(pair)
	if err != nil {
		return err
	}

	ipKey := path.Join(IPIdentitiesPath, AddressSpace, IP.String())
	s.logger.Debug(
		"Upserting IP->ID mapping to kvstore",
		logfields.IPAddr, pair.IP,
		logfields.Identity, pair.ID,
		logfields.Key, pair.Key,
		logfields.Modification, Upsert,
	)

	if _, err = s.client.UpdateIfDifferent(ctx, ipKey, marshaled, true); err != nil {
		return err
	}
	// Record the written value so that self-deletion protection can
	// re-create the key if it is unexpectedly deleted.
	s.tracker.Store(ipKey, marshaled)
	return nil
}
// Delete removes the IP->Identity mapping for the specified ip
// from the kvstore, which will subsequently trigger an event in
// NewIPIdentityWatcher().
func (s *IPIdentitySynchronizer) Delete(ctx context.Context, ip string) error {
	key := path.Join(IPIdentitiesPath, AddressSpace, ip)
	// Drop the tracker entry first so that self-deletion protection does
	// not resurrect the key we are about to delete.
	s.tracker.Delete(key)
	return s.client.Delete(ctx, key)
}
// IsEnabled returns true if the synchronization to the KVStore is enabled,
// i.e. if the underlying kvstore client is enabled.
func (s *IPIdentitySynchronizer) IsEnabled() bool {
	return s.client.IsEnabled()
}
// LocalIPIdentityWatcher is an IPIdentityWatcher specialized to watch the
// entries corresponding to the local cluster.
type LocalIPIdentityWatcher struct {
	// watcher performs the actual kvstore watch.
	watcher *IPIdentityWatcher
	// syncer provides self-deletion protection for locally-owned entries.
	syncer *IPIdentitySynchronizer
	// client is the kvstore backend being watched.
	client kvstore.Client
}
// NewLocalIPIdentityWatcher instantiates the watcher for the local cluster's
// IP to identity entries, with its dependencies provided via hive injection.
func NewLocalIPIdentityWatcher(in struct {
	cell.In

	Logger      *slog.Logger
	ClusterInfo cmtypes.ClusterInfo
	Client      kvstore.Client
	Factory     storepkg.Factory
	IPCache     *IPCache
	Syncer      *IPIdentitySynchronizer
}) *LocalIPIdentityWatcher {
	watcher := NewIPIdentityWatcher(
		in.Logger, in.ClusterInfo.Name, in.IPCache,
		in.Factory, source.KVStore,
	)
	return &LocalIPIdentityWatcher{
		watcher: watcher,
		syncer:  in.Syncer,
		client:  in.Client,
	}
}
// Watch starts the watcher and blocks waiting for events, until the context is closed.
func (liw *LocalIPIdentityWatcher) Watch(ctx context.Context) {
	// Locally-owned entries are protected against spurious kvstore deletions.
	protection := WithSelfDeletionProtection(liw.syncer)
	liw.watcher.Watch(ctx, liw.client, protection)
}
// WaitForSync blocks until either the initial list of entries had been retrieved
// from the kvstore, or the given context is canceled. It returns immediately in
// CRD mode.
func (liw *LocalIPIdentityWatcher) WaitForSync(ctx context.Context) error {
	// Nothing to wait for when kvstore support is disabled (CRD mode).
	if liw.client.IsEnabled() {
		return liw.watcher.WaitForSync(ctx)
	}
	return nil
}
// IsEnabled returns true if the synchronization from the KVStore is enabled,
// i.e. if the underlying kvstore client is enabled.
func (liw *LocalIPIdentityWatcher) IsEnabled() bool {
	return liw.client.IsEnabled()
}
// IPIdentityWatcher is a watcher that will notify when IP<->identity mappings
// change in the kvstore.
type IPIdentityWatcher struct {
	log     *slog.Logger
	store   storepkg.WatchStore
	ipcache IPCacher

	clusterName                string
	clusterID                  uint32
	source                     source.Source
	withSelfDeletionProtection bool
	validators                 []ipIdentityValidator

	// Set only when withSelfDeletionProtection is true
	syncer *IPIdentitySynchronizer

	// started is true after Watch() has been invoked at least once.
	started bool
	// synced is closed by onSync() once the initial kvstore listing completes.
	synced chan struct{}
}

// IPCacher is the subset of IPCache operations the watcher needs in order to
// apply upsertion and deletion events observed from the kvstore.
type IPCacher interface {
	Upsert(ip string, hostIP net.IP, hostKey uint8, k8sMeta *K8sMetadata, newIdentity Identity) (bool, error)
	Delete(IP string, source source.Source) (namedPortsChanged bool)
}
// NewIPIdentityWatcher creates a new IPIdentityWatcher for the given cluster.
func NewIPIdentityWatcher(
	logger *slog.Logger, clusterName string, ipc IPCacher, factory storepkg.Factory,
	source source.Source, opts ...storepkg.RWSOpt,
) *IPIdentityWatcher {
	w := &IPIdentityWatcher{
		ipcache:     ipc,
		clusterName: clusterName,
		source:      source,
		synced:      make(chan struct{}),
		log:         logger.With(logfields.ClusterName, clusterName),
	}

	// Register onSync so that WaitForSync() unblocks once the initial
	// listing from the kvstore has completed.
	allOpts := append(opts, storepkg.RWSWithOnSyncCallback(w.onSync))
	w.store = factory.NewWatchStore(
		clusterName,
		func() storepkg.Key { return &identity.IPIdentityPair{} },
		w,
		allOpts...,
	)
	return w
}
// ipIdentityValidator checks whether an observed IP->identity pair is
// acceptable before it is applied to the local ipcache.
type ipIdentityValidator func(*identity.IPIdentityPair) error

// IWOpt customizes the behavior of an IPIdentityWatcher.Watch() invocation.
type IWOpt func(*iwOpts)

// iwOpts collects the settings applied through the IWOpt functional options.
type iwOpts struct {
	clusterID              uint32
	selfDeletionProtection *IPIdentitySynchronizer
	cachedPrefix           bool
	validators             []ipIdentityValidator
}
// WithClusterID configures the ClusterID associated with the given watcher.
func WithClusterID(id uint32) IWOpt {
	return func(o *iwOpts) { o.clusterID = id }
}
// WithSelfDeletionProtection enables the automatic re-creation of the owned
// keys if they are detected to have been deleted, based on the synchronizer
// parameter.
func WithSelfDeletionProtection(synchronizer *IPIdentitySynchronizer) IWOpt {
	return func(o *iwOpts) { o.selfDeletionProtection = synchronizer }
}
// WithCachedPrefix adapts the watched prefix based on the fact that the information
// concerning the given cluster is cached from an external kvstore.
func WithCachedPrefix(cached bool) IWOpt {
	return func(o *iwOpts) { o.cachedPrefix = cached }
}
// WithIdentityValidator registers a validation function to ensure that the
// observed IPs are associated with an identity belonging to the expected
// range for the given cluster ID. Reserved identities (below
// identity.MinimalNumericIdentity) are always accepted as they are not
// scoped by Cluster ID.
func WithIdentityValidator(clusterID uint32) IWOpt {
	return func(opts *iwOpts) {
		// Named minID/maxID to avoid shadowing the min/max builtins.
		minID := identity.GetMinimalAllocationIdentity(clusterID)
		maxID := identity.GetMaximumAllocationIdentity(clusterID)

		validator := func(pair *identity.IPIdentityPair) error {
			switch {
			// The identity belongs to the expected range based on the Cluster ID.
			case pair.ID >= minID && pair.ID <= maxID:
				return nil

			// Allow all reserved IDs as well, including well-known and
			// user-reserved identities, as they are not scoped by Cluster ID.
			case pair.ID < identity.MinimalNumericIdentity:
				return nil

			default:
				return fmt.Errorf("ID %d does not belong to the allocation range of cluster ID %d", pair.ID, clusterID)
			}
		}

		opts.validators = append(opts.validators, validator)
	}
}
// Watch starts the watcher and blocks waiting for events, until the context is
// closed. When events are received from the kvstore, all IPIdentityMappingListener
// are notified. It automatically emits deletion events for stale keys when appropriate
// (that is, when the watcher is restarted, and if the ClusterID is changed).
func (iw *IPIdentityWatcher) Watch(ctx context.Context, backend storepkg.WatchStoreBackend, opts ...IWOpt) {
	var cfg iwOpts
	for _, apply := range opts {
		apply(&cfg)
	}

	// A ClusterID change across Watch() invocations invalidates all entries
	// previously learned from this cluster: drain them before re-watching.
	if iw.started && iw.clusterID != cfg.clusterID {
		iw.log.Info(
			"ClusterID changed: draining all known ipcache entries",
			logfields.ClusterID, cfg.clusterID,
		)
		iw.store.Drain()
	}

	prefix := path.Join(IPIdentitiesPath, AddressSpace)
	if cfg.cachedPrefix {
		prefix = path.Join(kvstore.StateToCachePrefix(IPIdentitiesPath), iw.clusterName)
	}

	iw.started = true
	iw.clusterID = cfg.clusterID
	iw.withSelfDeletionProtection = cfg.selfDeletionProtection != nil
	iw.syncer = cfg.selfDeletionProtection
	iw.validators = cfg.validators

	iw.store.Watch(ctx, backend, prefix)
}
// Drain triggers a deletion event for all known ipcache entries, by draining
// the underlying watch store.
func (iw *IPIdentityWatcher) Drain() {
	iw.store.Drain()
}
// NumEntries returns the number of entries synchronized from the kvstore,
// as reported by the underlying watch store.
func (iw *IPIdentityWatcher) NumEntries() uint64 {
	return iw.store.NumEntries()
}
// Synced returns whether the initial list of entries has been retrieved from
// the kvstore, and new events are currently being watched.
func (iw *IPIdentityWatcher) Synced() bool {
	return iw.store.Synced()
}
// WaitForSync blocks until either the initial list of entries had been retrieved
// from the kvstore, or the given context is canceled.
func (iw *IPIdentityWatcher) WaitForSync(ctx context.Context) error {
	// iw.synced is closed by onSync() once the initial listing completes.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-iw.synced:
		return nil
	}
}
// OnUpdate is triggered when a new upsertion event is observed, and
// synchronizes local caching of endpoint IP to ipIDPair mapping with
// the operation the key-value store has informed us about.
//
// To resolve conflicts between hosts and full CIDR prefixes:
//   - Insert hosts into the cache as ".../w.x.y.z"
//   - Insert CIDRS into the cache as ".../w.x.y.z/N"
//   - If a host entry created, notify the listeners.
//   - If a CIDR is created and there's no overlapping host
//     entry, ie it is a less than fully masked CIDR, OR
//     it is a fully masked CIDR and there is no corresponding
//     host entry, then:
//   - Notify the listeners.
//   - Otherwise, do not notify listeners.
func (iw *IPIdentityWatcher) OnUpdate(k storepkg.Key) {
	ipIDPair := k.(*identity.IPIdentityPair)

	ip := ipIDPair.PrefixString()
	if ip == "<nil>" {
		iw.log.Warn("Ignoring entry with nil IP")
		return
	}

	iw.log.Debug(
		"Observed upsertion event",
		logfields.IPAddr, ip,
	)

	// Reject pairs whose identity is unacceptable for this cluster (see
	// WithIdentityValidator).
	for _, validator := range iw.validators {
		if err := validator(ipIDPair); err != nil {
			iw.log.Warn(
				"Skipping invalid upsertion event",
				logfields.Error, err,
				logfields.IPAddr, ip,
			)
			return
		}
	}

	// Rebuild the Kubernetes metadata (namespace, pod name, named ports)
	// carried by the kvstore pair, if any is present.
	var k8sMeta *K8sMetadata
	if ipIDPair.K8sNamespace != "" || ipIDPair.K8sPodName != "" || len(ipIDPair.NamedPorts) > 0 {
		k8sMeta = &K8sMetadata{
			Namespace:  ipIDPair.K8sNamespace,
			PodName:    ipIDPair.K8sPodName,
			NamedPorts: make(types.NamedPortMap, len(ipIDPair.NamedPorts)),
		}
		for _, np := range ipIDPair.NamedPorts {
			err := k8sMeta.NamedPorts.AddPort(np.Name, int(np.Port), np.Protocol)
			if err != nil {
				// Keep processing the remaining ports; only the failing
				// entry is dropped.
				iw.log.Error(
					"Parsing named port failed",
					logfields.Error, err,
					logfields.IPAddr, ipIDPair,
				)
			}
		}
	}

	peerIdentity := ipIDPair.ID
	if peerIdentity == identity.ReservedIdentityHost {
		// The only way we can discover IPs associated with the local host
		// is directly via the NodeDiscovery package. If someone is informing
		// this agent about IPs corresponding to the "host" via the kvstore,
		// then they're sharing their own perspective on their own node IPs'
		// identity. We should treat the peer as a "remote-node", not a "host".
		peerIdentity = identity.ReservedIdentityRemoteNode
	}

	if iw.clusterID != 0 {
		// Annotate IP/Prefix string with ClusterID. So that we can distinguish
		// two network endpoints that have the same IP address, but belong
		// to different clusters.
		ip = cmtypes.AnnotateIPCacheKeyWithClusterID(ip, iw.clusterID)
	}

	// There is no need to delete the "old" IP addresses from this
	// ip ID pair. The only places where the ip ID pair are created
	// is the clustermesh, where it sends a delete to the KVStore,
	// and the endpoint-runIPIdentitySync where it bounded to a
	// lease and a controller which is stopped/removed when the
	// endpoint is gone.
	iw.ipcache.Upsert(ip, ipIDPair.HostIP, ipIDPair.Key, k8sMeta, Identity{
		ID:     peerIdentity,
		Source: iw.source,
	})
}
// OnDelete is triggered when a new deletion event is observed, and
// synchronizes local caching of endpoint IP to ipIDPair mapping with
// the operation the key-value store has informed us about.
//
// To resolve conflicts between hosts and full CIDR prefixes:
//   - If a host is removed, check for an overlapping CIDR
//     and if it exists, notify the listeners with an upsert
//     for the CIDR's identity
//   - If any other deletion case, notify listeners of
//     the deletion event.
func (iw *IPIdentityWatcher) OnDelete(k storepkg.NamedKey) {
	pair := k.(*identity.IPIdentityPair)
	ip := pair.PrefixString()

	iw.log.Debug(
		"Observed deletion event",
		logfields.IPAddr, ip,
	)

	// Entries owned by this agent may be re-created instead of deleted; in
	// that case the deletion is not propagated to the local ipcache.
	if iw.withSelfDeletionProtection && iw.selfDeletionProtection(ip) {
		return
	}

	if iw.clusterID != 0 {
		// See equivalent logic in the kvstore.EventTypeUpdate case
		ip = cmtypes.AnnotateIPCacheKeyWithClusterID(ip, iw.clusterID)
	}

	// The key no longer exists in the local cache, it is safe to remove
	// from the datapath ipcache.
	iw.ipcache.Delete(ip, iw.source)
}
// onSync is invoked by the watch store once the initial listing from the
// kvstore has completed; closing the channel unblocks all WaitForSync callers.
func (iw *IPIdentityWatcher) onSync(context.Context) {
	close(iw.synced)
}
// selfDeletionProtection re-creates a kvstore entry owned by this agent when
// a deletion notification is received for it while the synchronizer's tracker
// still considers it alive. Returns true when the deletion was suppressed.
func (iw *IPIdentityWatcher) selfDeletionProtection(ip string) bool {
	key := path.Join(IPIdentitiesPath, AddressSpace, ip)
	value, alive := iw.syncer.tracker.Load(key)
	if !alive {
		return false
	}

	iw.log.Warn(
		"Received kvstore delete notification for alive ipcache entry",
		logfields.IPAddr, ip,
	)
	if _, err := iw.syncer.client.UpdateIfDifferent(context.TODO(), key, value, true); err != nil {
		// Best effort: log and carry on; the deletion is still suppressed.
		iw.log.Warn(
			"Unable to re-create alive ipcache entry",
			logfields.Error, err,
			logfields.IPAddr, ip,
		)
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ipcache
import (
"bytes"
"context"
"errors"
"fmt"
"log/slog"
"maps"
"net"
"net/netip"
"sync"
cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/container/bitlpm"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/counter"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/ipcache/types"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
)
var (
	// ErrLocalIdentityAllocatorUninitialized is an error that's returned when
	// the local identity allocator is uninitialized.
	ErrLocalIdentityAllocatorUninitialized = errors.New("local identity allocator uninitialized")

	// LabelInjectorName is the controller name used for asynchronous label
	// injection into the ipcache.
	LabelInjectorName = "ipcache-inject-labels"

	// injectLabelsControllerGroup groups the label-injection controllers.
	injectLabelsControllerGroup = controller.NewGroup("ipcache-inject-labels")
)
// clusterID is the type of the key to use in the metadata CIDRTrieMap
type clusterID uint32

// metadata contains the ipcache metadata. Mainly it holds a map which maps IP
// prefixes (x.x.x.x/32) to a set of information (prefixInfo).
//
// When allocating an identity to associate with each prefix, the
// identity allocation routines will merge this set of labels into the
// complete set of labels used for that local (CIDR) identity,
// thereby associating these labels with each prefix that is 'covered'
// by this prefix. Subsequently these labels may be matched by network
// policy and propagated in monitor output.
//
// ```mermaid
// flowchart
//
//	subgraph resourceInfo
//	labels.Labels
//	source.Source
//	end
//	subgraph prefixInfo
//	UA[ResourceID]-->LA[resourceInfo]
//	UB[ResourceID]-->LB[resourceInfo]
//	...
//	end
//	subgraph identityMetadata
//	IP_Prefix-->prefixInfo
//	end
//
// ```
type metadata struct {
	logger *slog.Logger
	// Protects the m map.
	//
	// If this mutex will be held at the same time as the IPCache mutex,
	// this mutex must be taken first and then take the IPCache mutex in
	// order to prevent deadlocks.
	lock.Mutex

	// m is the actual map containing the mappings.
	m map[cmtypes.PrefixCluster]*prefixInfo

	// prefixes is a map of tries. Each trie holds the prefixes for the same
	// clusterID, in order to find descendants efficiently.
	prefixes *bitlpm.CIDRTrieMap[clusterID, struct{}]

	// prefixRefCounter keeps a reference count of all prefixes that come from
	// policy resources, as an optimization, in order to avoid redundantly
	// storing all prefixes from policies.
	prefixRefCounter counter.Counter[cmtypes.PrefixCluster]

	// queued* handle updates into the IPCache. Whenever a label is added
	// or removed from a specific IP prefix, that prefix is added into
	// 'queuedPrefixes'. Each time label injection is triggered, it will
	// process the metadata changes for these prefixes and potentially
	// generate updates into the ipcache, policy engine and datapath.
	queuedChangesMU lock.Mutex
	queuedPrefixes  map[cmtypes.PrefixCluster]struct{}

	// queuedRevision is the "version" of the prefix queue. It is incremented
	// on every *dequeue*. If injection is successful, then injectedRevision
	// is updated and an update broadcast to waiters.
	queuedRevision uint64

	// injectedRevision indicates the current "version" of the queue that has
	// been applied to the ipcache. It is optionally used by ipcache clients
	// to wait for a specific update to be processed. It is protected by a
	// Cond's mutex. When label injection is successful, this will be updated
	// to whatever revision was dequeued and any waiters will be "awoken" via
	// the Cond's Broadcast().
	injectedRevision     uint64
	injectedRevisionCond *sync.Cond

	// reservedHostLock protects the localHostLabels map. Holders must
	// always take the metadata read lock first.
	reservedHostLock lock.Mutex

	// reservedHostLabels collects all labels that apply to the host identity.
	// see updateLocalHostLabels() for more info.
	reservedHostLabels map[netip.Prefix]labels.Labels
}
// newMetadata allocates an empty metadata store with all internal maps,
// tries, and synchronization primitives initialized.
func newMetadata(logger *slog.Logger) *metadata {
	m := &metadata{
		logger:           logger,
		m:                make(map[cmtypes.PrefixCluster]*prefixInfo),
		prefixes:         bitlpm.NewCIDRTrieMap[clusterID, struct{}](),
		prefixRefCounter: make(counter.Counter[cmtypes.PrefixCluster]),
		queuedPrefixes:   make(map[cmtypes.PrefixCluster]struct{}),
		// The queue revision starts at 1, while injectedRevision starts at
		// its zero value.
		queuedRevision:       1,
		injectedRevisionCond: sync.NewCond(&lock.Mutex{}),
		reservedHostLabels:   make(map[netip.Prefix]labels.Labels),
	}
	return m
}
// dequeuePrefixUpdates returns the set of queued prefixes, as well as the revision
// that should be passed to setInjectedRevision once label injection has successfully
// completed.
func (m *metadata) dequeuePrefixUpdates() (modifiedPrefixes []cmtypes.PrefixCluster, revision uint64) {
	m.queuedChangesMU.Lock()
	defer m.queuedChangesMU.Unlock()

	modifiedPrefixes = make([]cmtypes.PrefixCluster, 0, len(m.queuedPrefixes))
	for prefix := range m.queuedPrefixes {
		modifiedPrefixes = append(modifiedPrefixes, prefix)
	}
	m.queuedPrefixes = make(map[cmtypes.PrefixCluster]struct{})

	revision = m.queuedRevision
	// Increment, as any newly-queued prefixes are now subject to the next
	// revision cycle.
	m.queuedRevision++

	return modifiedPrefixes, revision
}
// enqueuePrefixUpdates queues prefixes for label injection. It returns the "next"
// queue revision number, which can be passed to waitForRevision.
func (m *metadata) enqueuePrefixUpdates(prefixes ...cmtypes.PrefixCluster) uint64 {
	m.queuedChangesMU.Lock()
	defer m.queuedChangesMU.Unlock()

	for _, p := range prefixes {
		m.queuedPrefixes[p] = struct{}{}
	}
	return m.queuedRevision
}
// setInjectedRevision updates the injected revision to a new value and
// wakes all waiters.
func (m *metadata) setInjectedRevision(rev uint64) {
	cond := m.injectedRevisionCond
	cond.L.Lock()
	defer cond.L.Unlock()
	m.injectedRevision = rev
	cond.Broadcast()
}
// waitForRevision waits for the injected revision to be at or above the
// supplied revision. We may skip revisions, as the desired revision is bumped
// every time prefixes are dequeued, but injection may fail. Thus, any revision
// greater or equal to the desired revision is acceptable.
//
// Returns ctx.Err() if the context is cancelled before the revision is
// reached, nil otherwise.
func (m *metadata) waitForRevision(ctx context.Context, rev uint64) error {
	// Allow callers to bail out by cancelling the context
	cleanupCancellation := context.AfterFunc(ctx, func() {
		// We need to acquire injectedRevisionCond.L here to be sure that the
		// Broadcast won't occur before the call to Wait, which would result
		// in a missed signal.
		m.injectedRevisionCond.L.Lock()
		defer m.injectedRevisionCond.L.Unlock()
		m.injectedRevisionCond.Broadcast()
	})
	defer cleanupCancellation()

	m.injectedRevisionCond.L.Lock()
	defer m.injectedRevisionCond.L.Unlock()
	for m.injectedRevision < rev {
		// Wait releases the lock while blocked; a wakeup may come either
		// from setInjectedRevision or from the cancellation AfterFunc above,
		// so re-check the context after every wakeup.
		m.injectedRevisionCond.Wait()
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return nil
}
// canonicalPrefix returns the prefixCluster with its prefix in canonicalized form.
// The canonical version of the prefix must be used for lookups in the metadata prefixCluster
// map. The canonical representation of a prefix has the lower bits of the address always
// zeroed out and does not contain any IPv4-mapped IPv6 address.
//
// Invalid prefixes are returned unchanged, as they have no canonical form.
func canonicalPrefix(prefixCluster cmtypes.PrefixCluster) cmtypes.PrefixCluster {
	// Convert once up front instead of calling AsPrefix() repeatedly.
	prefix := prefixCluster.AsPrefix()
	if !prefix.IsValid() {
		return prefixCluster // no canonical version of invalid prefix
	}

	// Prefix() always zeroes out the lower bits; Unmap() strips any
	// IPv4-mapped IPv6 representation.
	p, err := prefix.Addr().Unmap().Prefix(prefix.Bits())
	if err != nil {
		return prefixCluster // no canonical version of invalid prefix
	}

	return cmtypes.NewPrefixCluster(p, prefixCluster.ClusterID())
}
// upsertLocked inserts / updates the set of metadata associated with this resource for this prefix.
// It returns the set of affected prefixes. It may return nil if the metadata change is a no-op.
func (m *metadata) upsertLocked(prefix cmtypes.PrefixCluster, src source.Source, resource types.ResourceID, info ...IPMetadata) []cmtypes.PrefixCluster {
	prefix = canonicalPrefix(prefix)
	changed := false

	// Look up the per-prefix info once, rather than re-indexing m.m[prefix]
	// on every access below.
	pi, ok := m.m[prefix]
	if !ok {
		changed = true
		pi = newPrefixInfo()
		m.m[prefix] = pi
		m.prefixes.Upsert(clusterID(prefix.ClusterID()), prefix.AsPrefix(), struct{}{})
	}

	ri, ok := pi.byResource[resource]
	if !ok {
		changed = true
		ri = &resourceInfo{
			source: src,
		}
		pi.byResource[resource] = ri
	}

	// merge must be called for every item, even once changed is true, as it
	// mutates the per-resource info in place.
	for _, i := range info {
		if ri.merge(m.logger, i, src) {
			changed = true
		}
	}

	// If the metadata for this resource hasn't changed, *or* it has
	// no effect on the flattened metadata, then return zero affected prefixes.
	if !changed || pi.flattened.has(src, info) {
		return nil
	}

	// Invalidated flattened metadata. Will be re-populated on next read.
	pi.flattened = nil

	return m.findAffectedChildPrefixes(prefix)
}
// GetMetadataSourceByPrefix returns the highest precedence source which has
// provided metadata for this prefix
func (ipc *IPCache) GetMetadataSourceByPrefix(prefix cmtypes.PrefixCluster) source.Source {
	ipc.metadata.Lock()
	defer ipc.metadata.Unlock()

	info := ipc.metadata.getLocked(prefix)
	return info.Source()
}
// get returns a deep copy of the flattened prefix info
func (m *metadata) get(prefix cmtypes.PrefixCluster) *resourceInfo {
	m.Lock()
	defer m.Unlock()

	// Delegate to the lock-held variant.
	info := m.getLocked(prefix)
	return info
}
// getLocked returns a deep copy of the flattened prefix info
func (m *metadata) getLocked(prefix cmtypes.PrefixCluster) *resourceInfo {
	pi, ok := m.m[canonicalPrefix(prefix)]
	if !ok {
		return nil
	}

	if pi.flattened == nil {
		// The memoized flattened view was invalidated by a prior write;
		// rebuild it now, on demand.
		scopedLog := m.logger.With(
			logfields.CIDR, prefix,
			logfields.ClusterID, prefix.ClusterID(),
		)
		pi.flattened = pi.flatten(scopedLog)
	}

	// Hand out a copy so callers cannot mutate the cached view.
	return pi.flattened.DeepCopy()
}
// mergeParentLabels pulls down all labels from parent prefixes, with "longer" prefixes having
// preference.
//
// Thus, if the ipcache contains:
//   - 10.0.0.0/8 -> "a=b, foo=bar"
//   - 10.1.0.0/16 -> "a=c"
//   - 10.1.1.0/24 -> "d=e"
//
// the complete set of labels for 10.1.1.0/24 is [a=c, d=e, foo=bar]
func (m *metadata) mergeParentLabels(lbls labels.Labels, prefixCluster cmtypes.PrefixCluster) {
	m.Lock()
	defer m.Unlock()

	hasCIDR := lbls.HasSource(labels.LabelSourceCIDR) // we should only merge one CIDR label
	// Iterate over all shorter prefixes, from `prefix` to 0.0.0.0/0 // ::/0.
	// Merge all labels, preferring those from longer prefixes, but only merge a single "cidr:XXX" label at most.
	prefix := prefixCluster.AsPrefix()
	for bits := prefix.Bits() - 1; bits >= 0; bits-- {
		parent, _ := prefix.Addr().Unmap().Prefix(bits) // canonical
		if info := m.getLocked(cmtypes.NewPrefixCluster(parent, prefixCluster.ClusterID())); info != nil {
			for k, v := range info.ToLabels() {
				// Skip a parent's cidr: label once one has been seen,
				// either in the input or from a longer parent.
				if v.Source == labels.LabelSourceCIDR && hasCIDR {
					continue
				}
				// Labels already present (from longer prefixes) win.
				if _, ok := lbls[k]; !ok {
					lbls[k] = v
					if v.Source == labels.LabelSourceCIDR {
						hasCIDR = true
					}
				}
			}
		}
	}
}
// findAffectedChildPrefixes returns the list of all child prefixes which are
// affected by an update to the parent prefix
func (m *metadata) findAffectedChildPrefixes(parent cmtypes.PrefixCluster) (children []cmtypes.PrefixCluster) {
	if parent.IsSingleIP() {
		// A single-IP prefix cannot contain any other prefix.
		return []cmtypes.PrefixCluster{parent}
	}

	cid := parent.ClusterID()
	m.prefixes.Descendants(clusterID(cid), parent.AsPrefix(), func(child netip.Prefix, _ struct{}) bool {
		children = append(children, cmtypes.NewPrefixCluster(child, cid))
		return true
	})

	return children
}
// doInjectLabels injects labels from the ipcache metadata (IDMD) map into the
// identities used for the prefixes in the IPCache. Conversely,
// RemoveLabelsExcluded() performs the inverse: removes labels from the IDMD
// map and releases identities allocated by this function.
//
// Note that as this function iterates through the IDMD, if it detects a change
// in labels for a given prefix, then this might allocate a new identity. If a
// prefix was previously associated with an identity, it will get deallocated,
// so a balance is kept, ensuring a one-to-one mapping between prefix and
// identity.
//
// Returns the CIDRs that were not yet processed, for example due to an
// unexpected error while processing the identity updates for those CIDRs.
// The caller should attempt to retry injecting labels for those CIDRs.
//
// Do not call this directly; rather, use TriggerLabelInjection()
func (ipc *IPCache) doInjectLabels(ctx context.Context, modifiedPrefixes []cmtypes.PrefixCluster) (remainingPrefixes []cmtypes.PrefixCluster, err error) {
	if ipc.IdentityAllocator == nil {
		return modifiedPrefixes, ErrLocalIdentityAllocatorUninitialized
	}
	if !ipc.Configuration.CacheStatus.Synchronized() {
		return modifiedPrefixes, errors.New("k8s cache not fully synced")
	}

	// ipcacheEntry collects the arguments of a deferred upsertLocked call.
	type ipcacheEntry struct {
		identity      Identity
		tunnelPeer    net.IP
		encryptKey    uint8
		endpointFlags uint8
		force         bool
	}

	var (
		// previouslyAllocatedIdentities maps IP Prefix -> Identity for
		// old identities where the prefix will now map to a new identity
		previouslyAllocatedIdentities = make(map[cmtypes.PrefixCluster]Identity)
		// idsToAdd stores the identities that must be updated via the
		// selector cache.
		idsToAdd = make(map[identity.NumericIdentity]labels.LabelArray)
		// entriesToReplace stores the identity to replace in the ipcache.
		entriesToReplace = make(map[cmtypes.PrefixCluster]ipcacheEntry)
		entriesToDelete  = make(map[cmtypes.PrefixCluster]Identity)
		// unmanagedPrefixes is the set of prefixes for which we no longer have
		// any metadata, but were created by a call directly to Upsert()
		unmanagedPrefixes = make(map[cmtypes.PrefixCluster]Identity)
	)

	// Pass 1: compute the set of ipcache changes, allocating identities as
	// needed, without holding the ipcache mutex.
	for i, prefix := range modifiedPrefixes {
		pstr := prefix.String()
		oldID, entryExists := ipc.LookupByIP(pstr)
		oldTunnelIP, oldEncryptionKey := ipc.getHostIPCache(pstr)
		oldEndpointFlags := ipc.getEndpointFlags(pstr)
		prefixInfo := ipc.metadata.get(prefix)
		var newID *identity.Identity
		var isNew bool
		if prefixInfo == nil {
			if !entryExists {
				// Already deleted, no new metadata to associate
				continue
			} // else continue below to remove the old entry
		} else {
			// Insert to propagate the updated set of labels after removal.
			newID, isNew, err = ipc.resolveIdentity(prefix, prefixInfo)
			if err != nil {
				// NOTE: This may fail during a 2nd or later
				// iteration of the loop. To handle this, break
				// the loop here and continue executing the set
				// of changes for the prefixes that were
				// already processed.
				//
				// Old identities corresponding to earlier
				// prefixes may be released as part of this,
				// so hopefully this forward progress will
				// unblock subsequent calls into this function.
				remainingPrefixes = modifiedPrefixes[i:]
				err = fmt.Errorf("failed to allocate new identity during label injection: %w", err)
				break
			}

			var newOverwrittenLegacySource source.Source
			tunnelPeerIP := prefixInfo.TunnelPeer().IP()
			encryptKeyUint8 := prefixInfo.EncryptKey().Uint8()
			epFlagsUint8 := prefixInfo.EndpointFlags().Uint8()
			if entryExists {
				// If an entry already exists for this prefix, then we want to
				// retain its source, if it has been modified by the legacy API.
				// This allows us to restore the original source if we remove all
				// metadata for the prefix
				switch {
				case oldID.exclusivelyOwnedByLegacyAPI():
					// This is the first time we have associated metadata for a
					// modifiedByLegacyAPI=true entry. Store the old (legacy) source:
					newOverwrittenLegacySource = oldID.Source
				case oldID.ownedByLegacyAndMetadataAPI():
					// The entry has modifiedByLegacyAPI=true, but has already been
					// updated at least once by the metadata API. Retain the legacy
					// source as is.
					newOverwrittenLegacySource = oldID.overwrittenLegacySource
				}

				// We can safely skip the ipcache upsert if the entry matches with
				// the entry in the metadata cache exactly.
				// Note that checking ID alone is insufficient, see GH-24502.
				if oldID.ID == newID.ID && prefixInfo.Source() == oldID.Source &&
					oldID.overwrittenLegacySource == newOverwrittenLegacySource &&
					oldTunnelIP.Equal(tunnelPeerIP) &&
					oldEncryptionKey == encryptKeyUint8 &&
					oldEndpointFlags == epFlagsUint8 {
					// Unchanged entry; still fall through to the reference
					// accounting below to balance the resolveIdentity ref.
					goto releaseIdentity
				}
			}

			// If this ID was newly allocated, we must add it to the SelectorCache
			if isNew {
				idsToAdd[newID.ID] = newID.Labels.LabelArray()
			}
			entriesToReplace[prefix] = ipcacheEntry{
				identity: Identity{
					ID:                      newID.ID,
					Source:                  prefixInfo.Source(),
					overwrittenLegacySource: newOverwrittenLegacySource,
					// Note: `modifiedByLegacyAPI` and `shadowed` will be
					// set by the upsert call itself
				},
				tunnelPeer:    tunnelPeerIP,
				encryptKey:    encryptKeyUint8,
				endpointFlags: epFlagsUint8,
				// IPCache.Upsert() and friends currently require a
				// Source to be provided during upsert. If the old
				// Source was higher precedence due to labels that
				// have now been removed, then we need to explicitly
				// work around that to remove the old higher-priority
				// identity and replace it with this new identity.
				force: entryExists && prefixInfo.Source() != oldID.Source && oldID.ID != newID.ID,
			}
		}
	releaseIdentity:
		if entryExists {
			// 'prefix' is being removed or modified, so some prior
			// iteration of this loop hit the 'injectLabels' case
			// above, thereby allocating a (new) identity. If we
			// delete or update the identity for 'prefix' in this
			// iteration of the loop, then we must balance the
			// allocation from the prior InjectLabels() call by
			// releasing the previous reference.
			entry, entryToBeReplaced := entriesToReplace[prefix]
			if oldID.exclusivelyOwnedByLegacyAPI() && entryToBeReplaced {
				// If the previous ipcache entry for the prefix
				// was not managed by this function, then the
				// previous ipcache user to inject the IPCache
				// entry retains its own reference to the
				// Security Identity. Given that this function
				// is going to assume (non-exclusive) responsibility
				// for the IPCache entry now, this path must retain its
				// own reference to the Security Identity to
				// ensure that if the other owner ever releases
				// their reference, this reference stays live.
				if option.Config.Debug {
					ipc.logger.Debug(
						"Acquiring Identity reference",
						logfields.IdentityOld, oldID.ID,
						logfields.Identity, entry.identity.ID,
					)
				}
			} else {
				previouslyAllocatedIdentities[prefix] = oldID
			}
			// If all associated metadata for this prefix has been removed,
			// and the existing IPCache entry was never touched by any other
			// subsystem using the old Upsert API, then we can safely remove
			// the IPCache entry associated with this prefix.
			if prefixInfo == nil {
				if oldID.exclusivelyOwnedByMetadataAPI() {
					entriesToDelete[prefix] = oldID
				} else if oldID.ownedByLegacyAndMetadataAPI() {
					// If, on the other hand, this prefix *was* touched by
					// another, Upsert-based system, then we want to restore
					// the original (legacy) source. This ensures that the legacy
					// Delete call (with the legacy source) will be able to remove
					// it.
					unmanagedEntry := ipcacheEntry{
						identity: Identity{
							ID:                  oldID.ID,
							Source:              oldID.overwrittenLegacySource,
							modifiedByLegacyAPI: true,
						},
						tunnelPeer:    oldTunnelIP,
						encryptKey:    oldEncryptionKey,
						endpointFlags: oldEndpointFlags,
						force:         true, /* overwrittenLegacySource is lower precedence */
					}
					entriesToReplace[prefix] = unmanagedEntry
					// In addition, flag this prefix as potentially eligible
					// for deletion if all references are removed (i.e. the legacy
					// Delete call already happened).
					unmanagedPrefixes[prefix] = unmanagedEntry.identity
					if option.Config.Debug {
						ipc.logger.Debug(
							"Previously managed IPCache entry is now unmanaged",
							logfields.IdentityOld, oldID.ID,
						)
					}
				} else if oldID.exclusivelyOwnedByLegacyAPI() {
					// Even if we never actually overwrote the legacy-owned
					// entry, we should still remove it if all references are removed.
					unmanagedPrefixes[prefix] = oldID
				}
			}
		}

		// The reserved:host identity is special: the numeric ID is fixed,
		// and the set of labels is mutable. Thus, whenever it changes,
		// we must always update the SelectorCache (normally, this is elided
		// when no changes are present).
		if newID != nil && newID.ID == identity.ReservedIdentityHost {
			idsToAdd[newID.ID] = newID.Labels.LabelArray()
		}

		// Again, more reserved:host bookkeeping: if this prefix is no longer ID 1 (because
		// it is being deleted or changing IDs), we need to recompute the labels
		// for reserved:host and push that to the SelectorCache
		if entryExists && oldID.ID == identity.ReservedIdentityHost &&
			(newID == nil || newID.ID != identity.ReservedIdentityHost) && prefix.ClusterID() == 0 {
			i := ipc.updateReservedHostLabels(prefix.AsPrefix(), nil)
			idsToAdd[i.ID] = i.Labels.LabelArray()
		}
	}

	// Batch update the SelectorCache and policymaps with the newly allocated identities.
	// This must be done before writing them to the ipcache, or else traffic may be dropped.
	// (This is because prefixes may have identities that are not yet marked as allowed.)
	//
	// We must do this even if we don't appear to have allocated any identities, because they
	// may be in flight due to another caller.
	done := ipc.IdentityUpdater.UpdateIdentities(idsToAdd, nil)
	select {
	case <-done:
	case <-ctx.Done():
		return modifiedPrefixes, ctx.Err()
	}

	// Pass 2: apply the computed changes while holding the ipcache mutex.
	ipc.mutex.Lock()
	defer ipc.mutex.Unlock()
	for p, entry := range entriesToReplace {
		prefix := p.String()
		meta := ipc.getK8sMetadata(prefix)
		if _, err2 := ipc.upsertLocked(
			prefix,
			entry.tunnelPeer,
			entry.encryptKey,
			meta,
			entry.identity,
			entry.endpointFlags,
			entry.force,
			/* fromLegacyAPI */ false,
		); err2 != nil {
			// It's plausible to pull the same information twice
			// from different sources, for instance in etcd mode
			// where node information is propagated both via the
			// kvstore and via the k8s control plane. If the
			// upsert was rejected due to source precedence, but the
			// identity is unchanged, then we can safely ignore the
			// error message.
			oldID, ok := previouslyAllocatedIdentities[p]
			if !(ok && oldID.ID == entry.identity.ID && errors.Is(err2, &ErrOverwrite{
				ExistingSrc: oldID.Source,
				NewSrc:      entry.identity.Source,
			})) {
				ipc.logger.Error(
					"Failed to replace ipcache entry with new identity after label removal. Traffic may be disrupted.",
					logfields.Error, err2,
					logfields.IPAddr, prefix,
					logfields.Identity, entry.identity.ID,
				)
			}
		}
	}

	// Delete any no-longer-referenced prefixes.
	// These will now revert to the world identity.
	// This must happen *before* identities are released, or else there will be policy drops
	for prefix, id := range entriesToDelete {
		ipc.deleteLocked(prefix.String(), id.Source)
	}

	// Release our reference for all identities. If their refcount reaches zero, do a
	// sanity check to ensure there are no stale prefixes remaining
	idsToRelease := make([]identity.NumericIdentity, 0, len(previouslyAllocatedIdentities))
	for _, id := range previouslyAllocatedIdentities {
		idsToRelease = append(idsToRelease, id.ID)
	}
	deletedNIDs, err2 := ipc.IdentityAllocator.ReleaseLocalIdentities(idsToRelease...)
	if err2 != nil {
		// should be unreachable, as this only happens if we allocated a global identity
		ipc.logger.Warn("BUG: Failed to release local identity", logfields.Error, err2)
	}

	// Scan all deallocated identities, looking for stale prefixes that still reference them
	for _, deletedNID := range deletedNIDs {
		for prefixStr := range ipc.identityToIPCache[deletedNID] {
			prefix, err := cmtypes.ParsePrefixCluster(prefixStr)
			if err != nil {
				continue // unreachable
			}
			// Corner case: This prefix + identity was initially created by a direct Upsert(),
			// but all identity references have been released. We should then delete this prefix.
			if oldID, unmanaged := unmanagedPrefixes[prefix]; unmanaged && oldID.ID == deletedNID {
				ipc.logger.Debug(
					"Force-removing released prefix from the ipcache.",
					logfields.IPAddr, prefix,
					logfields.Identity, oldID,
				)
				ipc.deleteLocked(prefix.String(), oldID.Source)
			}
		}
	}

	return remainingPrefixes, err
}
// resolveIdentity will either return a previously-allocated identity for the
// given prefix or allocate a new one corresponding to the labels associated
// with the specified prefixInfo.
//
// This function will take an additional reference on the returned identity.
// The caller *must* ensure that this reference is eventually released via
// a call to ipc.IdentityAllocator.Release(). Typically this is tied to whether
// the caller subsequently injects an entry into the BPF IPCache map:
//   - If the entry is inserted, we assume that the entry will eventually be
//     removed, and when it is removed, we will remove that reference from the
//     identity & release the identity.
//   - If the entry is not inserted (for instance, because the bpf IPCache map
//     already has the same IP -> identity entry in the map), immediately release
//     the reference.
func (ipc *IPCache) resolveIdentity(prefix cmtypes.PrefixCluster, info *resourceInfo) (*identity.Identity, bool, error) {
	// Override identities always take precedence
	if info.IdentityOverride() {
		id, isNew, err := ipc.IdentityAllocator.AllocateLocalIdentity(info.ToLabels(), false, identity.InvalidIdentity)
		if err != nil {
			ipc.logger.Warn(
				"Failed to allocate new identity for prefix's IdentityOverrideLabels.",
				logfields.Error, err,
				logfields.ClusterID, prefix.ClusterID(),
				logfields.IPAddr, prefix,
				logfields.Labels, info.ToLabels(),
			)
		}
		return id, isNew, err
	}

	lbls := info.ToLabels()

	// unconditionally merge any parent labels down in to this prefix
	ipc.metadata.mergeParentLabels(lbls, prefix)

	// Enforce certain label invariants, e.g. adding or removing `reserved:world`.
	resolveLabels(lbls, prefix)

	if prefix.ClusterID() == 0 && lbls.HasHostLabel() {
		// Associate any new labels with the host identity.
		//
		// This case is a bit special, because other parts of Cilium
		// have hardcoded assumptions around the host identity and
		// that it corresponds to identity.ReservedIdentityHost.
		// If additional labels are associated with the IPs of the
		// host, add those extra labels into the host identity here
		// so that policy will match on the identity correctly.
		//
		// We can get away with this because the host identity is only
		// significant within the current agent's view (ie each agent
		// will calculate its own host identity labels independently
		// for itself). For all other identities, we avoid modifying
		// the labels at runtime and instead opt to allocate new
		// identities below.
		//
		// As an extra gotcha, we need to merge all labels for all IPs
		// that resolve to the reserved:host identity, otherwise we can
		// flap identities labels depending on which prefix writes first. See GH-28259.
		i := ipc.updateReservedHostLabels(prefix.AsPrefix(), lbls)
		// isNew is false: the host identity is a fixed, reserved identity.
		return i, false, nil
	}

	// This should only ever allocate an identity locally on the node,
	// which could theoretically fail if we ever allocate a very large
	// number of identities.
	id, isNew, err := ipc.IdentityAllocator.AllocateLocalIdentity(lbls, false, info.requestedIdentity.ID())
	if err != nil {
		ipc.logger.Warn(
			"Failed to allocate new identity for prefix's Labels.",
			logfields.Error, err,
			logfields.IPAddr, prefix,
			logfields.Labels, lbls,
		)
		return nil, false, err
	}

	// For world-selectable identities, record the originating prefix as a
	// cidr: label on the identity itself.
	if lbls.HasWorldLabel() {
		id.CIDRLabel = labels.NewLabelsFromModel([]string{labels.LabelSourceCIDR + ":" + prefix.String()})
	}
	return id, isNew, err
}
// resolveLabels applies certain prefix-level invariants to the set of labels.
//
// At a high level, this function makes it so that in-cluster entities
// are not selectable by CIDR and CIDR-equivalent policies.
// This function is necessary as there are a number of *independent* label
// sources, so only once the full set is computed can we apply this logic.
//
// CIDR and CIDR-equivalent labels are labels with source:
//   - cidr:
//   - fqdn:
//   - cidrgroup:
//
// A prefix with any of these labels is considered "in-cluster"
//   - reserved:host
//   - reserved:remote-node
//   - reserved:health
//   - reserved:ingress
//
// However, nodes *are* allowed to be selectable by CIDR and CIDR equivalents
// if PolicyCIDRMatchesNodes() is true.
func resolveLabels(lbls labels.Labels, prefix cmtypes.PrefixCluster) {
	isNode := lbls.HasRemoteNodeLabel() || lbls.HasHostLabel()
	isInCluster := isNode || lbls.HasHealthLabel() || lbls.HasIngressLabel()

	if isInCluster {
		// In-cluster entities must never carry reserved:world.
		lbls.Remove(labels.LabelWorld)
		lbls.Remove(labels.LabelWorldIPv4)
		lbls.Remove(labels.LabelWorldIPv6)

		// Nor may they carry cidr/fqdn/cidrgroup labels — except for
		// nodes when node-CIDR matching is explicitly enabled.
		if !isNode || !option.Config.PolicyCIDRMatchesNodes() {
			lbls.RemoveFromSource(labels.LabelSourceCIDR)
			lbls.RemoveFromSource(labels.LabelSourceFQDN)
			lbls.RemoveFromSource(labels.LabelSourceCIDRGroup)
		}
	}

	// node: labels are retained only for nodes with per-node labels enabled.
	if !isNode || !option.Config.PerNodeLabelsEnabled() {
		lbls.RemoveFromSource(labels.LabelSourceNode)
	}

	// No empty labels allowed: fall back to cidr:<address/prefix>.
	// This should not be hit in production, but is used in tests.
	if len(lbls) == 0 {
		maps.Copy(lbls, labels.GetCIDRLabels(prefix.AsPrefix()))
	}

	// Everything outside the cluster is world.
	if !isInCluster {
		lbls.AddWorldLabel(prefix.AsPrefix().Addr())
	}
}
// updateReservedHostLabels adds or removes labels that apply to the local host.
// The `reserved:host` identity is special: the numeric identity is fixed
// and the set of labels is mutable. (The datapath requires this.) So,
// we need to determine all prefixes that have the `reserved:host` label and
// capture their labels. Then, we must aggregate *all* labels from all prefixes and
// update the labels that correspond to the `reserved:host` identity.
//
// This could be termed a meta-ipcache. The ipcache metadata layer aggregates
// an arbitrary set of resources and labels to a prefix. Here, we are aggregating an arbitrary
// set of prefixes and labels to an identity.
func (ipc *IPCache) updateReservedHostLabels(prefix netip.Prefix, lbls labels.Labels) *identity.Identity {
	ipc.metadata.reservedHostLock.Lock()
	defer ipc.metadata.reservedHostLock.Unlock()

	if lbls == nil {
		// nil labels: this prefix no longer contributes to reserved:host.
		delete(ipc.metadata.reservedHostLabels, prefix)
	} else {
		ipc.metadata.reservedHostLabels[prefix] = lbls
	}

	// Re-aggregate the labels of every contributing prefix and mutate the
	// static reserved:host identity accordingly.
	newLabels := labels.NewFrom(labels.LabelHost)
	for _, perPrefix := range ipc.metadata.reservedHostLabels {
		newLabels.MergeLabels(perPrefix)
	}

	ipc.logger.Debug(
		"Merged labels for reserved:host identity",
		logfields.Labels, newLabels,
	)

	return identity.AddReservedIdentityWithLabels(identity.ReservedIdentityHost, newLabels)
}
// appendAPIServerLabelsForDeletion inspects labels and performs special handling for corner cases like API server entities
// deployed external to the cluster.
func appendAPIServerLabelsForDeletion(lbls labels.Labels, currentLabels labels.Labels) labels.Labels {
	// An external kube-apiserver carries exactly the pair
	// {reserved:kube-apiserver, reserved:world}; in that case the world
	// label must be scheduled for removal alongside the requested labels.
	isExternalAPIServer := len(currentLabels) == 2 &&
		currentLabels.HasKubeAPIServerLabel() &&
		currentLabels.HasWorldLabel()
	if isExternalAPIServer {
		lbls.MergeLabels(labels.LabelWorld)
	}
	return lbls
}
// RemoveLabelsExcluded removes the given labels from all IPs inside the IDMD
// except for the IPs / prefixes inside the given excluded set.
//
// The caller must subsequently call IPCache.TriggerLabelInjection() to push
// these changes down into the policy engine and ipcache datapath maps.
func (ipc *IPCache) RemoveLabelsExcluded(
	lbls labels.Labels,
	toExclude map[cmtypes.PrefixCluster]struct{},
	rid types.ResourceID,
) {
	ipc.metadata.Lock()
	defer ipc.metadata.Unlock()

	var affectedPrefixes []cmtypes.PrefixCluster
	for _, prefix := range ipc.metadata.filterByLabels(lbls) {
		if _, excluded := toExclude[prefix]; excluded {
			continue
		}
		// Widen the label set to delete where the current labels require
		// it (e.g. external kube-apiserver entries).
		currentLabels := ipc.metadata.getLocked(prefix).ToLabels()
		lblsToRemove := appendAPIServerLabelsForDeletion(lbls, currentLabels)
		affectedPrefixes = append(affectedPrefixes, ipc.metadata.remove(prefix, rid, lblsToRemove)...)
	}
	ipc.metadata.enqueuePrefixUpdates(affectedPrefixes...)
}
// filterByLabels returns all the prefixes inside the ipcache metadata map
// which contain the given labels. Note that `filter` is a subset match, not a
// full match.
//
// Assumes that the ipcache metadata read lock is taken!
func (m *metadata) filterByLabels(filter labels.Labels) []cmtypes.PrefixCluster {
	// Serialize the filter once; the subset test is a byte-containment
	// check against each prefix's sorted label list.
	sortedFilter := filter.SortedList()

	var matching []cmtypes.PrefixCluster
	for prefix := range m.m {
		prefixLabels := m.getLocked(prefix).ToLabels()
		if bytes.Contains(prefixLabels.SortedList(), sortedFilter) {
			matching = append(matching, prefix)
		}
	}
	return matching
}
// remove asynchronously removes the labels association for a prefix.
//
// This function assumes that the ipcache metadata lock is held for writing.
func (m *metadata) remove(prefix cmtypes.PrefixCluster, resource types.ResourceID, aux ...IPMetadata) []cmtypes.PrefixCluster {
	prefix = canonicalPrefix(prefix)

	info, ok := m.m[prefix]
	if !ok {
		return nil
	}
	rInfo := info.byResource[resource]
	if rInfo == nil {
		return nil
	}

	// Compute affected prefixes before deletion, to ensure the prefix matches
	// its own entry before it is deleted.
	affected := m.findAffectedChildPrefixes(prefix)

	for _, a := range aux {
		rInfo.unmerge(m.logger, a)
	}
	if !rInfo.isValid() {
		delete(info.byResource, resource)
	}

	if info.isValid() {
		// Metadata remains; drop the memoized flattened view so it is
		// recomputed on the next read.
		info.flattened = nil
	} else {
		// No metadata left for this prefix at all: drop it entirely.
		delete(m.m, prefix)
		m.prefixes.Delete(clusterID(prefix.ClusterID()), prefix.AsPrefix())
	}

	return affected
}
// TriggerLabelInjection triggers the label injection controller to iterate
// through the IDMD and potentially allocate new identities based on any label
// changes.
//
// The following diagram describes the relationship between the label injector
// triggered here and the callers/callees.
//
//	+------------+  (1)                (1)  +-----------------------------+
//	| EP Watcher +-----+            +-------+ CN Watcher / Node Discovery |
//	+-----+------+   W |            | W     +------+----------------------+
//	      |            |            |              |
//	      |            v            v              |
//	      |            +------------+              |
//	      |            |    IDMD    |              |
//	      |            +------------+              |
//	      |                  ^                     |
//	      |                  |                     |
//	      |              (3) |R                    |
//	      |  (2)     +-------+-------+     (2)     |
//	      +--------->| Label Injector |<-----------+
//	       Trigger   +-------+-------+   Trigger
//	             (4) |W          (5) |W
//	                 |               |
//	                 v               v
//	            +--------+        +---+
//	            |Policy &|        |IPC|
//	            |datapath|        +---+
//	            +--------+
//
// legend:
//   - W means write
//   - R means read
func (ipc *IPCache) TriggerLabelInjection() {
	// GH-17829: Would also be nice to have an end-to-end test to validate
	//           on upgrade that there are no connectivity drops when this
	//           channel is preventing transient BPF entries.

	// This controller is for retrying this operation in case it fails. It
	// should eventually succeed. The controller is registered lazily, exactly
	// once, on the first trigger.
	ipc.injectionStarted.Do(func() {
		ipc.UpdateController(
			LabelInjectorName,
			controller.ControllerParams{
				Group:            injectLabelsControllerGroup,
				Context:          ipc.Configuration.Context,
				DoFunc:           ipc.handleLabelInjection,
				MaxRetryInterval: 1 * time.Minute,
			},
		)
	})

	ipc.controllers.TriggerController(LabelInjectorName)
}
// chunkSize bounds the number of prefixes handed to doInjectLabels() per
// call in handleLabelInjection. Declared as a var (not a const) only so
// that unit tests can change it.
var chunkSize = 512
// handleLabelInjection dequeues the set of pending prefixes and processes
// their metadata updates. It is the DoFunc of the label injection controller;
// returning a non-nil error causes the controller to retry.
func (ipc *IPCache) handleLabelInjection(ctx context.Context) error {
	if ipc.Configuration.CacheStatus != nil {
		// wait for k8s caches to sync.
		// this is duplicated from doInjectLabels(), but it keeps us from needlessly
		// churning the queue while the agent initializes.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ipc.Configuration.CacheStatus:
		}
	}

	// Any prefixes that have failed and must be retried
	var retry []cmtypes.PrefixCluster
	var err error

	idsToModify, rev := ipc.metadata.dequeuePrefixUpdates()

	cs := chunkSize
	// no point in dividing for the first run, we will not be releasing any identities anyways.
	if rev == 1 {
		cs = len(idsToModify)
	}

	// Split ipcache updates in to chunks to reduce resource spikes.
	// InjectLabels releases all identities only at the end of processing, so
	// it may allocate up to `chunkSize` additional identities.
	for len(idsToModify) > 0 {
		idx := min(len(idsToModify), cs)
		chunk := idsToModify[0:idx]
		idsToModify = idsToModify[idx:]

		var failed []cmtypes.PrefixCluster

		// If individual prefixes failed injection, doInjectLabels() returns
		// the set of failed prefixes and sets err. We must ensure the failed
		// prefixes are re-queued for injection.
		failed, err = ipc.doInjectLabels(ctx, chunk)
		retry = append(retry, failed...)
		if err != nil {
			break
		}
	}

	ok := true
	if len(retry) > 0 {
		// err will also be set, so the controller re-triggers after re-queueing.
		ipc.metadata.enqueuePrefixUpdates(retry...)
		ok = false
	}
	if len(idsToModify) > 0 {
		// A break above left these chunks unattempted; re-queue them too.
		ipc.metadata.enqueuePrefixUpdates(idsToModify...)
		ok = false
	}
	if ok {
		// if all prefixes were successfully injected, bump the revision
		// so that any waiters are made aware.
		ipc.metadata.setInjectedRevision(rev)
	}

	// non-nil err will re-trigger this controller
	return err
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ipcache
import (
"log/slog"
"maps"
"slices"
"strings"
"github.com/cilium/cilium/pkg/identity"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/types"
)
// prefixInfo holds all of the information (labels, etc.) about a given prefix
// independently based on the ResourceID of the origin of that information, and
// provides convenient accessors to consistently merge the stored information
// to generate ipcache output based on a range of inputs.
//
// Note that when making a copy of this object, resourceInfo is pointer which
// means it needs to be deep-copied via (*resourceInfo).DeepCopy().
type prefixInfo struct {
	// byResource tracks the metadata contributed for this prefix by each
	// upstream resource, keyed by that resource's ID.
	byResource map[ipcachetypes.ResourceID]*resourceInfo

	// flattened is the fully resolved information, with all information
	// by resource merged. It is computed lazily on read and reset to nil
	// whenever the per-resource metadata changes.
	flattened *resourceInfo
}
// newPrefixInfo constructs an empty prefixInfo, ready to accept
// per-resource metadata. The flattened view starts out nil and is
// populated lazily on read.
func newPrefixInfo() *prefixInfo {
	pi := &prefixInfo{}
	pi.byResource = make(map[ipcachetypes.ResourceID]*resourceInfo)
	return pi
}
// overrideIdentity can be used to override the identity of a given prefix.
// Must be provided together with a set of labels. Any other labels associated
// with this prefix are ignored while an override is present.
// This type implements ipcache.IPMetadata
type overrideIdentity bool
// resourceInfo is all of the information that has been collected from a given
// resource (types.ResourceID) about this IP. Each field must have a 'zero'
// value that indicates that it should be ignored for purposes of merging
// multiple resourceInfo across multiple ResourceIDs together.
type resourceInfo struct {
	// labels contributed by this resource (zero value: nil).
	labels labels.Labels
	// source is the precedence source of the most recent merge for this
	// resource (see merge()).
	source source.Source
	// identityOverride, when true, requests that the labels above override
	// the identity resolution for this prefix (see resolveIdentity()).
	identityOverride overrideIdentity

	tunnelPeer        ipcachetypes.TunnelPeer
	encryptKey        ipcachetypes.EncryptKey
	requestedIdentity ipcachetypes.RequestedIdentity
	endpointFlags     ipcachetypes.EndpointFlags
}
// IPMetadata is an empty interface intended to inform developers using the
// IPCache interface about which types are valid to be injected, and how to
// update this code, in particular the merge(), unmerge() and isValid()
// methods below.
//
// In an ideal world, we would use Constraints here but as of Go 1.18, these
// cannot be used in conjunction with methods, which is how the information
// gets injected into the IPCache.
type IPMetadata any
// namedPortMultiMapUpdater allows for mutation of the NamedPortMultiMap, which
// is otherwise read-only.
type namedPortMultiMapUpdater interface {
	types.NamedPortMultiMap
	// Update replaces 'old' with 'new' and reports whether the named-port
	// mapping changed as a result.
	Update(old, new types.NamedPortMap) (namedPortChanged bool)
}
// merge overwrites the field in 'resourceInfo' corresponding to 'info'. This
// associates the new information with the prefix and ResourceID that this
// 'resourceInfo' resides under in the outer metadata map.
//
// returns true if the metadata was changed
func (m *resourceInfo) merge(logger *slog.Logger, info IPMetadata, src source.Source) bool {
	changed := false
	switch info := info.(type) {
	case labels.Labels:
		changed = !info.DeepEqual(&m.labels)
		// Copy the map so later mutations of the caller's labels cannot
		// alias into this resourceInfo.
		m.labels = labels.NewFrom(info)
	case overrideIdentity:
		changed = m.identityOverride != info
		m.identityOverride = info
	case ipcachetypes.TunnelPeer:
		changed = m.tunnelPeer != info
		m.tunnelPeer = info
	case ipcachetypes.EncryptKey:
		changed = m.encryptKey != info
		m.encryptKey = info
	case ipcachetypes.RequestedIdentity:
		changed = m.requestedIdentity != info
		m.requestedIdentity = info
	case ipcachetypes.EndpointFlags:
		changed = m.endpointFlags != info
		m.endpointFlags = info
	default:
		// Unknown metadata type: leave this resourceInfo untouched and
		// report "no change".
		logger.Error(
			"BUG: Invalid IPMetadata passed to ipinfo.merge()",
			logfields.Info, info,
		)
		return false
	}
	// The source is always overwritten; a source change alone also counts
	// as a metadata change.
	changed = changed || m.source != src
	m.source = src
	return changed
}
// unmerge removes the info of the specified type from 'resourceInfo', by
// resetting the corresponding field to its zero/empty value. Only the type of
// 'info' matters; its value is ignored. The source field is left untouched.
func (m *resourceInfo) unmerge(logger *slog.Logger, info IPMetadata) {
	switch info.(type) {
	case labels.Labels:
		m.labels = nil
	case overrideIdentity:
		m.identityOverride = false
	case ipcachetypes.TunnelPeer:
		m.tunnelPeer = ipcachetypes.TunnelPeer{}
	case ipcachetypes.EncryptKey:
		m.encryptKey = ipcachetypes.EncryptKeyEmpty
	case ipcachetypes.RequestedIdentity:
		m.requestedIdentity = ipcachetypes.RequestedIdentity(identity.IdentityUnknown)
	case ipcachetypes.EndpointFlags:
		m.endpointFlags = ipcachetypes.EndpointFlags{}
	default:
		// Unknown metadata type: nothing to clear.
		logger.Error(
			"BUG: Invalid IPMetadata passed to ipinfo.unmerge()",
			logfields.Info, info,
		)
		return
	}
}
// has returns true if the resourceInfo already has the particular metadata.
// in the case of labels, the existing labels may be a superset.
// In other words, returns true if merging info would result in m
// being unchanged. It is used to prevent needless cache busting
// when updating metadata.
func (m *resourceInfo) has(src source.Source, infos []IPMetadata) bool {
	// A nil resourceInfo holds nothing.
	if m == nil {
		return false
	}
	// merge() always overwrites the source, so a differing source always
	// implies a change.
	if m.source != src {
		return false
	}
	for _, info := range infos {
		switch i := info.(type) {
		case labels.Labels:
			// Subset check: every requested label must already be present
			// with an identical value; extra existing labels are fine.
			for key, newLabel := range i {
				existingLabel, ok := m.labels[key]
				if !ok || newLabel != existingLabel {
					return false
				}
			}
		case overrideIdentity:
			if m.identityOverride != i {
				return false
			}
		case ipcachetypes.TunnelPeer:
			if m.tunnelPeer != i {
				return false
			}
		case ipcachetypes.EncryptKey:
			if m.encryptKey != i {
				return false
			}
		case ipcachetypes.RequestedIdentity:
			if m.requestedIdentity != i {
				return false
			}
		case ipcachetypes.EndpointFlags:
			if m.endpointFlags != i {
				return false
			}
		default:
			// Unknown metadata types are conservatively "not present".
			return false
		}
	}
	return true
}
// isValid reports whether this resourceInfo carries any usable metadata.
// An entry whose labels are nil and whose remaining fields are all at their
// zero values contributes nothing and may be discarded.
func (m *resourceInfo) isValid() bool {
	return m.labels != nil ||
		bool(m.identityOverride) ||
		m.tunnelPeer.IsValid() ||
		m.encryptKey.IsValid() ||
		m.requestedIdentity.IsValid() ||
		m.endpointFlags.IsValid()
}
// DeepCopy returns a new resourceInfo whose labels map is duplicated via
// labels.NewFrom, so mutations of the copy never leak into the original.
func (m *resourceInfo) DeepCopy() *resourceInfo {
	return &resourceInfo{
		labels:            labels.NewFrom(m.labels),
		source:            m.source,
		identityOverride:  m.identityOverride,
		tunnelPeer:        m.tunnelPeer,
		encryptKey:        m.encryptKey,
		requestedIdentity: m.requestedIdentity,
		endpointFlags:     m.endpointFlags,
	}
}
// isValid reports whether at least one resource attached to this prefix
// still carries usable metadata.
func (s prefixInfo) isValid() bool {
	for info := range maps.Values(s.byResource) {
		if info.isValid() {
			return true
		}
	}
	return false
}
// sortedBySourceThenResourceID returns the ResourceIDs of byResource ordered
// so that entries whose source takes precedence come first; ties between
// entries of equal source are broken lexicographically by ResourceID so the
// order is deterministic.
func (s *prefixInfo) sortedBySourceThenResourceID() []ipcachetypes.ResourceID {
	return slices.SortedStableFunc(maps.Keys(s.byResource), func(a ipcachetypes.ResourceID, b ipcachetypes.ResourceID) int {
		// Differing sources: 'a' sorts first when 'b' is not allowed to
		// overwrite it, i.e. when 'a' has higher precedence.
		if s.byResource[a].source != s.byResource[b].source {
			if !source.AllowOverwrite(s.byResource[a].source, s.byResource[b].source) {
				return -1
			} else {
				return 1
			}
		}
		return strings.Compare(string(a), string(b))
	})
}
// ToLabels returns the labels stored for this resource. Callers expect a
// non-nil Labels map, so an empty Labels is returned when none are set.
// Like every other accessor on *resourceInfo (Source, EncryptKey, TunnelPeer,
// RequestedIdentity, EndpointFlags, IdentityOverride), it tolerates a nil
// receiver; previously a nil receiver would panic here.
func (r *resourceInfo) ToLabels() labels.Labels {
	if r == nil || r.labels == nil {
		return labels.Labels{} // code expects non-nil Labels.
	}
	return r.labels
}
// Source returns the source recorded for this resource, or source.Unspec
// for a nil receiver.
func (r *resourceInfo) Source() source.Source {
	if r != nil {
		return r.source
	}
	return source.Unspec
}
// EncryptKey returns the encryption key index recorded for this resource,
// or EncryptKeyEmpty for a nil receiver.
func (r *resourceInfo) EncryptKey() ipcachetypes.EncryptKey {
	if r != nil {
		return r.encryptKey
	}
	return ipcachetypes.EncryptKeyEmpty
}
// TunnelPeer returns the tunnel peer recorded for this resource, or the
// zero TunnelPeer for a nil receiver.
func (r *resourceInfo) TunnelPeer() ipcachetypes.TunnelPeer {
	if r != nil {
		return r.tunnelPeer
	}
	return ipcachetypes.TunnelPeer{}
}
// RequestedIdentity returns the numeric identity requested for this
// resource, or the invalid identity for a nil receiver.
func (r *resourceInfo) RequestedIdentity() ipcachetypes.RequestedIdentity {
	if r != nil {
		return r.requestedIdentity
	}
	return ipcachetypes.RequestedIdentity(identity.InvalidIdentity)
}
// EndpointFlags returns the endpoint flags recorded for this resource, or
// the zero (uninitialized) EndpointFlags for a nil receiver.
func (r *resourceInfo) EndpointFlags() ipcachetypes.EndpointFlags {
	if r != nil {
		return r.endpointFlags
	}
	return ipcachetypes.EndpointFlags{}
}
// IdentityOverride returns true if the exact set of labels has been specified
// and should not be manipulated further. A nil receiver reports false.
func (r *resourceInfo) IdentityOverride() bool {
	return r != nil && bool(r.identityOverride)
}
// flatten resolves the set of all possible metadata in to a single
// flattened resource.
// In the event of a conflict, entries with a higher precedence source
// will win.
func (s *prefixInfo) flatten(scopedLog *slog.Logger) *resourceInfo {
	// shortcut: with exactly one resource, we just return it
	// (the single entry is returned without copying).
	if len(s.byResource) == 1 {
		for _, r := range s.byResource {
			return r
		}
	}
	out := &resourceInfo{}
	// Remember which ResourceID supplied each winning field, used only for
	// the conflict warnings below.
	var (
		overrideResourceID      ipcachetypes.ResourceID
		tunnelPeerResourceID    ipcachetypes.ResourceID
		encryptKeyResourceID    ipcachetypes.ResourceID
		requestedIDResourceID   ipcachetypes.ResourceID
		endpointFlagsResourceID ipcachetypes.ResourceID
	)
	labelResourceIDs := map[string]ipcachetypes.ResourceID{}
	// Iterate from highest to lowest source precedence; for each field the
	// first valid value encountered wins, later conflicting values are
	// logged and dropped.
	for _, resourceID := range s.sortedBySourceThenResourceID() {
		info := s.byResource[resourceID]
		// Sorted by source priority, so the first source wins.
		if out.source == "" {
			out.source = info.source
		}
		if len(info.labels) > 0 && !out.identityOverride /* identityOverride already fixed the labels */ {
			if len(out.labels) > 0 {
				// merge labels, complaining if the value exists
				for key, newLabel := range info.labels {
					otherLabel, exists := out.labels[key]
					if exists && !otherLabel.DeepEqual(&newLabel) {
						scopedLog.Warn(
							"Detected conflicting label for prefix. "+
								"This may cause connectivity issues for this address.",
							logfields.Labels, out.labels,
							logfields.Resource, labelResourceIDs[key],
							logfields.ConflictingLabels, otherLabel,
						)
					} else if !exists {
						out.labels[key] = newLabel
						labelResourceIDs[key] = resourceID
					}
				}
			} else {
				out.labels = labels.NewFrom(info.labels) // copy map, as we will be mutating it
			}
		}
		if info.identityOverride {
			// An override without labels cannot be honored; keep the
			// previously merged labels instead.
			if len(info.labels) == 0 {
				scopedLog.Warn(
					"Detected identity override, but no labels where specified. "+
						"Falling back on the old non-override labels. "+
						"This may cause connectivity issues for this address.",
					logfields.Resource, resourceID,
				)
			} else {
				if out.identityOverride {
					scopedLog.Warn(
						"Detected conflicting identity override for prefix. "+
							"This may cause connectivity issues for this address.",
						logfields.Labels, out.labels,
						logfields.Resource, overrideResourceID,
						logfields.ConflictingLabels, info.labels,
						logfields.ConflictingResource, resourceID,
					)
				} else {
					// The first override wins and replaces any labels
					// merged so far.
					out.identityOverride = true
					out.labels = info.labels
					overrideResourceID = resourceID
				}
			}
		}
		if info.tunnelPeer.IsValid() && info.tunnelPeer != out.tunnelPeer {
			if out.tunnelPeer.IsValid() {
				// Conflicts only matter when tunneling is in use.
				if option.Config.TunnelingEnabled() {
					scopedLog.Warn(
						"Detected conflicting tunnel peer for prefix. "+
							"This may cause connectivity issues for this address.",
						logfields.TunnelPeer, out.tunnelPeer,
						logfields.Resource, tunnelPeerResourceID,
						logfields.ConflictingTunnelPeer, info.tunnelPeer,
						logfields.ConflictingResource, resourceID,
					)
				}
			} else {
				out.tunnelPeer = info.tunnelPeer
				tunnelPeerResourceID = resourceID
			}
		}
		if info.encryptKey.IsValid() && info.encryptKey != out.encryptKey {
			if out.encryptKey.IsValid() {
				scopedLog.Warn(
					"Detected conflicting encryption key index for prefix. "+
						"This may cause connectivity issues for this address.",
					logfields.Key, out.encryptKey,
					logfields.Resource, encryptKeyResourceID,
					logfields.ConflictingKey, info.encryptKey,
					logfields.ConflictingResource, resourceID,
				)
			} else {
				out.encryptKey = info.encryptKey
				encryptKeyResourceID = resourceID
			}
		}
		if info.requestedIdentity.IsValid() && info.requestedIdentity != out.requestedIdentity {
			if out.requestedIdentity.IsValid() {
				scopedLog.Warn(
					"Detected conflicting requested numeric identity for prefix. "+
						"This may cause momentary connectivity issues for this address.",
					logfields.Identity, out.requestedIdentity,
					logfields.Resource, requestedIDResourceID,
					logfields.ConflictingIdentity, info.requestedIdentity,
					logfields.ConflictingResource, resourceID,
				)
			} else {
				out.requestedIdentity = info.requestedIdentity
				requestedIDResourceID = resourceID
			}
		}
		// Note: if more flags are added in pkg/ipcache/types/types.go,
		// they must be merged here.
		if info.endpointFlags.IsValid() && info.endpointFlags != out.endpointFlags {
			if out.endpointFlags.IsValid() {
				scopedLog.Warn(
					"Detected conflicting endpoint flags for prefix. "+
						"This may cause connectivity issues for this address.",
					logfields.EndpointFlags, out.endpointFlags,
					logfields.Resource, endpointFlagsResourceID,
					logfields.ConflictingEndpointFlags, info.endpointFlags,
					logfields.ConflictingResource, resourceID,
				)
			} else {
				out.endpointFlags = info.endpointFlags
				endpointFlagsResourceID = resourceID
			}
		}
	}
	return out
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"net/netip"
"github.com/cilium/cilium/api/v1/models"
)
// IPListEntrySlice implements sort.Interface over IPListEntry pointers from
// the API model (see Less for the ordering).
type IPListEntrySlice []*models.IPListEntry

// Swap exchanges the entries at indices i and j (sort.Interface).
func (s IPListEntrySlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// Less sorts the IPListEntry objects by CIDR prefix then IP address.
// Given that the same IP cannot map to more than one identity, no further
// sorting is performed.
//
// NOTE(review): ParsePrefix errors are discarded, and s[i].Cidr is
// dereferenced unconditionally — this assumes every entry carries a non-nil,
// well-formed CIDR string. Confirm with producers of IPListEntry.
func (s IPListEntrySlice) Less(i, j int) bool {
	iNet, _ := netip.ParsePrefix(*s[i].Cidr)
	jNet, _ := netip.ParsePrefix(*s[j].Cidr)
	iPrefixSize := iNet.Bits()
	jPrefixSize := jNet.Bits()
	// Shorter (less specific) prefixes sort first; equal lengths fall back
	// to comparing the addresses themselves.
	if iPrefixSize == jPrefixSize {
		return iNet.Addr().Less(jNet.Addr())
	}
	return iPrefixSize < jPrefixSize
}
// Len returns the number of entries (sort.Interface).
func (s IPListEntrySlice) Len() int {
	return len(s)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"net"
"net/netip"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/identity"
)
// IdentityUpdater is responsible for handling identity updates into the core
// policy engine. See SelectorCache.UpdateIdentities() for more details.
type IdentityUpdater interface {
	// UpdateIdentities propagates the added and deleted identity sets.
	// NOTE(review): the returned channel presumably signals completion of
	// the update — confirm against SelectorCache.UpdateIdentities.
	UpdateIdentities(added, deleted identity.IdentityMap) <-chan struct{}
}
// ResourceID identifies a unique copy of a resource that provides a source for
// information tied to an IP address in the IPCache.
type ResourceID string

// ResourceKind determines the source of the ResourceID. Typically this is the
// short name for the k8s resource.
type ResourceKind string

// Well-known resource kinds used as the first component of a ResourceID.
var (
	ResourceKindCCNP      = ResourceKind("ccnp")
	ResourceKindCIDRGroup = ResourceKind("cidrgroup")
	ResourceKindCNP       = ResourceKind("cnp")
	ResourceKindDaemon    = ResourceKind("daemon")
	ResourceKindEndpoint  = ResourceKind("ep")
	ResourceKindFile      = ResourceKind("file")
	ResourceKindNetpol    = ResourceKind("netpol")
	ResourceKindNode      = ResourceKind("node")
)
// NewResourceID returns a ResourceID of the form "<kind>/<namespace>/<name>",
// uniquely identifying a source of IPCache information.
func NewResourceID(kind ResourceKind, namespace, name string) ResourceID {
	// A fixed-arity concatenation compiles to a single allocation, so a
	// strings.Builder is unnecessary here.
	return ResourceID(string(kind) + "/" + namespace + "/" + name)
}
// Namespace returns the namespace component of the ResourceID (the second
// "/"-separated field), or "" when the ID has fewer than two components.
func (r ResourceID) Namespace() string {
	_, rest, ok := strings.Cut(string(r), "/")
	if !ok {
		return ""
	}
	ns, _, _ := strings.Cut(rest, "/")
	return ns
}
// TunnelPeer is the IP address of the host associated with this prefix. This is
// typically used to establish a tunnel, e.g. in tunnel mode or for encryption.
// This type implements ipcache.IPMetadata
type TunnelPeer struct{ netip.Addr }

// IP converts the embedded netip.Addr into the legacy net.IP form
// (nil for the zero/invalid Addr, per netip.Addr.AsSlice).
func (t TunnelPeer) IP() net.IP {
	return t.AsSlice()
}
// EncryptKey is the identity of the encryption key.
// This type implements ipcache.IPMetadata
type EncryptKey uint8

// EncryptKeyEmpty is the zero value, meaning no encryption key is set.
const EncryptKeyEmpty = EncryptKey(0)

// IsValid reports whether a non-zero key index has been set.
func (e EncryptKey) IsValid() bool {
	return e != 0
}

// Uint8 returns the raw key index.
func (e EncryptKey) Uint8() uint8 {
	return uint8(e)
}

// String renders the key index in decimal.
func (e EncryptKey) String() string {
	return strconv.FormatUint(uint64(e), 10)
}
// RequestedIdentity is a desired numeric identity for the prefix. When the
// prefix is next injected, this numeric ID will be requested from the local
// allocator. If the allocator can accommodate that request, it will do so.
// In order for this to be useful, the prefix must not already have an identity
// (or its set of labels must have changed), and that numeric identity must
// be free.
// Thus, the numeric ID should have already been held-aside in the allocator
// using WithholdLocalIdentities(). That will ensure the numeric ID remains free
// for the prefix to request.
type RequestedIdentity identity.NumericIdentity

// IsValid reports whether a specific identity was requested
// (zero means "no request").
func (id RequestedIdentity) IsValid() bool {
	return id != 0
}

// ID converts the request back into a plain NumericIdentity.
func (id RequestedIdentity) ID() identity.NumericIdentity {
	return identity.NumericIdentity(id)
}
// EndpointFlags represents various flags that can be attached to endpoints in the IPCache
// This type implements ipcache.IPMetadata
type EndpointFlags struct {
	// isInit gets flipped to true on the first intentional flag set
	// it is a sentinel to distinguish an uninitialized EndpointFlags
	// from one with all flags set to false
	isInit bool

	// flagSkipTunnel can be applied to a remote endpoint to signal that
	// packets destined for said endpoint shall not be forwarded through
	// an overlay tunnel, regardless of Cilium's configuration.
	flagSkipTunnel bool

	// Note: if you add any more flags here, be sure to update (*prefixInfo).flatten()
	// to merge them across different resources.
}

// SetSkipTunnel records the skip-tunnel flag and marks the flags as
// initialized (see isInit above).
func (e *EndpointFlags) SetSkipTunnel(skip bool) {
	e.isInit = true
	e.flagSkipTunnel = skip
}

// IsValid reports whether any flag has been intentionally set, even if all
// flags are false.
func (e EndpointFlags) IsValid() bool {
	return e.isInit
}
// Uint8 encoding MUST mimic the one in pkg/maps/ipcache
// since it will eventually get recast to it
const (
	FlagSkipTunnel uint8 = 1 << iota
)

// Uint8 packs the flags into their single-byte wire representation.
func (e EndpointFlags) Uint8() uint8 {
	if e.flagSkipTunnel {
		return FlagSkipTunnel
	}
	return 0
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package utils
import (
"log/slog"
"k8s.io/apimachinery/pkg/types"
cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/policy/api"
)
const (
	// podPrefixLbl is the value of the prefix used in the label selector to
	// represent pods in the default namespace.
	podPrefixLbl = labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel

	// podAnyPrefixLbl is the value of the prefix used in the label selector to
	// represent pods in the default namespace for any source type.
	podAnyPrefixLbl = labels.LabelSourceAnyKeyPrefix + k8sConst.PodNamespaceLabel

	// podK8SNamespaceLabelsPrefix is the prefix used in the label selector for namespace labels.
	podK8SNamespaceLabelsPrefix = labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceMetaLabelsPrefix

	// podAnyNamespaceLabelsPrefix is the prefix used in the label selector for namespace labels
	// for any source type.
	podAnyNamespaceLabelsPrefix = labels.LabelSourceAnyKeyPrefix + k8sConst.PodNamespaceMetaLabelsPrefix

	// clusterPrefixLbl is the prefix used in the label selector for cluster name.
	clusterPrefixLbl = labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelCluster

	// clusterAnyPrefixLbl is the prefix used in the label selector for cluster name
	// for any source type.
	clusterAnyPrefixLbl = labels.LabelSourceAnyKeyPrefix + k8sConst.PolicyLabelCluster

	// podInitLbl is the label used in a label selector to match on
	// initializing pods.
	podInitLbl = labels.LabelSourceReservedKeyPrefix + labels.IDNameInit

	// ResourceTypeCiliumNetworkPolicy is the resource type used for the
	// PolicyLabelDerivedFrom label
	ResourceTypeCiliumNetworkPolicy = "CiliumNetworkPolicy"

	// ResourceTypeCiliumClusterwideNetworkPolicy is the resource type used for the
	// PolicyLabelDerivedFrom label
	ResourceTypeCiliumClusterwideNetworkPolicy = "CiliumClusterwideNetworkPolicy"
)
// GetPolicyLabels returns a LabelArray for the given namespace and name,
// consisting of the derived-from, name, (optional) namespace and UID labels,
// in that order.
func GetPolicyLabels(ns, name string, uid types.UID, derivedFrom string) labels.LabelArray {
	// Keep labels sorted by the key.
	out := make(labels.LabelArray, 0, 4)
	out = append(out,
		labels.NewLabel(k8sConst.PolicyLabelDerivedFrom, derivedFrom, labels.LabelSourceK8s),
		labels.NewLabel(k8sConst.PolicyLabelName, name, labels.LabelSourceK8s),
	)
	// For clusterwide policy namespace will be empty.
	if ns != "" {
		out = append(out, labels.NewLabel(k8sConst.PolicyLabelNamespace, ns, labels.LabelSourceK8s))
	}
	return append(out, labels.NewLabel(k8sConst.PolicyLabelUID, string(uid), labels.LabelSourceK8s))
}
// addClusterFilterByDefault attempts to add a cluster filter if the cluster
// name is defined and the EndpointSelector doesn't already have a cluster
// selector (in either its k8s- or any-source form).
func addClusterFilterByDefault(es *api.EndpointSelector, clusterName string) {
	if clusterName != cmtypes.PolicyAnyCluster && !es.HasKey(clusterPrefixLbl) && !es.HasKey(clusterAnyPrefixLbl) {
		es.AddMatch(clusterPrefixLbl, clusterName)
	}
}
// getEndpointSelector converts the provided labelSelector into an EndpointSelector,
// adding the relevant matches for namespaces and clusters based on the provided options.
// If no namespace is provided then it is assumed that the selector is global to the cluster
// this is when translating selectors for CiliumClusterwideNetworkPolicy.
// If a clusterName is provided then it is assumed that the selector is scoped to the local
// cluster by default in a ClusterMesh environment.
func getEndpointSelector(clusterName, namespace string, labelSelector *slim_metav1.LabelSelector, addK8sPrefix, matchesInit bool) api.EndpointSelector {
	es := api.NewESFromK8sLabelSelector("", labelSelector)

	// The k8s prefix must not be added to reserved labels.
	if addK8sPrefix && es.HasKeyPrefix(labels.LabelSourceReservedKeyPrefix) {
		return es
	}

	// The user can explicitly specify the namespace in the
	// FromEndpoints selector. If omitted, we limit the
	// scope to the namespace the policy lives in.
	//
	// Policies applying on initializing pods are a special case.
	// Those pods don't have any labels, so they don't have a namespace label either.
	// Don't add a namespace label to those endpoint selectors, or we wouldn't be
	// able to match on those pods.
	if !es.HasKey(podPrefixLbl) && !es.HasKey(podAnyPrefixLbl) {
		if namespace == "" {
			// For a clusterwide policy if a namespace is not specified in the labels we add
			// a selector to only match endpoints that contains a namespace label.
			// This is to make sure that we are only allowing traffic for cilium managed k8s endpoints
			// and even if a wildcard is provided in the selector we don't proceed with a truly
			// empty(allow all) endpoint selector for the policy.
			if !matchesInit {
				es.AddMatchExpression(podPrefixLbl, slim_metav1.LabelSelectorOpExists, []string{})
			}
		} else if !es.HasKeyPrefix(podK8SNamespaceLabelsPrefix) && !es.HasKeyPrefix(podAnyNamespaceLabelsPrefix) {
			// A namespace-label selector already scopes the selector;
			// otherwise pin it to the policy's own namespace.
			es.AddMatch(podPrefixLbl, namespace)
		}
	}

	// Similarly to namespace, the user can explicitly specify the cluster in the
	// FromEndpoints selector. If omitted, we limit the
	// scope to the cluster the policy lives in.
	addClusterFilterByDefault(&es, clusterName)

	return es
}
// parseToCiliumIngressCommonRule converts the common (non-port) ingress fields
// into their Cilium-internal representation, translating endpoint selectors
// relative to clusterName/namespace and copying all slice fields so the result
// does not share backing arrays with the input.
func parseToCiliumIngressCommonRule(clusterName, namespace string, es api.EndpointSelector, ing api.IngressCommonRule) api.IngressCommonRule {
	// matchesInit is derived from the rule's subject selector before 'es'
	// is reused as scratch below.
	matchesInit := matchesPodInit(es)
	var retRule api.IngressCommonRule

	if ing.FromEndpoints != nil {
		retRule.FromEndpoints = make([]api.EndpointSelector, len(ing.FromEndpoints))
		for j, ep := range ing.FromEndpoints {
			retRule.FromEndpoints[j] = getEndpointSelector(clusterName, namespace, ep.LabelSelector, true, matchesInit)
		}
	}

	if ing.FromNodes != nil {
		retRule.FromNodes = make([]api.EndpointSelector, len(ing.FromNodes))
		for j, node := range ing.FromNodes {
			// Note: the 'es' parameter is overwritten here; node selectors
			// are restricted to remote-node identities and to the local
			// cluster by default.
			es = api.NewESFromK8sLabelSelector("", node.LabelSelector)
			es.AddMatchExpression(labels.LabelSourceReservedKeyPrefix+labels.IDNameRemoteNode, slim_metav1.LabelSelectorOpExists, []string{})
			addClusterFilterByDefault(&es, clusterName)
			retRule.FromNodes[j] = es
		}
	}

	if ing.FromCIDR != nil {
		retRule.FromCIDR = make([]api.CIDR, len(ing.FromCIDR))
		copy(retRule.FromCIDR, ing.FromCIDR)
	}

	if ing.FromCIDRSet != nil {
		retRule.FromCIDRSet = make([]api.CIDRRule, len(ing.FromCIDRSet))
		copy(retRule.FromCIDRSet, ing.FromCIDRSet)
	}

	if ing.FromRequires != nil {
		retRule.FromRequires = make([]api.EndpointSelector, len(ing.FromRequires))
		for j, ep := range ing.FromRequires {
			retRule.FromRequires[j] = getEndpointSelector(clusterName, namespace, ep.LabelSelector, false, matchesInit)
		}
	}

	if ing.FromEntities != nil {
		retRule.FromEntities = make([]api.Entity, len(ing.FromEntities))
		copy(retRule.FromEntities, ing.FromEntities)
	}

	if ing.FromGroups != nil {
		retRule.FromGroups = make([]api.Groups, len(ing.FromGroups))
		copy(retRule.FromGroups, ing.FromGroups)
	}

	return retRule
}
// parseToCiliumIngressRule converts the given ingress rules into their
// Cilium-internal representation, resolving endpoint selectors against
// clusterName/namespace and copying slice fields so the result does not
// share backing arrays with the input. Returns nil for nil input.
func parseToCiliumIngressRule(clusterName, namespace string, es api.EndpointSelector, inRules []api.IngressRule) []api.IngressRule {
	if inRules == nil {
		return nil
	}
	retRules := make([]api.IngressRule, len(inRules))
	for i, ing := range inRules {
		out := &retRules[i]
		if ing.ToPorts != nil {
			out.ToPorts = make([]api.PortRule, len(ing.ToPorts))
			copy(out.ToPorts, ing.ToPorts)
		}
		if ing.ICMPs != nil {
			out.ICMPs = make(api.ICMPRules, len(ing.ICMPs))
			copy(out.ICMPs, ing.ICMPs)
		}
		out.IngressCommonRule = parseToCiliumIngressCommonRule(clusterName, namespace, es, ing.IngressCommonRule)
		out.Authentication = ing.Authentication.DeepCopy()
		out.SetAggregatedSelectors()
	}
	return retRules
}
// parseToCiliumIngressDenyRule converts the given ingress deny rules into
// their Cilium-internal representation, resolving endpoint selectors against
// clusterName/namespace and copying slice fields so the result does not
// share backing arrays with the input. Returns nil for nil input.
func parseToCiliumIngressDenyRule(clusterName, namespace string, es api.EndpointSelector, inRules []api.IngressDenyRule) []api.IngressDenyRule {
	if inRules == nil {
		return nil
	}
	retRules := make([]api.IngressDenyRule, len(inRules))
	for i, ing := range inRules {
		out := &retRules[i]
		if ing.ToPorts != nil {
			out.ToPorts = make([]api.PortDenyRule, len(ing.ToPorts))
			copy(out.ToPorts, ing.ToPorts)
		}
		if ing.ICMPs != nil {
			out.ICMPs = make(api.ICMPRules, len(ing.ICMPs))
			copy(out.ICMPs, ing.ICMPs)
		}
		out.IngressCommonRule = parseToCiliumIngressCommonRule(clusterName, namespace, es, ing.IngressCommonRule)
		out.SetAggregatedSelectors()
	}
	return retRules
}
// parseToCiliumEgressCommonRule converts the common (non-port) egress fields
// into their Cilium-internal representation, translating endpoint selectors
// relative to clusterName/namespace and copying all slice fields so the result
// does not share backing arrays with the input.
func parseToCiliumEgressCommonRule(clusterName, namespace string, es api.EndpointSelector, egr api.EgressCommonRule) api.EgressCommonRule {
	// matchesInit is derived from the rule's subject selector before 'es'
	// is reused as scratch below.
	matchesInit := matchesPodInit(es)
	var retRule api.EgressCommonRule

	if egr.ToEndpoints != nil {
		retRule.ToEndpoints = make([]api.EndpointSelector, len(egr.ToEndpoints))
		for j, ep := range egr.ToEndpoints {
			endpointSelector := getEndpointSelector(clusterName, namespace, ep.LabelSelector, true, matchesInit)
			// Preserve the "generated" marker from the input selector.
			endpointSelector.Generated = ep.Generated
			retRule.ToEndpoints[j] = endpointSelector
		}
	}

	if egr.ToCIDR != nil {
		retRule.ToCIDR = make([]api.CIDR, len(egr.ToCIDR))
		copy(retRule.ToCIDR, egr.ToCIDR)
	}

	if egr.ToCIDRSet != nil {
		retRule.ToCIDRSet = make(api.CIDRRuleSlice, len(egr.ToCIDRSet))
		copy(retRule.ToCIDRSet, egr.ToCIDRSet)
	}

	if egr.ToRequires != nil {
		retRule.ToRequires = make([]api.EndpointSelector, len(egr.ToRequires))
		for j, ep := range egr.ToRequires {
			retRule.ToRequires[j] = getEndpointSelector(clusterName, namespace, ep.LabelSelector, false, matchesInit)
		}
	}

	if egr.ToServices != nil {
		retRule.ToServices = make([]api.Service, len(egr.ToServices))
		copy(retRule.ToServices, egr.ToServices)
	}

	if egr.ToEntities != nil {
		retRule.ToEntities = make([]api.Entity, len(egr.ToEntities))
		copy(retRule.ToEntities, egr.ToEntities)
	}

	if egr.ToNodes != nil {
		retRule.ToNodes = make([]api.EndpointSelector, len(egr.ToNodes))
		for j, node := range egr.ToNodes {
			// Note: the 'es' parameter is overwritten here; node selectors
			// are restricted to remote-node identities and to the local
			// cluster by default.
			es = api.NewESFromK8sLabelSelector("", node.LabelSelector)
			es.AddMatchExpression(labels.LabelSourceReservedKeyPrefix+labels.IDNameRemoteNode, slim_metav1.LabelSelectorOpExists, []string{})
			addClusterFilterByDefault(&es, clusterName)
			retRule.ToNodes[j] = es
		}
	}

	if egr.ToGroups != nil {
		retRule.ToGroups = make([]api.Groups, len(egr.ToGroups))
		copy(retRule.ToGroups, egr.ToGroups)
	}

	return retRule
}
// parseToCiliumEgressRule converts the given egress rules into their
// Cilium-internal representation, resolving endpoint selectors against
// clusterName/namespace and copying slice fields so the result does not
// share backing arrays with the input. Returns nil for nil input.
func parseToCiliumEgressRule(clusterName, namespace string, es api.EndpointSelector, inRules []api.EgressRule) []api.EgressRule {
	var retRules []api.EgressRule

	if inRules != nil {
		retRules = make([]api.EgressRule, len(inRules))
		for i, egr := range inRules {
			if egr.ToPorts != nil {
				retRules[i].ToPorts = make([]api.PortRule, len(egr.ToPorts))
				copy(retRules[i].ToPorts, egr.ToPorts)
			}

			if egr.ICMPs != nil {
				retRules[i].ICMPs = make(api.ICMPRules, len(egr.ICMPs))
				copy(retRules[i].ICMPs, egr.ICMPs)
			}

			if egr.ToFQDNs != nil {
				retRules[i].ToFQDNs = make([]api.FQDNSelector, len(egr.ToFQDNs))
				copy(retRules[i].ToFQDNs, egr.ToFQDNs)
			}

			retRules[i].EgressCommonRule = parseToCiliumEgressCommonRule(clusterName, namespace, es, egr.EgressCommonRule)
			// Deep-copy Authentication for consistency with
			// parseToCiliumIngressRule: assigning the pointer directly
			// would alias the caller's rule, so later mutations of the
			// input would silently change the parsed rule (and vice versa).
			retRules[i].Authentication = egr.Authentication.DeepCopy()
			retRules[i].SetAggregatedSelectors()
		}
	}

	return retRules
}
// parseToCiliumEgressDenyRule converts the given egress deny rules into
// their Cilium-internal representation, resolving endpoint selectors against
// clusterName/namespace and copying slice fields so the result does not
// share backing arrays with the input. Returns nil for nil input.
func parseToCiliumEgressDenyRule(clusterName, namespace string, es api.EndpointSelector, inRules []api.EgressDenyRule) []api.EgressDenyRule {
	if inRules == nil {
		return nil
	}
	retRules := make([]api.EgressDenyRule, len(inRules))
	for i, egr := range inRules {
		out := &retRules[i]
		if egr.ToPorts != nil {
			out.ToPorts = make([]api.PortDenyRule, len(egr.ToPorts))
			copy(out.ToPorts, egr.ToPorts)
		}
		if egr.ICMPs != nil {
			out.ICMPs = make(api.ICMPRules, len(egr.ICMPs))
			copy(out.ICMPs, egr.ICMPs)
		}
		out.EgressCommonRule = parseToCiliumEgressCommonRule(clusterName, namespace, es, egr.EgressCommonRule)
		out.SetAggregatedSelectors()
	}
	return retRules
}
// matchesPodInit reports whether the selector explicitly matches on
// initializing pods (reserved:init). A selector without an underlying
// LabelSelector cannot match anything and reports false.
func matchesPodInit(epSelector api.EndpointSelector) bool {
	return epSelector.LabelSelector != nil && epSelector.HasKey(podInitLbl)
}
// namespacesAreValid checks the set of namespaces from a rule: it returns
// true if they are not specified, or if exactly one is specified and it
// matches the namespace where the rule is being inserted.
func namespacesAreValid(namespace string, userNamespaces []string) bool {
	switch len(userNamespaces) {
	case 0:
		return true
	case 1:
		return userNamespaces[0] == namespace
	default:
		return false
	}
}
// ParseToCiliumRule returns an api.Rule with all the labels parsed into cilium
// labels. If the namespace provided is empty then the rule is cluster scoped, this
// might happen in case of CiliumClusterwideNetworkPolicy which enforces a policy on the cluster
// instead of the particular namespace. If the clusterName is provided then the
// policy is scoped to the local cluster in a ClusterMesh environment.
func ParseToCiliumRule(logger *slog.Logger, clusterName, namespace, name string, uid types.UID, r *api.Rule) *api.Rule {
	retRule := &api.Rule{}
	if r.EndpointSelector.LabelSelector != nil {
		retRule.EndpointSelector = api.NewESFromK8sLabelSelector("", r.EndpointSelector.LabelSelector)
		// The PodSelector should only reflect to the same namespace
		// the policy is being stored, thus we add the namespace to
		// the MatchLabels map. Additionally, Policy repository relies
		// on this fact to properly choose correct network policies for
		// a given Security Identity.
		//
		// Policies applying to all namespaces are a special case.
		// Such policies can match on any traffic from Pods or Nodes,
		// so it wouldn't make sense to inject a namespace match for
		// those policies.
		if namespace != "" {
			userNamespace, present := r.EndpointSelector.GetMatch(podPrefixLbl)
			// A user-specified namespace match is only legal when it
			// names the policy's own namespace; warn and override
			// otherwise.
			if present && !namespacesAreValid(namespace, userNamespace) {
				logger.Warn("CiliumNetworkPolicy contains illegal namespace match in EndpointSelector."+
					" EndpointSelector always applies in namespace of the policy resource, removing illegal namespace match'.",
					logfields.K8sNamespace, namespace,
					logfields.CiliumNetworkPolicyName, name,
					logfields.K8sNamespaceIllegal, userNamespace,
				)
			}
			retRule.EndpointSelector.AddMatch(podPrefixLbl, namespace)
		}
	} else if r.NodeSelector.LabelSelector != nil {
		retRule.NodeSelector = api.NewESFromK8sLabelSelector("", r.NodeSelector.LabelSelector)
	}

	retRule.Ingress = parseToCiliumIngressRule(clusterName, namespace, r.EndpointSelector, r.Ingress)
	retRule.IngressDeny = parseToCiliumIngressDenyRule(clusterName, namespace, r.EndpointSelector, r.IngressDeny)
	retRule.Egress = parseToCiliumEgressRule(clusterName, namespace, r.EndpointSelector, r.Egress)
	retRule.EgressDeny = parseToCiliumEgressDenyRule(clusterName, namespace, r.EndpointSelector, r.EgressDeny)

	retRule.Labels = ParseToCiliumLabels(namespace, name, uid, r.Labels)

	retRule.Description = r.Description
	retRule.EnableDefaultDeny = r.EnableDefaultDeny
	retRule.Log = r.Log

	return retRule
}
// ParseToCiliumLabels returns all ruleLbls appended with a specific label that
// represents the given namespace and name along with a label that specifies
// these labels were derived from a CiliumNetworkPolicy (or from a
// CiliumClusterwideNetworkPolicy when the namespace is empty).
func ParseToCiliumLabels(namespace, name string, uid types.UID, ruleLbs labels.LabelArray) labels.LabelArray {
	// Clusterwide policies carry no namespace.
	resourceType := ResourceTypeCiliumClusterwideNetworkPolicy
	if namespace != "" {
		resourceType = ResourceTypeCiliumNetworkPolicy
	}
	policyLbls := GetPolicyLabels(namespace, name, uid, resourceType)
	return append(policyLbls, ruleLbs...).Sort()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
)
// Default values applied to CiliumBGPNeighbor fields when left unspecified.
const (
	// DefaultBGPPeerPort defines the TCP port number of a CiliumBGPNeighbor when PeerPort is unspecified.
	DefaultBGPPeerPort = 179
	// DefaultBGPEBGPMultihopTTL defines the default value for the TTL value used in BGP packets sent to the eBGP neighbors.
	DefaultBGPEBGPMultihopTTL = 1
	// DefaultBGPConnectRetryTimeSeconds defines the default initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8).
	DefaultBGPConnectRetryTimeSeconds = 120
	// DefaultBGPHoldTimeSeconds defines the default initial value for the BGP HoldTimer (RFC 4271, Section 4.2).
	DefaultBGPHoldTimeSeconds = 90
	// DefaultBGPKeepAliveTimeSeconds defines the default initial value for the BGP KeepaliveTimer (RFC 4271, Section 8).
	DefaultBGPKeepAliveTimeSeconds = 30
	// DefaultBGPGRRestartTimeSeconds defines default Restart Time for graceful restart (RFC 4724, section 4.2)
	DefaultBGPGRRestartTimeSeconds = 120
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false

// CiliumBGPPeerConfigList is a list of CiliumBGPPeerConfig objects.
type CiliumBGPPeerConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`

	// Items is a list of CiliumBGPPeerConfig.
	Items []CiliumBGPPeerConfig `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgppeerconfig",path="ciliumbgppeerconfigs",scope="Cluster",shortName={cbgppeer}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumBGPPeerConfig is a cluster-scoped resource carrying per-peer BGP
// session configuration: transport, timers, authentication, graceful restart,
// eBGP multihop and address families.
type CiliumBGPPeerConfig struct {
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Spec is the specification of the desired behavior of the CiliumBGPPeerConfig.
	Spec CiliumBGPPeerConfigSpec `json:"spec"`
	// Status is the running status of the CiliumBGPPeerConfig
	//
	// +kubebuilder:validation:Optional
	Status CiliumBGPPeerConfigStatus `json:"status"`
}
// CiliumBGPPeerConfigSpec defines the desired BGP session parameters for a
// peer. All fields are optional; unset fields are filled in by SetDefaults.
type CiliumBGPPeerConfigSpec struct {
	// Transport defines the BGP transport parameters for the peer.
	//
	// If not specified, the default transport parameters are used.
	//
	// +kubebuilder:validation:Optional
	Transport *CiliumBGPTransport `json:"transport,omitempty"`
	// Timers defines the BGP timers for the peer.
	//
	// If not specified, the default timers are used.
	//
	// +kubebuilder:validation:Optional
	Timers *CiliumBGPTimers `json:"timers,omitempty"`
	// AuthSecretRef is the name of the secret to use to fetch a TCP
	// authentication password for this peer.
	//
	// If not specified, no authentication is used.
	//
	// +kubebuilder:validation:Optional
	AuthSecretRef *string `json:"authSecretRef,omitempty"`
	// GracefulRestart defines graceful restart parameters which are negotiated
	// with this peer.
	//
	// If not specified, the graceful restart capability is disabled.
	//
	// +kubebuilder:validation:Optional
	GracefulRestart *CiliumBGPNeighborGracefulRestart `json:"gracefulRestart,omitempty"`
	// EBGPMultihopTTL controls the multi-hop feature for eBGP peers.
	// Its value defines the Time To Live (TTL) value used in BGP
	// packets sent to the peer.
	//
	// If not specified, EBGP multihop is disabled. This field is ignored for iBGP neighbors.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=255
	// +kubebuilder:default=1
	EBGPMultihop *int32 `json:"ebgpMultihop,omitempty"`
	// Families, if provided, defines a set of AFI/SAFIs the speaker will
	// negotiate with its peer.
	//
	// If not specified, the default families of IPv6/unicast and IPv4/unicast will be created.
	//
	// +kubebuilder:validation:Optional
	Families []CiliumBGPFamilyWithAdverts `json:"families,omitempty"`
}
// CiliumBGPPeerConfigStatus is the observed status of a CiliumBGPPeerConfig.
type CiliumBGPPeerConfigStatus struct {
	// The current conditions of the CiliumBGPPeerConfig
	//
	// +optional
	// +listType=map
	// +listMapKey=type
	// +deepequal-gen=false
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// Conditions for CiliumBGPPeerConfig. When you add a new condition, don't
// forget to update the below AllBGPPeerConfigConditions list as well.
const (
	// BGPPeerConfigConditionMissingAuthSecret is set when the referenced
	// auth secret is missing.
	BGPPeerConfigConditionMissingAuthSecret = "cilium.io/MissingAuthSecret"
)

// AllBGPPeerConfigConditions lists every condition type that may appear on a
// CiliumBGPPeerConfig.
var AllBGPPeerConfigConditions = []string{
	BGPPeerConfigConditionMissingAuthSecret,
}
// CiliumBGPFamily represents an AFI/SAFI address family pair.
type CiliumBGPFamily struct {
	// Afi is the Address Family Identifier (AFI) of the family.
	//
	// +kubebuilder:validation:Enum=ipv4;ipv6;l2vpn;ls;opaque
	// +kubebuilder:validation:Required
	Afi string `json:"afi"`
	// Safi is the Subsequent Address Family Identifier (SAFI) of the family.
	//
	// +kubebuilder:validation:Enum=unicast;multicast;mpls_label;encapsulation;vpls;evpn;ls;sr_policy;mup;mpls_vpn;mpls_vpn_multicast;route_target_constraints;flowspec_unicast;flowspec_vpn;key_value
	// +kubebuilder:validation:Required
	Safi string `json:"safi"`
}
// CiliumBGPFamilyWithAdverts represents an AFI/SAFI address family pair along with reference to BGP Advertisements.
type CiliumBGPFamilyWithAdverts struct {
	CiliumBGPFamily `json:",inline"`
	// Advertisements selects group of BGP Advertisement(s) to advertise for this family.
	//
	// If not specified, no advertisements are sent for this family.
	//
	// This field is ignored in CiliumBGPNeighbor which is used in CiliumBGPPeeringPolicy.
	// Use CiliumBGPPeeringPolicy advertisement options instead.
	//
	// +kubebuilder:validation:Optional
	Advertisements *slimv1.LabelSelector `json:"advertisements,omitempty"`
}
// CiliumBGPTransport defines the BGP transport parameters for the peer.
// Unset fields are filled in by SetDefaults.
type CiliumBGPTransport struct {
	// PeerPort is the peer port to be used for the BGP session.
	//
	// If not specified, defaults to TCP port 179.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=179
	PeerPort *int32 `json:"peerPort,omitempty"`
}
// SetDefaults fills in the default peer port when PeerPort is nil or zero.
func (t *CiliumBGPTransport) SetDefaults() {
	if t.PeerPort != nil && *t.PeerPort != 0 {
		return
	}
	t.PeerPort = ptr.To[int32](DefaultBGPPeerPort)
}
// CiliumBGPTimers defines timers configuration for a BGP peer.
// Unset fields are filled in by SetDefaults.
//
// +kubebuilder:validation:XValidation:rule="self.keepAliveTimeSeconds <= self.holdTimeSeconds", message="keepAliveTimeSeconds can not be larger than holdTimeSeconds"
type CiliumBGPTimers struct {
	// ConnectRetryTimeSeconds defines the initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8).
	//
	// If not specified, defaults to 120 seconds.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=2147483647
	// +kubebuilder:default=120
	ConnectRetryTimeSeconds *int32 `json:"connectRetryTimeSeconds,omitempty"`
	// HoldTimeSeconds defines the initial value for the BGP HoldTimer (RFC 4271, Section 4.2).
	// Updating this value will cause a session reset.
	//
	// If not specified, defaults to 90 seconds.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=3
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=90
	HoldTimeSeconds *int32 `json:"holdTimeSeconds,omitempty"`
	// KeepaliveTimeSeconds defines the initial value for the BGP KeepaliveTimer (RFC 4271, Section 8).
	// It can not be larger than HoldTimeSeconds. Updating this value will cause a session reset.
	//
	// If not specified, defaults to 30 seconds.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=30
	KeepAliveTimeSeconds *int32 `json:"keepAliveTimeSeconds,omitempty"`
}
// SetDefaults fills in default timer values for any timer that is nil or zero.
func (t *CiliumBGPTimers) SetDefaults() {
	// orDefault returns v unchanged when it carries a non-zero value,
	// otherwise a pointer to the supplied default.
	orDefault := func(v *int32, def int32) *int32 {
		if v == nil || *v == 0 {
			return ptr.To[int32](def)
		}
		return v
	}
	t.ConnectRetryTimeSeconds = orDefault(t.ConnectRetryTimeSeconds, DefaultBGPConnectRetryTimeSeconds)
	t.HoldTimeSeconds = orDefault(t.HoldTimeSeconds, DefaultBGPHoldTimeSeconds)
	t.KeepAliveTimeSeconds = orDefault(t.KeepAliveTimeSeconds, DefaultBGPKeepAliveTimeSeconds)
}
// CiliumBGPNeighborGracefulRestart defines the graceful restart parameters
// negotiated with a peer. Unset fields are filled in by SetDefaults.
type CiliumBGPNeighborGracefulRestart struct {
	// Enabled flag, when set enables graceful restart capability.
	//
	// +kubebuilder:validation:Required
	Enabled bool `json:"enabled"`
	// RestartTimeSeconds is the estimated time it will take for the BGP
	// session to be re-established with peer after a restart.
	// After this period, peer will remove stale routes. This is
	// described in RFC 4724, section 4.2.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=4095
	// +kubebuilder:default=120
	RestartTimeSeconds *int32 `json:"restartTimeSeconds,omitempty"`
}
// SetDefaults fills in the default restart time when RestartTimeSeconds is
// nil or zero.
func (gr *CiliumBGPNeighborGracefulRestart) SetDefaults() {
	if gr.RestartTimeSeconds != nil && *gr.RestartTimeSeconds != 0 {
		return
	}
	gr.RestartTimeSeconds = ptr.To[int32](DefaultBGPGRRestartTimeSeconds)
}
// SetDefaults applies defaults to every optional field of the spec: nil
// sub-structs are allocated and delegated to their own SetDefaults, nil
// EBGPMultihop gets the default TTL, and an empty Families list becomes the
// default IPv6/unicast + IPv4/unicast pair. Calling it on a nil receiver is a
// no-op.
func (p *CiliumBGPPeerConfigSpec) SetDefaults() {
	if p == nil {
		return
	}
	// Allocate missing sub-structs first, then let each fill its own defaults.
	if p.Transport == nil {
		p.Transport = &CiliumBGPTransport{}
	}
	if p.Timers == nil {
		p.Timers = &CiliumBGPTimers{}
	}
	if p.GracefulRestart == nil {
		p.GracefulRestart = &CiliumBGPNeighborGracefulRestart{}
	}
	p.Transport.SetDefaults()
	p.Timers.SetDefaults()
	p.GracefulRestart.SetDefaults()
	if p.EBGPMultihop == nil {
		p.EBGPMultihop = ptr.To[int32](DefaultBGPEBGPMultihopTTL)
	}
	if len(p.Families) == 0 {
		// Default families: IPv6/unicast first, then IPv4/unicast.
		for _, afi := range []string{"ipv6", "ipv4"} {
			p.Families = append(p.Families, CiliumBGPFamilyWithAdverts{
				CiliumBGPFamily: CiliumBGPFamily{
					Afi:  afi,
					Safi: "unicast",
				},
			})
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
"log/slog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen:private-method=true
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumclusterwidenetworkpolicy",path="ciliumclusterwidenetworkpolicies",scope="Cluster",shortName={ccnp}
// +kubebuilder:printcolumn:JSONPath=".status.conditions[?(@.type=='Valid')].status",name="Valid",type=string
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumClusterwideNetworkPolicy is a Kubernetes third-party resource with a
// modified version of CiliumNetworkPolicy which is cluster scoped rather than
// namespace scoped.
type CiliumClusterwideNetworkPolicy struct {
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Spec is the desired Cilium specific rule specification.
	Spec *api.Rule `json:"spec,omitempty"`
	// Specs is a list of desired Cilium specific rule specification.
	Specs api.Rules `json:"specs,omitempty"`
	// Status is the status of the Cilium policy rule.
	//
	// The reason this field exists in this structure is due to a bug in the
	// k8s code-generator that doesn't create a `UpdateStatus` method because
	// the field does not exist in the structure.
	//
	// +kubebuilder:validation:Optional
	Status CiliumNetworkPolicyStatus `json:"status"`
}
// DeepEqual compares 2 CCNPs while ignoring the LastAppliedConfigAnnotation
// and ignoring the Status field of the CCNP.
func (in *CiliumClusterwideNetworkPolicy) DeepEqual(other *CiliumClusterwideNetworkPolicy) bool {
	// Cheap metadata comparison first; fall through to the generated
	// private deepEqual for the remaining fields.
	if !objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) {
		return false
	}
	return in.deepEqual(other)
}
// SetDerivedPolicyStatus sets the derivative policy status for the given
// derivative policy name, allocating the DerivativePolicies map on first use.
func (r *CiliumClusterwideNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) {
	policies := r.Status.DerivativePolicies
	if policies == nil {
		policies = map[string]CiliumNetworkPolicyNodeStatus{}
		r.Status.DerivativePolicies = policies
	}
	policies[derivativePolicyName] = status
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumClusterwideNetworkPolicyList is a list of
// CiliumClusterwideNetworkPolicy objects.
type CiliumClusterwideNetworkPolicyList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumClusterwideNetworkPolicy objects.
	Items []CiliumClusterwideNetworkPolicy `json:"items"`
}
// Parse parses a CiliumClusterwideNetworkPolicy and returns a list of cilium
// policy rules. It fails when the object has no name, when both Spec and
// Specs are nil, or when any rule fails sanitization.
func (r *CiliumClusterwideNetworkPolicy) Parse(logger *slog.Logger, clusterName string) (api.Rules, error) {
	name := r.ObjectMeta.Name
	if name == "" {
		return nil, NewErrParse("CiliumClusterwideNetworkPolicy must have name")
	}
	if r.Spec == nil && r.Specs == nil {
		return nil, ErrEmptyCCNP
	}
	uid := r.ObjectMeta.UID
	// Pre-size for the single Spec plus all Specs entries.
	retRules := make(api.Rules, 0, len(r.Specs)+1)
	if r.Spec != nil {
		if err := r.Spec.Sanitize(); err != nil {
			return nil, NewErrParse(fmt.Sprintf("Invalid CiliumClusterwideNetworkPolicy spec: %s", err))
		}
		retRules = append(retRules, k8sCiliumUtils.ParseToCiliumRule(logger, clusterName, "", name, uid, r.Spec))
	}
	// Ranging over a nil Specs slice performs no iterations.
	for _, rule := range r.Specs {
		if err := rule.Sanitize(); err != nil {
			return nil, NewErrParse(fmt.Sprintf("Invalid CiliumClusterwideNetworkPolicy specs: %s", err))
		}
		retRules = append(retRules, k8sCiliumUtils.ParseToCiliumRule(logger, clusterName, "", name, uid, rule))
	}
	return retRules, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"bytes"
"encoding/json"
"fmt"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumenvoyconfig",path="ciliumenvoyconfigs",scope="Namespaced",shortName={cec}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the identity",name="Age",type=date
// +kubebuilder:storageversion
// CiliumEnvoyConfig is a namespaced resource carrying Envoy xDS resources
// together with the Kubernetes services they apply to.
type CiliumEnvoyConfig struct {
	// +k8s:openapi-gen=false
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +k8s:openapi-gen=false
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Spec is the desired Envoy configuration.
	// +k8s:openapi-gen=false
	Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumEnvoyConfigList is a list of CiliumEnvoyConfig objects.
type CiliumEnvoyConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumEnvoyConfig objects.
	Items []CiliumEnvoyConfig `json:"items"`
}
// CiliumEnvoyConfigSpec defines the Envoy xDS resources to install and the
// Kubernetes services whose traffic or backends they apply to.
type CiliumEnvoyConfigSpec struct {
	// Services specifies Kubernetes services for which traffic is
	// forwarded to an Envoy listener for L7 load balancing. Backends
	// of these services are automatically synced to Envoy using EDS.
	//
	// +kubebuilder:validation:Optional
	Services []*ServiceListener `json:"services,omitempty"`
	// BackendServices specifies Kubernetes services whose backends
	// are automatically synced to Envoy using EDS. Traffic for these
	// services is not forwarded to an Envoy listener. This allows an
	// Envoy listener to load balance traffic to these backends while
	// normal Cilium service load balancing takes care of balancing
	// traffic for these services at the same time.
	//
	// +kubebuilder:validation:Optional
	BackendServices []*Service `json:"backendServices,omitempty"`
	// Envoy xDS resources, a list of the following Envoy resource types:
	// type.googleapis.com/envoy.config.listener.v3.Listener,
	// type.googleapis.com/envoy.config.route.v3.RouteConfiguration,
	// type.googleapis.com/envoy.config.cluster.v3.Cluster,
	// type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment, and
	// type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret.
	//
	// +kubebuilder:validation:Required
	Resources []XDSResource `json:"resources,omitempty"`
	// NodeSelector is a label selector that determines to which nodes
	// this configuration applies.
	// If nil, then this config applies to all nodes.
	//
	// +kubebuilder:validation:Optional
	NodeSelector *slim_metav1.LabelSelector `json:"nodeSelector,omitempty"`
}
// Service identifies a Kubernetes service (and optionally a subset of its
// ports) referenced from a CiliumEnvoyConfig.
type Service struct {
	// Name is the name of a destination Kubernetes service that identifies traffic
	// to be redirected.
	//
	// +kubebuilder:validation:Required
	Name string `json:"name"`
	// Namespace is the Kubernetes service namespace.
	// In CiliumEnvoyConfig namespace defaults to the namespace of the CEC,
	// In CiliumClusterwideEnvoyConfig namespace defaults to "default".
	// +kubebuilder:validation:Optional
	Namespace string `json:"namespace"`
	// Ports is a set of port numbers that can be used for filtering when the
	// underlying service exposes multiple port numbers.
	//
	// NOTE(review): the JSON tag is "number" rather than "ports"; it must stay
	// unchanged to preserve API compatibility.
	//
	// +kubebuilder:validation:Optional
	Ports []string `json:"number,omitempty"`
}
// ServiceName returns the load-balancer service name for this service
// reference.
func (l *Service) ServiceName() loadbalancer.ServiceName {
	ns, name := l.Namespace, l.Name
	return loadbalancer.NewServiceName(ns, name)
}
// ServiceListener identifies a Kubernetes service whose traffic is redirected
// to an Envoy listener declared in the same CiliumEnvoyConfig.
type ServiceListener struct {
	// Name is the name of a destination Kubernetes service that identifies traffic
	// to be redirected.
	//
	// +kubebuilder:validation:Required
	Name string `json:"name"`
	// Namespace is the Kubernetes service namespace.
	// In CiliumEnvoyConfig namespace this is overridden to the namespace of the CEC,
	// In CiliumClusterwideEnvoyConfig namespace defaults to "default".
	// +kubebuilder:validation:Optional
	Namespace string `json:"namespace"`
	// Ports is a set of service's frontend ports that should be redirected to the Envoy
	// listener. By default all frontend ports of the service are redirected.
	//
	// +kubebuilder:validation:Optional
	Ports []uint16 `json:"ports,omitempty"`
	// Listener specifies the name of the Envoy listener the
	// service traffic is redirected to. The listener must be
	// specified in the Envoy 'resources' of the same
	// CiliumEnvoyConfig.
	//
	// If omitted, the first listener specified in 'resources' is
	// used.
	//
	// +kubebuilder:validation:Optional
	Listener string `json:"listener"`
}
// ServiceName returns the load-balancer service name for this listener's
// target service.
func (l *ServiceListener) ServiceName() loadbalancer.ServiceName {
	ns, name := l.Namespace, l.Name
	return loadbalancer.NewServiceName(ns, name)
}
// +kubebuilder:pruning:PreserveUnknownFields
// XDSResource wraps an Envoy xDS resource (anypb.Any) so it can be embedded in
// a CRD. JSON encoding/decoding is implemented by the custom MarshalJSON and
// UnmarshalJSON methods using protojson; the field itself is excluded from the
// standard JSON machinery via the "-" tag.
type XDSResource struct {
	*anypb.Any `json:"-"`
}
// DeepCopyInto deep copies 'in' into 'out' by cloning the wrapped proto
// message.
func (in *XDSResource) DeepCopyInto(out *XDSResource) {
	cloned := proto.Clone(in.Any)
	// The type assertion cannot fail for a clone of *anypb.Any; the error
	// value is deliberately discarded.
	anyMsg, _ := cloned.(*anypb.Any)
	out.Any = anyMsg
}
// DeepEqual returns 'true' if 'a' and 'b' wrap equal proto messages.
func (a *XDSResource) DeepEqual(b *XDSResource) bool {
	equal := proto.Equal(a.Any, b.Any)
	return equal
}
// MarshalJSON ensures that the unstructured object produces proper
// JSON when passed to Go's standard JSON library, by delegating to
// protojson.
func (u *XDSResource) MarshalJSON() ([]byte, error) {
	data, err := protojson.Marshal(u.Any)
	return data, err
}
// UnmarshalJSON ensures that the unstructured object properly decodes
// JSON when passed to Go's standard JSON library.
//
// Decode failures are deliberately swallowed: the invalid resource is logged
// ("Ignoring invalid CiliumEnvoyConfig JSON") and nil is returned, so a bad
// xDS resource does not fail decoding of the containing object. Only a panic
// inside protojson surfaces as an error, via the named return set in the
// deferred recover.
func (u *XDSResource) UnmarshalJSON(b []byte) (err error) {
	// xDS resources are not validated in K8s, recover from possible panics
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("CEC JSON decoding paniced: %v", r)
		}
	}()
	u.Any = &anypb.Any{}
	err = protojson.Unmarshal(b, u.Any)
	if err != nil {
		// Best-effort pretty-print of the offending JSON for the log;
		// the Indent error is intentionally ignored.
		var buf bytes.Buffer
		json.Indent(&buf, b, "", "\t")
		// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
		logging.DefaultSlogLogger.Warn("Ignoring invalid CiliumEnvoyConfig JSON",
			logfields.Error, err,
			logfields.Object, buf,
		)
	} else if option.Config.Debug {
		// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
		logging.DefaultSlogLogger.Debug("CEC unmarshaled XDS Resource", logfields.Resource, prototext.Format(u.Any))
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
"strconv"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/pkg/iana"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
lb "github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumlocalredirectpolicy",path="ciliumlocalredirectpolicies",scope="Namespaced",shortName={clrp}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// CiliumLocalRedirectPolicy is a Kubernetes Custom Resource that contains a
// specification to redirect traffic locally within a node.
type CiliumLocalRedirectPolicy struct {
	// +k8s:openapi-gen=false
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +k8s:openapi-gen=false
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Spec is the desired behavior of the local redirect policy.
	Spec CiliumLocalRedirectPolicySpec `json:"spec,omitempty"`
	// Status is the most recent status of the local redirect policy.
	// It is a read-only field.
	//
	// +deepequal-gen=false
	// +kubebuilder:validation:Optional
	Status CiliumLocalRedirectPolicyStatus `json:"status"`
}
// Frontend is an {IP, ports} tuple that matches traffic to be redirected by a
// local redirect policy.
type Frontend struct {
	// IP is a destination ip address for traffic to be redirected.
	// The pattern accepts either an IPv4 or an IPv6 address.
	//
	// Example:
	// When it is set to "169.254.169.254", traffic destined to
	// "169.254.169.254" is redirected.
	//
	// +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))`
	// +kubebuilder:validation:Required
	IP string `json:"ip"`
	// ToPorts is a list of destination L4 ports with protocol for traffic
	// to be redirected.
	// When multiple ports are specified, the ports must be named.
	//
	// Example:
	// When set to Port: "53" and Protocol: UDP, traffic destined to port '53'
	// with UDP protocol is redirected.
	//
	// +kubebuilder:validation:Required
	ToPorts []PortInfo `json:"toPorts"`
}
// RedirectFrontend is a frontend configuration that matches traffic that needs to be redirected.
// The configuration must be specified using an ip/port tuple or a Kubernetes service.
type RedirectFrontend struct {
	// AddressMatcher is a tuple {IP, port, protocol} that matches traffic to be
	// redirected.
	//
	// +kubebuilder:validation:OneOf
	AddressMatcher *Frontend `json:"addressMatcher,omitempty"`
	// ServiceMatcher specifies Kubernetes service and port that matches
	// traffic to be redirected.
	//
	// +kubebuilder:validation:OneOf
	ServiceMatcher *ServiceInfo `json:"serviceMatcher,omitempty"`
}
// PortInfo specifies L4 port number and name along with the transport protocol.
// See SanitizePortInfo for the validation applied at runtime.
type PortInfo struct {
	// Port is an L4 port number. The string will be strictly parsed as a single uint16.
	//
	// +kubebuilder:validation:Pattern=`^()([1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5])$`
	// +kubebuilder:validation:Required
	Port string `json:"port"`
	// Protocol is the L4 protocol.
	// Accepted values: "TCP", "UDP"
	//
	// +kubebuilder:validation:Enum=TCP;UDP
	// +kubebuilder:validation:Required
	Protocol api.L4Proto `json:"protocol"`
	// Name is a port name, which must contain at least one [a-z],
	// and may also contain [0-9] and '-' anywhere except adjacent to another
	// '-' or in the beginning or the end.
	//
	// +kubebuilder:validation:Pattern=`^([0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$`
	// +kubebuilder:validation:Optional
	Name string `json:"name"`
}
// ServiceInfo identifies a ClusterIP Kubernetes service whose traffic is to
// be redirected by a local redirect policy.
type ServiceInfo struct {
	// Name is the name of a destination Kubernetes service that identifies traffic
	// to be redirected.
	// The service type needs to be ClusterIP.
	//
	// Example:
	// When this field is populated with 'serviceName:myService', all the traffic
	// destined to the cluster IP of this service at the (specified)
	// service port(s) will be redirected.
	//
	// +kubebuilder:validation:Required
	Name string `json:"serviceName"`
	// Namespace is the Kubernetes service namespace.
	// The service namespace must match the namespace of the parent Local
	// Redirect Policy. For Cluster-wide Local Redirect Policy, this
	// can be any namespace.
	// +kubebuilder:validation:Required
	Namespace string `json:"namespace"`
	// ToPorts is a list of destination service L4 ports with protocol for
	// traffic to be redirected. If not specified, traffic for all the service
	// ports will be redirected.
	// When multiple ports are specified, the ports must be named.
	//
	// +kubebuilder:validation:Optional
	ToPorts []PortInfo `json:"toPorts,omitempty"`
}
// RedirectBackend is a backend configuration that determines where traffic needs to be redirected to.
type RedirectBackend struct {
	// LocalEndpointSelector selects node local pod(s) where traffic is redirected to.
	//
	// +kubebuilder:validation:Required
	LocalEndpointSelector slim_metav1.LabelSelector `json:"localEndpointSelector"`
	// ToPorts is a list of L4 ports with protocol of node local pod(s) where traffic
	// is redirected to.
	// When multiple ports are specified, the ports must be named.
	//
	// +kubebuilder:validation:Required
	ToPorts []PortInfo `json:"toPorts"`
}
// CiliumLocalRedirectPolicySpec specifies the configurations for redirecting traffic
// within a node. Both the frontend and the backend are immutable once set, as
// enforced by the XValidation rules below.
type CiliumLocalRedirectPolicySpec struct {
	// RedirectFrontend specifies frontend configuration to redirect traffic from.
	// It can not be empty.
	//
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="redirectFrontend is immutable"
	RedirectFrontend RedirectFrontend `json:"redirectFrontend"`
	// RedirectBackend specifies backend configuration to redirect traffic to.
	// It can not be empty.
	//
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="redirectBackend is immutable"
	RedirectBackend RedirectBackend `json:"redirectBackend"`
	// SkipRedirectFromBackend indicates whether traffic matching RedirectFrontend
	// from RedirectBackend should skip redirection, and hence the traffic will
	// be forwarded as-is.
	//
	// The default is false which means traffic matching RedirectFrontend will
	// get redirected from all pods, including the RedirectBackend(s).
	//
	// Example: If RedirectFrontend is configured to "169.254.169.254:80" as the traffic
	// that needs to be redirected to backends selected by RedirectBackend, if
	// SkipRedirectFromBackend is set to true, traffic going to "169.254.169.254:80"
	// from such backends will not be redirected back to the backends. Instead,
	// the matched traffic from the backends will be forwarded to the original
	// destination "169.254.169.254:80".
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:default=false
	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="skipRedirectFromBackend is immutable"
	SkipRedirectFromBackend bool `json:"skipRedirectFromBackend"`
	// Description can be used by the creator of the policy to describe the
	// purpose of this policy.
	//
	// +kubebuilder:validation:Optional
	Description string `json:"description,omitempty"`
}
// CiliumLocalRedirectPolicyStatus is the status of a Local Redirect Policy.
type CiliumLocalRedirectPolicyStatus struct {
	// OK is currently the only status field.
	// TODO: Define a proper status (aditi).
	OK bool `json:"ok,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumLocalRedirectPolicyList is a list of CiliumLocalRedirectPolicy objects.
type CiliumLocalRedirectPolicyList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumLocalRedirectPolicy objects.
	Items []CiliumLocalRedirectPolicy `json:"items"`
}
// SanitizePortInfo sanitizes all the fields in the PortInfo.
// It returns the port number, the lower-cased port name and the L4 protocol
// derived from the given input, or an error for invalid input.
//
// When checkNamedPort is true (i.e. multiple ports are configured), Name must
// be a non-empty, valid IANA_SVC_NAME.
func (pInfo *PortInfo) SanitizePortInfo(checkNamedPort bool) (uint16, string, lb.L4Type, error) {
	var (
		pInt     uint16
		pName    string
		protocol lb.L4Type
	)
	// Sanitize port. The field is documented as "strictly parsed as a single
	// uint16" and the CRD pattern only admits decimal digits, so parse with
	// base 10 rather than base 0 (base 0 would also accept "0x.." hex and
	// "0.." octal forms).
	if pInfo.Port == "" {
		return pInt, pName, protocol, fmt.Errorf("port must be specified")
	}
	p, err := strconv.ParseUint(pInfo.Port, 10, 16)
	if err != nil {
		return pInt, pName, protocol, fmt.Errorf("unable to parse port: %w", err)
	}
	if p == 0 {
		return pInt, pName, protocol, fmt.Errorf("port cannot be 0")
	}
	pInt = uint16(p)
	// Sanitize name: required (and IANA-valid) when multiple ports are used.
	if checkNamedPort {
		if pInfo.Name == "" {
			return pInt, pName, protocol, fmt.Errorf("port %s in the local "+
				"redirect policy spec must have a valid IANA_SVC_NAME, as there are multiple ports", pInfo.Port)
		}
		if !iana.IsSvcName(pInfo.Name) {
			return pInt, pName, protocol, fmt.Errorf("port name %s isn't a "+
				"valid IANA_SVC_NAME", pInfo.Name)
		}
	}
	pName = strings.ToLower(pInfo.Name) // Normalize for case insensitive comparison
	// Sanitize protocol
	protocol, err = lb.NewL4Type(string(pInfo.Protocol))
	if err != nil {
		return pInt, pName, protocol, err
	}
	return pInt, pName, protocol, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
"log/slog"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/pkg/comparator"
k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
k8sUtils "github.com/cilium/cilium/pkg/k8s/utils"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen:private-method=true
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumnetworkpolicy",path="ciliumnetworkpolicies",scope="Namespaced",shortName={cnp,ciliumnp}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=".status.conditions[?(@.type=='Valid')].status",name="Valid",type=string
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumNetworkPolicy is a Kubernetes third-party resource with an extended
// version of NetworkPolicy.
type CiliumNetworkPolicy struct {
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Spec is the desired Cilium specific rule specification.
	Spec *api.Rule `json:"spec,omitempty"`
	// Specs is a list of desired Cilium specific rule specification.
	Specs api.Rules `json:"specs,omitempty"`
	// Status is the status of the Cilium policy rule
	//
	// +deepequal-gen=false
	// +kubebuilder:validation:Optional
	Status CiliumNetworkPolicyStatus `json:"status"`
}
// DeepEqual compares 2 CNPs, ignoring the LastAppliedConfigAnnotation via
// objectMetaDeepEqual and delegating the rest to the generated deepEqual.
func (in *CiliumNetworkPolicy) DeepEqual(other *CiliumNetworkPolicy) bool {
	if !objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) {
		return false
	}
	return in.deepEqual(other)
}
// objectMetaDeepEqual performs an equality check for metav1.ObjectMeta that
// ignores the LastAppliedConfigAnnotation. This function's usage is shared
// among CNP and CCNP as they have the same structure.
func objectMetaDeepEqual(in, other metav1.ObjectMeta) bool {
	if in.Name != other.Name || in.Namespace != other.Namespace {
		return false
	}
	// Ignore v1.LastAppliedConfigAnnotation annotation
	ignoredKeys := []string{v1.LastAppliedConfigAnnotation}
	return comparator.MapStringEqualsIgnoreKeys(
		in.GetAnnotations(),
		other.GetAnnotations(),
		ignoredKeys)
}
// +deepequal-gen=true
// CiliumNetworkPolicyStatus is the status of a Cilium policy rule.
type CiliumNetworkPolicyStatus struct {
	// DerivativePolicies is the status of all policies derived from the Cilium
	// policy
	DerivativePolicies map[string]CiliumNetworkPolicyNodeStatus `json:"derivativePolicies,omitempty"`
	// Conditions is the set of conditions attached to the policy, keyed by
	// condition type.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=type
	Conditions []NetworkPolicyCondition `json:"conditions,omitempty"`
}
// +deepequal-gen=true
// CiliumNetworkPolicyNodeStatus is the status of a Cilium policy rule for a
// specific node. See CreateCNPNodeStatus for the canonical constructor.
type CiliumNetworkPolicyNodeStatus struct {
	// OK is true when the policy has been parsed and imported successfully
	// into the in-memory policy repository on the node.
	OK bool `json:"ok,omitempty"`
	// Error describes any error that occurred when parsing or importing the
	// policy, or realizing the policy for the endpoints to which it applies
	// on the node.
	Error string `json:"error,omitempty"`
	// LastUpdated contains the last time this status was updated
	LastUpdated slimv1.Time `json:"lastUpdated,omitempty"`
	// Revision is the policy revision of the repository which first implemented
	// this policy.
	Revision uint64 `json:"localPolicyRevision,omitempty"`
	// Enforcing is set to true once all endpoints present at the time the
	// policy has been imported are enforcing this policy.
	Enforcing bool `json:"enforcing,omitempty"`
	// Annotations corresponds to the Annotations in the ObjectMeta of the CNP
	// that have been realized on the node for CNP. That is, if a CNP has been
	// imported and has been assigned annotation X=Y by the user,
	// Annotations in CiliumNetworkPolicyNodeStatus will be X=Y once the
	// CNP that was imported corresponding to Annotation X=Y has been realized on
	// the node.
	Annotations map[string]string `json:"annotations,omitempty"`
}
// CreateCNPNodeStatus returns a CiliumNetworkPolicyNodeStatus created from the
// provided fields. LastUpdated is stamped with the current time; Error is
// filled in only when cnpError is non-nil.
func CreateCNPNodeStatus(enforcing, ok bool, cnpError error, rev uint64, annotations map[string]string) CiliumNetworkPolicyNodeStatus {
	status := CiliumNetworkPolicyNodeStatus{
		OK:          ok,
		Enforcing:   enforcing,
		Revision:    rev,
		LastUpdated: slimv1.Now(),
		Annotations: annotations,
	}
	if cnpError != nil {
		status.Error = cnpError.Error()
	}
	return status
}
// String returns a human-readable rendering of the policy: type/object meta
// followed by whichever of Spec, Specs are set, then the Status.
func (r *CiliumNetworkPolicy) String() string {
	out := fmt.Sprintf("TypeMeta: %s, ObjectMeta: %s, ", r.TypeMeta.String(), r.ObjectMeta.String())
	if r.Spec != nil {
		out += fmt.Sprintf("Spec: %v", *(r.Spec))
	}
	if r.Specs != nil {
		out += fmt.Sprintf("Specs: %v", r.Specs)
	}
	return out + fmt.Sprintf("Status: %v", r.Status)
}
// SetDerivedPolicyStatus set the derivative policy status for the given
// derivative policy name, lazily allocating the DerivativePolicies map on
// first use.
func (r *CiliumNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) {
	if r.Status.DerivativePolicies == nil {
		r.Status.DerivativePolicies = make(map[string]CiliumNetworkPolicyNodeStatus)
	}
	r.Status.DerivativePolicies[derivativePolicyName] = status
}
// Parse parses a CiliumNetworkPolicy and returns a list of cilium policy
// rules. Rules derived from Spec come first, followed by the rules derived
// from Specs. Returns an ErrParse on validation failure and ErrEmptyCNP when
// both Spec and Specs are nil.
func (r *CiliumNetworkPolicy) Parse(logger *slog.Logger, clusterName string) (api.Rules, error) {
	name := r.ObjectMeta.Name
	if name == "" {
		return nil, NewErrParse("CiliumNetworkPolicy must have name")
	}
	namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta)
	// Temporary fix for CCNPs. See #12834.
	// TL;DR. CCNPs are converted into SlimCNPs and end up here so we need to
	// convert them back to CCNPs to allow proper parsing.
	if namespace == "" {
		ccnp := CiliumClusterwideNetworkPolicy{
			TypeMeta:   r.TypeMeta,
			ObjectMeta: r.ObjectMeta,
			Spec:       r.Spec,
			Specs:      r.Specs,
			Status:     r.Status,
		}
		return ccnp.Parse(logger, clusterName)
	}
	if r.Spec == nil && r.Specs == nil {
		return nil, ErrEmptyCNP
	}
	uid := r.ObjectMeta.UID
	rules := api.Rules{}
	if r.Spec != nil {
		if err := r.Spec.Sanitize(); err != nil {
			return nil, NewErrParse(fmt.Sprintf("Invalid CiliumNetworkPolicy spec: %s", err))
		}
		// NodeSelector is reserved for clusterwide policies.
		if r.Spec.NodeSelector.LabelSelector != nil {
			return nil, NewErrParse("Invalid CiliumNetworkPolicy spec: rule cannot have NodeSelector")
		}
		rules = append(rules, k8sCiliumUtils.ParseToCiliumRule(logger, clusterName, namespace, name, uid, r.Spec))
	}
	for _, spec := range r.Specs {
		if err := spec.Sanitize(); err != nil {
			return nil, NewErrParse(fmt.Sprintf("Invalid CiliumNetworkPolicy specs: %s", err))
		}
		if spec.NodeSelector.LabelSelector != nil {
			return nil, NewErrParse("Invalid CiliumNetworkPolicy spec: rule cannot have NodeSelector")
		}
		rules = append(rules, k8sCiliumUtils.ParseToCiliumRule(logger, clusterName, namespace, name, uid, spec))
	}
	return rules, nil
}
// GetIdentityLabels returns all rule labels in the CiliumNetworkPolicy.
func (r *CiliumNetworkPolicy) GetIdentityLabels() labels.LabelArray {
	namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta)
	// Even though the struct represents CiliumNetworkPolicy, we use it both for
	// CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, so here we check
	// for namespace to send correct derivedFrom label to get the correct policy
	// labels: an empty namespace means clusterwide.
	derivedFrom := k8sCiliumUtils.ResourceTypeCiliumClusterwideNetworkPolicy
	if namespace != "" {
		derivedFrom = k8sCiliumUtils.ResourceTypeCiliumNetworkPolicy
	}
	return k8sCiliumUtils.GetPolicyLabels(namespace, r.ObjectMeta.Name, r.ObjectMeta.UID, derivedFrom)
}
// RequiresDerivative returns true if the CNP has any rule that will create a
// new derivative rule, checking the single Spec first and then each entry of
// Specs.
func (r *CiliumNetworkPolicy) RequiresDerivative() bool {
	if r.Spec != nil && r.Spec.RequiresDerivative() {
		return true
	}
	// Ranging over a nil Specs slice is a no-op, so no explicit nil check is
	// required.
	for _, rule := range r.Specs {
		if rule.RequiresDerivative() {
			return true
		}
	}
	return false
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumNetworkPolicyList is a list of CiliumNetworkPolicy objects.
type CiliumNetworkPolicyList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumNetworkPolicy
	Items []CiliumNetworkPolicy `json:"items"`
}
// PolicyConditionType is the type discriminator for a NetworkPolicyCondition.
type PolicyConditionType string

const (
	// PolicyConditionValid is the condition type reporting whether the policy
	// is valid; surfaced in the CRD's "Valid" printer column.
	PolicyConditionValid PolicyConditionType = "Valid"
)
// NetworkPolicyCondition describes one observed condition of a Cilium network
// policy, following the usual Kubernetes condition conventions.
type NetworkPolicyCondition struct {
	// The type of the policy condition
	Type PolicyConditionType `json:"type"`
	// The status of the condition, one of True, False, or Unknown
	Status v1.ConditionStatus `json:"status"`
	// The last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime slimv1.Time `json:"lastTransitionTime,omitempty"`
	// The reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty"`
	// A human readable message indicating details about the transition.
	// +optional
	Message string `json:"message,omitempty"`
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
var (
	// ErrEmptyCNP is an error representing a CNP that is empty, which means it is
	// missing both a `spec` and `specs` (both are nil).
	ErrEmptyCNP = NewErrParse("Invalid CiliumNetworkPolicy spec(s): empty policy")
	// ErrEmptyCCNP is an error representing a CCNP that is empty, which means it is
	// missing both a `spec` and `specs` (both are nil).
	ErrEmptyCCNP = NewErrParse("Invalid CiliumClusterwideNetworkPolicy spec(s): empty policy")
	// ParsingErr is for comparison when checking error types; ErrParse.Is
	// matches any ErrParse regardless of message, so errors.Is(err, ParsingErr)
	// detects parse errors.
	ParsingErr = NewErrParse("")
)
// ErrParse is an error to describe where policy fails to parse due any invalid
// rule.
//
// +k8s:deepcopy-gen=false
// +deepequal-gen=false
type ErrParse struct {
msg string
}
// NewErrParse returns a new ErrParse.
func NewErrParse(msg string) ErrParse {
return ErrParse{
msg: msg,
}
}
// Error returns the error message for parsing
func (e ErrParse) Error() string {
return e.msg
}
// Is returns true if the given error is the type of 'ErrParse'.
func (_ ErrParse) Is(e error) bool {
_, ok := e.(ErrParse)
return ok
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
)
const (
	// CustomResourceDefinitionGroup is the name of the third party resource group
	CustomResourceDefinitionGroup = k8sconst.CustomResourceDefinitionGroup
	// CustomResourceDefinitionVersion is the current version of the resource
	CustomResourceDefinitionVersion = "v2"
	// Cilium Network Policy (CNP)
	// CNPPluralName is the plural name of Cilium Network Policy
	CNPPluralName = "ciliumnetworkpolicies"
	// CNPKindDefinition is the kind name for Cilium Network Policy
	CNPKindDefinition = "CiliumNetworkPolicy"
	// CNPName is the full name of Cilium Network Policy
	CNPName = CNPPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Cluster wide Network Policy (CCNP)
	// CCNPPluralName is the plural name of Cilium Cluster wide Network Policy
	CCNPPluralName = "ciliumclusterwidenetworkpolicies"
	// CCNPKindDefinition is the kind name for Cilium Cluster wide Network Policy
	CCNPKindDefinition = "CiliumClusterwideNetworkPolicy"
	// CCNPName is the full name of Cilium Cluster wide Network Policy
	CCNPName = CCNPPluralName + "." + CustomResourceDefinitionGroup
	// CiliumCIDRGroup (CCG)
	// CCGPluralName is the plural name of Cilium CIDR Group
	CCGPluralName = "ciliumcidrgroups"
	// CCGKindDefinition is the kind name for Cilium CIDR Group
	CCGKindDefinition = "CiliumCIDRGroup"
	// CCGName is the full name of Cilium CIDR Group
	CCGName = CCGPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Egress Gateway Policy (CEGP)
	// CEGPPluralName is the plural name of Cilium Egress Gateway Policy
	CEGPPluralName = "ciliumegressgatewaypolicies"
	// CEGPKindDefinition is the kind name of Cilium Egress Gateway Policy
	CEGPKindDefinition = "CiliumEgressGatewayPolicy"
	// CEGPName is the full name of Cilium Egress Gateway Policy
	CEGPName = CEGPPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Endpoint (CEP)
	// CEPPluralName is the plural name of Cilium Endpoint
	CEPPluralName = "ciliumendpoints"
	// CEPKindDefinition is the kind name for Cilium Endpoint
	CEPKindDefinition = "CiliumEndpoint"
	// CEPName is the full name of Cilium Endpoint
	CEPName = CEPPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Node (CN)
	// CNPluralName is the plural name of Cilium Node
	CNPluralName = "ciliumnodes"
	// CNKindDefinition is the kind name for Cilium Node
	CNKindDefinition = "CiliumNode"
	// CNName is the full name of Cilium Node
	CNName = CNPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Identity
	// CIDPluralName is the plural name of Cilium Identity
	CIDPluralName = "ciliumidentities"
	// CIDKindDefinition is the kind name for Cilium Identity
	CIDKindDefinition = "CiliumIdentity"
	// CIDName is the full name of Cilium Identity
	CIDName = CIDPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Local Redirect Policy (CLRP)
	// CLRPPluralName is the plural name of Local Redirect Policy
	CLRPPluralName = "ciliumlocalredirectpolicies"
	// CLRPKindDefinition is the kind name for Local Redirect Policy
	CLRPKindDefinition = "CiliumLocalRedirectPolicy"
	// CLRPName is the full name of Local Redirect Policy
	CLRPName = CLRPPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Cluster Envoy Config (CCEC)
	// CCECPluralName is the plural name of Cilium Clusterwide Envoy Config
	CCECPluralName = "ciliumclusterwideenvoyconfigs"
	// CCECKindDefinition is the kind name of Cilium Clusterwide Envoy Config
	CCECKindDefinition = "CiliumClusterwideEnvoyConfig"
	// CCECName is the full name of Cilium Clusterwide Envoy Config
	CCECName = CCECPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Envoy Config (CEC)
	// CECPluralName is the plural name of Cilium Envoy Config
	CECPluralName = "ciliumenvoyconfigs"
	// CECKindDefinition is the kind name of Cilium Envoy Config
	CECKindDefinition = "CiliumEnvoyConfig"
	// CECName is the full name of Cilium Envoy Config
	CECName = CECPluralName + "." + CustomResourceDefinitionGroup
	// CiliumNodeConfig (CNC)
	// CNCPluralName is the plural name of Cilium Node Config
	CNCPluralName = "ciliumnodeconfigs"
	// CNCKindDefinition is the kind name of Cilium Node Config
	CNCKindDefinition = "CiliumNodeConfig"
	// CNCName is the full name of Cilium Node Config
	CNCName = CNCPluralName + "." + CustomResourceDefinitionGroup
	// BGP Control Plane
	// BGPClusterConfig (BGPCC)
	BGPCCPluralName     = "ciliumbgpclusterconfigs"
	BGPCCKindDefinition = "CiliumBGPClusterConfig"
	BGPCCName           = BGPCCPluralName + "." + CustomResourceDefinitionGroup
	// BGPPeerConfig (BGPPC)
	BGPPCPluralName     = "ciliumbgppeerconfigs"
	BGPPCKindDefinition = "CiliumBGPPeerConfig"
	BGPPCName           = BGPPCPluralName + "." + CustomResourceDefinitionGroup
	// BGPAdvertisement (BGPA)
	BGPAPluralName     = "ciliumbgpadvertisements"
	BGPAKindDefinition = "CiliumBGPAdvertisement"
	BGPAName           = BGPAPluralName + "." + CustomResourceDefinitionGroup
	// BGPNodeConfig (BGPNC)
	BGPNCPluralName     = "ciliumbgpnodeconfigs"
	BGPNCKindDefinition = "CiliumBGPNodeConfig"
	BGPNCName           = BGPNCPluralName + "." + CustomResourceDefinitionGroup
	// BGPNodeConfigOverride (BGPNCO)
	BGPNCOPluralName     = "ciliumbgpnodeconfigoverrides"
	BGPNCOKindDefinition = "CiliumBGPNodeConfigOverride"
	BGPNCOName           = BGPNCOPluralName + "." + CustomResourceDefinitionGroup
	// Cilium Load Balancer IP Pool (IPPool)
	// PoolPluralName is the plural name of Cilium Load Balancer IP Pool
	PoolPluralName = "ciliumloadbalancerippools"
	// PoolKindDefinition is the kind name of Cilium Load Balancer IP Pool
	PoolKindDefinition = "CiliumLoadBalancerIPPool"
	// LBIPPoolName is the full name of Cilium Load Balancer IP Pool
	LBIPPoolName = PoolPluralName + "." + CustomResourceDefinitionGroup
)
// SchemeGroupVersion is the group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{
	Group:   CustomResourceDefinitionGroup,
	Version: CustomResourceDefinitionVersion,
}
// Resource takes an unqualified resource and returns a Group qualified
// GroupResource.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
	// SchemeBuilder is needed by DeepCopy generator.
	SchemeBuilder runtime.SchemeBuilder
	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
	// It aliases SchemeBuilder so init() can register functions on it.
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme adds all types of this clientset into the given scheme.
	// This allows composition of clientsets, like in:
	//
	//   import (
	//     "k8s.io/client-go/kubernetes"
	//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
	//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
	//   )
	//
	//   kclientset, _ := kubernetes.NewForConfig(c)
	//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
	AddToScheme = localSchemeBuilder.AddToScheme
)
// init wires the manually maintained registration function into the package's
// scheme builder.
func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes)
}
// addKnownTypes adds the list of known types to api.Scheme, registering every
// CRD-backed type of this package (and its List counterpart) under
// SchemeGroupVersion.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&CiliumNetworkPolicy{},
		&CiliumNetworkPolicyList{},
		&CiliumClusterwideNetworkPolicy{},
		&CiliumClusterwideNetworkPolicyList{},
		&CiliumCIDRGroup{},
		&CiliumCIDRGroupList{},
		&CiliumEgressGatewayPolicy{},
		&CiliumEgressGatewayPolicyList{},
		&CiliumEndpoint{},
		&CiliumEndpointList{},
		&CiliumNode{},
		&CiliumNodeList{},
		&CiliumNodeConfig{},
		&CiliumNodeConfigList{},
		&CiliumIdentity{},
		&CiliumIdentityList{},
		&CiliumLocalRedirectPolicy{},
		&CiliumLocalRedirectPolicyList{},
		&CiliumEnvoyConfig{},
		&CiliumEnvoyConfigList{},
		&CiliumClusterwideEnvoyConfig{},
		&CiliumClusterwideEnvoyConfigList{},
		&CiliumBGPClusterConfig{},
		&CiliumBGPClusterConfigList{},
		&CiliumBGPPeerConfig{},
		&CiliumBGPPeerConfigList{},
		&CiliumBGPAdvertisement{},
		&CiliumBGPAdvertisementList{},
		&CiliumBGPNodeConfig{},
		&CiliumBGPNodeConfigList{},
		&CiliumBGPNodeConfigOverride{},
		&CiliumBGPNodeConfigOverrideList{},
		&CiliumLoadBalancerIPPool{},
		&CiliumLoadBalancerIPPoolList{},
	)
	// Also register the meta types (e.g. ListOptions) for this group version.
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"net"
"sort"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/api/v1/models"
alibabaCloudTypes "github.com/cilium/cilium/pkg/alibabacloud/eni/types"
eniTypes "github.com/cilium/cilium/pkg/aws/eni/types"
azureTypes "github.com/cilium/cilium/pkg/azure/types"
ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
"github.com/cilium/cilium/pkg/node/addressing"
)
// IPv4orIPv6CIDR is a CIDR string in either IPv4 or IPv6 notation.
//
// +kubebuilder:validation:Format=cidr
type IPv4orIPv6CIDR string
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +kubebuilder:resource:categories={cilium},singular="ciliumendpoint",path="ciliumendpoints",scope="Namespaced",shortName={cep,ciliumep}
// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Security Identity",name="Security Identity",type=integer
// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string,priority=1
// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string,priority=1
// +kubebuilder:printcolumn:JSONPath=".status.state",description="Endpoint current state",name="Endpoint State",type=string
// +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv4",description="Endpoint IPv4 address",name="IPv4",type=string
// +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv6",description="Endpoint IPv6 address",name="IPv6",type=string
// +kubebuilder:storageversion
// CiliumEndpoint is the CRD carrying the status of an endpoint managed by
// Cilium; it only has a Status section, no Spec.
type CiliumEndpoint struct {
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Status is the observed state of the endpoint.
	// +kubebuilder:validation:Optional
	Status EndpointStatus `json:"status"`
}
// EndpointPolicyState defines the state of the Policy mode: "enforcing",
// "non-enforcing", "disabled".
type EndpointPolicyState string
// EndpointStatus is the status of a Cilium endpoint.
type EndpointStatus struct {
	// ID is the cilium-agent-local ID of the endpoint.
	ID int64 `json:"id,omitempty"`
	// Controllers is the list of failing controllers for this endpoint.
	Controllers ControllerList `json:"controllers,omitempty"`
	// ExternalIdentifiers is a set of identifiers to identify the endpoint
	// apart from the pod name. This includes container runtime IDs.
	ExternalIdentifiers *models.EndpointIdentifiers `json:"external-identifiers,omitempty"`
	// Health is the overall endpoint & subcomponent health.
	Health *models.EndpointHealth `json:"health,omitempty"`
	// Identity is the security identity associated with the endpoint
	Identity *EndpointIdentity `json:"identity,omitempty"`
	// Log is the list of the last few warning and error log entries
	Log []*models.EndpointStatusChange `json:"log,omitempty"`
	// Networking is the networking properties of the endpoint.
	//
	// +kubebuilder:validation:Optional
	Networking *EndpointNetworking `json:"networking,omitempty"`
	// Encryption is the encryption configuration of the node
	//
	// +kubebuilder:validation:Optional
	Encryption EncryptionSpec `json:"encryption,omitempty"`
	// Policy is the allowed/denied peers per direction for this endpoint.
	Policy *EndpointPolicy `json:"policy,omitempty"`
	// State is the state of the endpoint.
	//
	// +kubebuilder:validation:Enum=creating;waiting-for-identity;not-ready;waiting-to-regenerate;regenerating;restoring;ready;disconnecting;disconnected;invalid
	State string `json:"state,omitempty"`
	// NamedPorts are the named ports reported for this endpoint.
	NamedPorts models.NamedPorts `json:"named-ports,omitempty"`
}
// +k8s:deepcopy-gen=false
// ControllerList is a list of ControllerStatus.
type ControllerList []ControllerStatus

// Sort sorts the ControllerList in place by controller name.
func (c ControllerList) Sort() {
	sort.Slice(c, func(a, b int) bool {
		return c[a].Name < c[b].Name
	})
}
// ControllerStatus is the status of a failing controller.
type ControllerStatus struct {
	// Name is the name of the controller
	Name string `json:"name,omitempty"`
	// Configuration is the controller configuration
	Configuration *models.ControllerStatusConfiguration `json:"configuration,omitempty"`
	// Status is the status of the controller, including failure/success
	// counters and timestamps.
	Status ControllerStatusStatus `json:"status,omitempty"`
	// UUID is the UUID of the controller
	UUID string `json:"uuid,omitempty"`
}
// +k8s:deepcopy-gen=false
// ControllerStatusStatus is the detailed status section of a controller.
type ControllerStatusStatus struct {
	// ConsecutiveFailureCount is the number of failures since the last success.
	ConsecutiveFailureCount int64 `json:"consecutive-failure-count,omitempty"`
	// FailureCount is the total number of failures.
	FailureCount int64 `json:"failure-count,omitempty"`
	// LastFailureMsg is the message of the most recent failure.
	LastFailureMsg string `json:"last-failure-msg,omitempty"`
	// LastFailureTimestamp is the time of the most recent failure.
	LastFailureTimestamp string `json:"last-failure-timestamp,omitempty"`
	// LastSuccessTimestamp is the time of the most recent success.
	LastSuccessTimestamp string `json:"last-success-timestamp,omitempty"`
	// SuccessCount is the total number of successes.
	SuccessCount int64 `json:"success-count,omitempty"`
}
// EndpointPolicy represents the endpoint's policy by listing all allowed
// ingress and egress identities in combination with L4 port and protocol.
type EndpointPolicy struct {
	// Ingress is the policy state for traffic to the endpoint.
	Ingress *EndpointPolicyDirection `json:"ingress,omitempty"`
	// Egress is the policy state for traffic from the endpoint.
	Egress *EndpointPolicyDirection `json:"egress,omitempty"`
}
// EndpointPolicyDirection is the list of allowed identities per direction.
type EndpointPolicyDirection struct {
	// Enforcing is true when policy is enforced in this direction.
	Enforcing bool `json:"enforcing"`
	// Allowed is the list of allowed peers.
	Allowed AllowedIdentityList `json:"allowed,omitempty"`
	// Denied is the list of denied peers.
	Denied DenyIdentityList `json:"denied,omitempty"`
	// Deprecated
	Removing AllowedIdentityList `json:"removing,omitempty"`
	// Deprecated
	Adding AllowedIdentityList `json:"adding,omitempty"`
	// State is the policy state for this direction.
	State EndpointPolicyState `json:"state,omitempty"`
}
// IdentityTuple specifies a peer by identity, destination port and protocol.
type IdentityTuple struct {
	// Identity is the numeric identity of the peer.
	Identity uint64 `json:"identity,omitempty"`
	// IdentityLabels are the labels associated with the peer's identity.
	IdentityLabels map[string]string `json:"identity-labels,omitempty"`
	// DestPort is the destination port.
	DestPort uint16 `json:"dest-port,omitempty"`
	// Protocol is the destination protocol number.
	Protocol uint8 `json:"protocol,omitempty"`
}

// +k8s:deepcopy-gen=false
// IdentityList is a list of IdentityTuple.
type IdentityList []IdentityTuple

// Sort sorts a list IdentityList by numeric identity, port and protocol.
func (a IdentityList) Sort() {
	sort.Slice(a, func(i, j int) bool {
		left, right := a[i], a[j]
		if left.Identity != right.Identity {
			return left.Identity < right.Identity
		}
		if left.DestPort != right.DestPort {
			return left.DestPort < right.DestPort
		}
		return left.Protocol < right.Protocol
	})
}
// +k8s:deepcopy-gen=false
// AllowedIdentityList is a list of IdentityTuples that specifies peers that are
// allowed.
type AllowedIdentityList IdentityList

// Sort sorts the list by numeric identity, port and protocol, delegating to
// IdentityList.Sort.
func (a AllowedIdentityList) Sort() {
	IdentityList(a).Sort()
}
// +k8s:deepcopy-gen=false
// DenyIdentityList is a list of IdentityTuples that specifies peers that are
// denied.
type DenyIdentityList IdentityList

// Sort sorts the list by numeric identity, port and protocol, delegating to
// IdentityList.Sort.
func (d DenyIdentityList) Sort() {
	IdentityList(d).Sort()
}
// EndpointIdentity is the identity information of an endpoint.
type EndpointIdentity struct {
	// ID is the numeric identity of the endpoint
	ID int64 `json:"id,omitempty"`
	// Labels is the list of labels associated with the identity
	Labels []string `json:"labels,omitempty"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumidentity",path="ciliumidentities",scope="Cluster",shortName={ciliumid}
// +kubebuilder:printcolumn:JSONPath=".metadata.labels.io\\.kubernetes\\.pod\\.namespace",description="The namespace of the entity",name="Namespace",type=string
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the identity",name="Age",type=date
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumIdentity is a CRD that represents an identity managed by Cilium.
// It is intended as a backing store for identity allocation, acting as the
// global coordination backend, and can be used in place of a KVStore (such as
// etcd).
// The name of the CRD is the numeric identity and the labels on the CRD object
// are the kubernetes sourced labels seen by cilium. This is currently the
// only label source possible when running under kubernetes. Non-kubernetes
// labels are filtered but all labels, from all sources, are placed in the
// SecurityLabels field. These also include the source and are used to define
// the identity.
// The labels under metav1.ObjectMeta can be used when searching for
// CiliumIdentity instances that include particular labels. This can be done
// with invocations such as:
//
//	kubectl get ciliumid -l 'foo=bar'
type CiliumIdentity struct {
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// SecurityLabels is the source-of-truth set of labels for this identity.
	SecurityLabels map[string]string `json:"security-labels"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumIdentityList is a list of CiliumIdentity objects.
type CiliumIdentityList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumIdentity
	Items []CiliumIdentity `json:"items"`
}
// +k8s:deepcopy-gen=false
// AddressPair is a pair of IPv4 and/or IPv6 address.
type AddressPair struct {
	// IPV4 address, empty when none is assigned.
	IPV4 string `json:"ipv4,omitempty"`
	// IPV6 address, empty when none is assigned.
	IPV6 string `json:"ipv6,omitempty"`
}

// +k8s:deepcopy-gen=false
// AddressPairList is a list of address pairs.
type AddressPairList []*AddressPair

// Sort sorts an AddressPairList by IPv4 and IPv6 address.
func (a AddressPairList) Sort() {
	sort.Slice(a, func(i, j int) bool {
		if a[i].IPV4 != a[j].IPV4 {
			return a[i].IPV4 < a[j].IPV4
		}
		return a[i].IPV6 < a[j].IPV6
	})
}
// EndpointNetworking is the addressing information of an endpoint.
type EndpointNetworking struct {
	// IP4/6 addresses assigned to this Endpoint
	Addressing AddressPairList `json:"addressing"`
	// NodeIP is the IP of the node the endpoint is running on. The IP must
	// be reachable between nodes.
	NodeIP string `json:"node,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumEndpointList is a list of CiliumEndpoint objects.
type CiliumEndpointList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumEndpoint
	Items []CiliumEndpoint `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumnode",path="ciliumnodes",scope="Cluster",shortName={cn,ciliumn}
// +kubebuilder:printcolumn:JSONPath=".spec.addresses[?(@.type==\"CiliumInternalIP\")].ip",description="Cilium internal IP for this node",name="CiliumInternalIP",type=string
// +kubebuilder:printcolumn:JSONPath=".spec.addresses[?(@.type==\"InternalIP\")].ip",description="IP of the node",name="InternalIP",type=string
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="Time duration since creation of Ciliumnode",name="Age",type=date
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// CiliumNode represents a node managed by Cilium. It contains a specification
// to control various node specific configuration aspects and a status section
// to represent the status of the node.
type CiliumNode struct {
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Spec defines the desired specification/configuration of the node.
	Spec NodeSpec `json:"spec"`
	// Status defines the realized specification/configuration and status
	// of the node.
	//
	// +kubebuilder:validation:Optional
	Status NodeStatus `json:"status,omitempty"`
}
// NodeAddress is a node address.
type NodeAddress struct {
	// Type is the type of the node address
	Type addressing.AddressType `json:"type,omitempty"`
	// IP is an IP of a node
	IP string `json:"ip,omitempty"`
}
// NodeSpec is the configuration specific to a node.
type NodeSpec struct {
	// InstanceID is the identifier of the node. This is different from the
	// node name which is typically the FQDN of the node. The InstanceID
	// typically refers to the identifier used by the cloud provider or
	// some other means of identification.
	InstanceID string `json:"instance-id,omitempty"`
	// BootID is a unique node identifier generated on boot
	//
	// +kubebuilder:validation:Optional
	BootID string `json:"bootid,omitempty"`
	// Addresses is the list of all node addresses.
	// GetIP selects from this list when resolving the node IP.
	//
	// +kubebuilder:validation:Optional
	Addresses []NodeAddress `json:"addresses,omitempty"`
	// HealthAddressing is the addressing information for health connectivity
	// checking.
	//
	// +kubebuilder:validation:Optional
	HealthAddressing HealthAddressingSpec `json:"health,omitempty"`
	// IngressAddressing is the addressing information for Ingress listener.
	//
	// +kubebuilder:validation:Optional
	IngressAddressing AddressPair `json:"ingress,omitempty"`
	// Encryption is the encryption configuration of the node.
	//
	// +kubebuilder:validation:Optional
	Encryption EncryptionSpec `json:"encryption,omitempty"`
	// ENI is the AWS ENI specific configuration.
	//
	// +kubebuilder:validation:Optional
	ENI eniTypes.ENISpec `json:"eni,omitempty"`
	// Azure is the Azure IPAM specific configuration.
	//
	// +kubebuilder:validation:Optional
	Azure azureTypes.AzureSpec `json:"azure,omitempty"`
	// AlibabaCloud is the AlibabaCloud IPAM specific configuration.
	//
	// +kubebuilder:validation:Optional
	AlibabaCloud alibabaCloudTypes.Spec `json:"alibaba-cloud,omitempty"`
	// IPAM is the address management specification. This section can be
	// populated by a user or it can be automatically populated by an IPAM
	// operator.
	//
	// +kubebuilder:validation:Optional
	IPAM ipamTypes.IPAMSpec `json:"ipam,omitempty"`
	// NodeIdentity is the Cilium numeric identity allocated for the node, if any.
	//
	// +kubebuilder:validation:Optional
	NodeIdentity uint64 `json:"nodeidentity,omitempty"`
}
// HealthAddressingSpec is the addressing information required to do
// connectivity health checking.
type HealthAddressingSpec struct {
	// IPv4 is the IPv4 address of the IPv4 health endpoint.
	//
	// +kubebuilder:validation:Optional
	IPv4 string `json:"ipv4,omitempty"`
	// IPv6 is the IPv6 address of the IPv6 health endpoint.
	//
	// +kubebuilder:validation:Optional
	IPv6 string `json:"ipv6,omitempty"`
}
// EncryptionSpec defines the encryption relevant configuration of a node.
type EncryptionSpec struct {
	// Key is the index to the key to use for encryption or 0 if encryption is
	// disabled.
	//
	// +kubebuilder:validation:Optional
	Key int `json:"key,omitempty"`
}
// NodeStatus is the status of a node.
type NodeStatus struct {
	// ENI is the AWS ENI specific status of the node.
	//
	// +kubebuilder:validation:Optional
	ENI eniTypes.ENIStatus `json:"eni,omitempty"`
	// Azure is the Azure specific status of the node.
	//
	// +kubebuilder:validation:Optional
	Azure azureTypes.AzureStatus `json:"azure,omitempty"`
	// IPAM is the IPAM status of the node.
	//
	// +kubebuilder:validation:Optional
	IPAM ipamTypes.IPAMStatus `json:"ipam,omitempty"`
	// AlibabaCloud is the AlibabaCloud specific status of the node.
	//
	// +kubebuilder:validation:Optional
	AlibabaCloud alibabaCloudTypes.ENIStatus `json:"alibaba-cloud,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumNodeList is a list of CiliumNode objects.
type CiliumNodeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumNode
	Items []CiliumNode `json:"items"`
}
// InstanceID returns the InstanceID of a CiliumNode. A nil receiver yields
// the empty string.
func (n *CiliumNode) InstanceID() string {
	if n == nil {
		return ""
	}
	if id := n.Spec.InstanceID; id != "" {
		return id
	}
	// OBSOLETE: This fallback can be removed in Cilium 1.9
	return n.Spec.ENI.InstanceID
}
// ToString returns the IP of the node address as a string.
func (n NodeAddress) ToString() string {
	return n.IP
}

// AddrType returns the type of the node address.
func (n NodeAddress) AddrType() addressing.AddressType {
	return n.Type
}
// GetIP returns one of the CiliumNode's IP addresses available with the
// following priority:
// - NodeInternalIP
// - NodeExternalIP
// - other IP address type
// Nil is returned if no IP of the requested address family could be
// extracted from the CiliumNode's addresses.
func (n *CiliumNode) GetIP(ipv6 bool) net.IP {
return addressing.ExtractNodeIP[NodeAddress](n.Spec.Addresses, ipv6)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v2
import (
models "github.com/cilium/cilium/api/v1/models"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
api "github.com/cilium/cilium/pkg/policy/api"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): the helpers below are generated by deepcopy-gen (see the
// "Code generated ... DO NOT EDIT" file header). Do not hand-edit; change the
// source types and regenerate instead.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPAdvertisement) DeepCopyInto(out *BGPAdvertisement) {
// Shallow-copy all value fields first; pointer fields are re-allocated below.
*out = *in
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(BGPServiceOptions)
(*in).DeepCopyInto(*out)
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Attributes != nil {
in, out := &in.Attributes, &out.Attributes
*out = new(BGPAttributes)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPAdvertisement.
func (in *BGPAdvertisement) DeepCopy() *BGPAdvertisement {
if in == nil {
return nil
}
out := new(BGPAdvertisement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPAttributes) DeepCopyInto(out *BGPAttributes) {
*out = *in
if in.Communities != nil {
in, out := &in.Communities, &out.Communities
*out = new(BGPCommunities)
(*in).DeepCopyInto(*out)
}
if in.LocalPreference != nil {
in, out := &in.LocalPreference, &out.LocalPreference
// Scalar pointer: allocate and copy the pointed-to value.
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPAttributes.
func (in *BGPAttributes) DeepCopy() *BGPAttributes {
if in == nil {
return nil
}
out := new(BGPAttributes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPAutoDiscovery) DeepCopyInto(out *BGPAutoDiscovery) {
*out = *in
if in.DefaultGateway != nil {
in, out := &in.DefaultGateway, &out.DefaultGateway
*out = new(DefaultGateway)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPAutoDiscovery.
func (in *BGPAutoDiscovery) DeepCopy() *BGPAutoDiscovery {
if in == nil {
return nil
}
out := new(BGPAutoDiscovery)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPCommunities) DeepCopyInto(out *BGPCommunities) {
*out = *in
if in.Standard != nil {
in, out := &in.Standard, &out.Standard
// Slices of value types: a fresh backing array plus copy is a deep copy.
*out = make([]BGPStandardCommunity, len(*in))
copy(*out, *in)
}
if in.WellKnown != nil {
in, out := &in.WellKnown, &out.WellKnown
*out = make([]BGPWellKnownCommunity, len(*in))
copy(*out, *in)
}
if in.Large != nil {
in, out := &in.Large, &out.Large
*out = make([]BGPLargeCommunity, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPCommunities.
func (in *BGPCommunities) DeepCopy() *BGPCommunities {
if in == nil {
return nil
}
out := new(BGPCommunities)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPFamilyRouteCount) DeepCopyInto(out *BGPFamilyRouteCount) {
*out = *in
if in.Received != nil {
in, out := &in.Received, &out.Received
*out = new(int32)
**out = **in
}
if in.Advertised != nil {
in, out := &in.Advertised, &out.Advertised
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPFamilyRouteCount.
func (in *BGPFamilyRouteCount) DeepCopy() *BGPFamilyRouteCount {
if in == nil {
return nil
}
out := new(BGPFamilyRouteCount)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPServiceOptions) DeepCopyInto(out *BGPServiceOptions) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]BGPServiceAddressType, len(*in))
copy(*out, *in)
}
if in.AggregationLengthIPv4 != nil {
in, out := &in.AggregationLengthIPv4, &out.AggregationLengthIPv4
*out = new(int16)
**out = **in
}
if in.AggregationLengthIPv6 != nil {
in, out := &in.AggregationLengthIPv6, &out.AggregationLengthIPv6
*out = new(int16)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPServiceOptions.
func (in *BGPServiceOptions) DeepCopy() *BGPServiceOptions {
if in == nil {
return nil
}
out := new(BGPServiceOptions)
in.DeepCopyInto(out)
return out
}
// NOTE(review): generated by deepcopy-gen; regenerate rather than hand-edit.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPAdvertisement) DeepCopyInto(out *CiliumBGPAdvertisement) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisement.
func (in *CiliumBGPAdvertisement) DeepCopy() *CiliumBGPAdvertisement {
if in == nil {
return nil
}
out := new(CiliumBGPAdvertisement)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// It satisfies k8s.io/apimachinery/pkg/runtime.Object for this CRD type.
func (in *CiliumBGPAdvertisement) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPAdvertisementList) DeepCopyInto(out *CiliumBGPAdvertisementList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPAdvertisement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisementList.
func (in *CiliumBGPAdvertisementList) DeepCopy() *CiliumBGPAdvertisementList {
if in == nil {
return nil
}
out := new(CiliumBGPAdvertisementList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPAdvertisementList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPAdvertisementSpec) DeepCopyInto(out *CiliumBGPAdvertisementSpec) {
*out = *in
if in.Advertisements != nil {
in, out := &in.Advertisements, &out.Advertisements
*out = make([]BGPAdvertisement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisementSpec.
func (in *CiliumBGPAdvertisementSpec) DeepCopy() *CiliumBGPAdvertisementSpec {
if in == nil {
return nil
}
out := new(CiliumBGPAdvertisementSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPClusterConfig) DeepCopyInto(out *CiliumBGPClusterConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfig.
func (in *CiliumBGPClusterConfig) DeepCopy() *CiliumBGPClusterConfig {
if in == nil {
return nil
}
out := new(CiliumBGPClusterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPClusterConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPClusterConfigList) DeepCopyInto(out *CiliumBGPClusterConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPClusterConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigList.
func (in *CiliumBGPClusterConfigList) DeepCopy() *CiliumBGPClusterConfigList {
if in == nil {
return nil
}
out := new(CiliumBGPClusterConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPClusterConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// NOTE(review): generated by deepcopy-gen; regenerate rather than hand-edit.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPClusterConfigSpec) DeepCopyInto(out *CiliumBGPClusterConfigSpec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.BGPInstances != nil {
in, out := &in.BGPInstances, &out.BGPInstances
*out = make([]CiliumBGPInstance, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigSpec.
func (in *CiliumBGPClusterConfigSpec) DeepCopy() *CiliumBGPClusterConfigSpec {
if in == nil {
return nil
}
out := new(CiliumBGPClusterConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPClusterConfigStatus) DeepCopyInto(out *CiliumBGPClusterConfigStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigStatus.
func (in *CiliumBGPClusterConfigStatus) DeepCopy() *CiliumBGPClusterConfigStatus {
if in == nil {
return nil
}
out := new(CiliumBGPClusterConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// CiliumBGPFamily has only value fields, so the plain assignment suffices.
func (in *CiliumBGPFamily) DeepCopyInto(out *CiliumBGPFamily) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPFamily.
func (in *CiliumBGPFamily) DeepCopy() *CiliumBGPFamily {
if in == nil {
return nil
}
out := new(CiliumBGPFamily)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPFamilyWithAdverts) DeepCopyInto(out *CiliumBGPFamilyWithAdverts) {
*out = *in
out.CiliumBGPFamily = in.CiliumBGPFamily
if in.Advertisements != nil {
in, out := &in.Advertisements, &out.Advertisements
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPFamilyWithAdverts.
func (in *CiliumBGPFamilyWithAdverts) DeepCopy() *CiliumBGPFamilyWithAdverts {
if in == nil {
return nil
}
out := new(CiliumBGPFamilyWithAdverts)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPInstance) DeepCopyInto(out *CiliumBGPInstance) {
*out = *in
if in.LocalASN != nil {
in, out := &in.LocalASN, &out.LocalASN
*out = new(int64)
**out = **in
}
if in.LocalPort != nil {
in, out := &in.LocalPort, &out.LocalPort
*out = new(int32)
**out = **in
}
if in.Peers != nil {
in, out := &in.Peers, &out.Peers
*out = make([]CiliumBGPPeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPInstance.
func (in *CiliumBGPInstance) DeepCopy() *CiliumBGPInstance {
if in == nil {
return nil
}
out := new(CiliumBGPInstance)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNeighborGracefulRestart) DeepCopyInto(out *CiliumBGPNeighborGracefulRestart) {
*out = *in
if in.RestartTimeSeconds != nil {
in, out := &in.RestartTimeSeconds, &out.RestartTimeSeconds
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighborGracefulRestart.
func (in *CiliumBGPNeighborGracefulRestart) DeepCopy() *CiliumBGPNeighborGracefulRestart {
if in == nil {
return nil
}
out := new(CiliumBGPNeighborGracefulRestart)
in.DeepCopyInto(out)
return out
}
// NOTE(review): generated by deepcopy-gen; regenerate rather than hand-edit.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfig) DeepCopyInto(out *CiliumBGPNodeConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfig.
func (in *CiliumBGPNodeConfig) DeepCopy() *CiliumBGPNodeConfig {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPNodeConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigInstanceOverride) DeepCopyInto(out *CiliumBGPNodeConfigInstanceOverride) {
*out = *in
if in.RouterID != nil {
in, out := &in.RouterID, &out.RouterID
*out = new(string)
**out = **in
}
if in.LocalPort != nil {
in, out := &in.LocalPort, &out.LocalPort
*out = new(int32)
**out = **in
}
if in.Peers != nil {
in, out := &in.Peers, &out.Peers
*out = make([]CiliumBGPNodeConfigPeerOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.LocalASN != nil {
in, out := &in.LocalASN, &out.LocalASN
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigInstanceOverride.
func (in *CiliumBGPNodeConfigInstanceOverride) DeepCopy() *CiliumBGPNodeConfigInstanceOverride {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigInstanceOverride)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigList) DeepCopyInto(out *CiliumBGPNodeConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPNodeConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigList.
func (in *CiliumBGPNodeConfigList) DeepCopy() *CiliumBGPNodeConfigList {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPNodeConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigOverride) DeepCopyInto(out *CiliumBGPNodeConfigOverride) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverride.
func (in *CiliumBGPNodeConfigOverride) DeepCopy() *CiliumBGPNodeConfigOverride {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigOverride)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPNodeConfigOverride) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigOverrideList) DeepCopyInto(out *CiliumBGPNodeConfigOverrideList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPNodeConfigOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverrideList.
func (in *CiliumBGPNodeConfigOverrideList) DeepCopy() *CiliumBGPNodeConfigOverrideList {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigOverrideList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPNodeConfigOverrideList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// NOTE(review): generated by deepcopy-gen; regenerate rather than hand-edit.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigOverrideSpec) DeepCopyInto(out *CiliumBGPNodeConfigOverrideSpec) {
*out = *in
if in.BGPInstances != nil {
in, out := &in.BGPInstances, &out.BGPInstances
*out = make([]CiliumBGPNodeConfigInstanceOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverrideSpec.
func (in *CiliumBGPNodeConfigOverrideSpec) DeepCopy() *CiliumBGPNodeConfigOverrideSpec {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigOverrideSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigPeerOverride) DeepCopyInto(out *CiliumBGPNodeConfigPeerOverride) {
*out = *in
if in.LocalAddress != nil {
in, out := &in.LocalAddress, &out.LocalAddress
*out = new(string)
**out = **in
}
if in.LocalPort != nil {
in, out := &in.LocalPort, &out.LocalPort
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigPeerOverride.
func (in *CiliumBGPNodeConfigPeerOverride) DeepCopy() *CiliumBGPNodeConfigPeerOverride {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigPeerOverride)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeInstance) DeepCopyInto(out *CiliumBGPNodeInstance) {
*out = *in
if in.LocalASN != nil {
in, out := &in.LocalASN, &out.LocalASN
*out = new(int64)
**out = **in
}
if in.RouterID != nil {
in, out := &in.RouterID, &out.RouterID
*out = new(string)
**out = **in
}
if in.LocalPort != nil {
in, out := &in.LocalPort, &out.LocalPort
*out = new(int32)
**out = **in
}
if in.Peers != nil {
in, out := &in.Peers, &out.Peers
*out = make([]CiliumBGPNodePeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeInstance.
func (in *CiliumBGPNodeInstance) DeepCopy() *CiliumBGPNodeInstance {
if in == nil {
return nil
}
out := new(CiliumBGPNodeInstance)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeInstanceStatus) DeepCopyInto(out *CiliumBGPNodeInstanceStatus) {
*out = *in
if in.LocalASN != nil {
in, out := &in.LocalASN, &out.LocalASN
*out = new(int64)
**out = **in
}
if in.PeerStatuses != nil {
in, out := &in.PeerStatuses, &out.PeerStatuses
*out = make([]CiliumBGPNodePeerStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeInstanceStatus.
func (in *CiliumBGPNodeInstanceStatus) DeepCopy() *CiliumBGPNodeInstanceStatus {
if in == nil {
return nil
}
out := new(CiliumBGPNodeInstanceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodePeer) DeepCopyInto(out *CiliumBGPNodePeer) {
*out = *in
if in.PeerAddress != nil {
in, out := &in.PeerAddress, &out.PeerAddress
*out = new(string)
**out = **in
}
if in.PeerASN != nil {
in, out := &in.PeerASN, &out.PeerASN
*out = new(int64)
**out = **in
}
if in.AutoDiscovery != nil {
in, out := &in.AutoDiscovery, &out.AutoDiscovery
*out = new(BGPAutoDiscovery)
(*in).DeepCopyInto(*out)
}
if in.LocalAddress != nil {
in, out := &in.LocalAddress, &out.LocalAddress
*out = new(string)
**out = **in
}
if in.PeerConfigRef != nil {
in, out := &in.PeerConfigRef, &out.PeerConfigRef
// PeerConfigReference is copied by value (no reference fields to chase).
*out = new(PeerConfigReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodePeer.
func (in *CiliumBGPNodePeer) DeepCopy() *CiliumBGPNodePeer {
if in == nil {
return nil
}
out := new(CiliumBGPNodePeer)
in.DeepCopyInto(out)
return out
}
// NOTE(review): generated by deepcopy-gen; regenerate rather than hand-edit.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodePeerStatus) DeepCopyInto(out *CiliumBGPNodePeerStatus) {
*out = *in
if in.PeerASN != nil {
in, out := &in.PeerASN, &out.PeerASN
*out = new(int64)
**out = **in
}
if in.PeeringState != nil {
in, out := &in.PeeringState, &out.PeeringState
*out = new(string)
**out = **in
}
if in.Timers != nil {
in, out := &in.Timers, &out.Timers
*out = new(CiliumBGPTimersState)
(*in).DeepCopyInto(*out)
}
if in.EstablishedTime != nil {
in, out := &in.EstablishedTime, &out.EstablishedTime
*out = new(string)
**out = **in
}
if in.RouteCount != nil {
in, out := &in.RouteCount, &out.RouteCount
*out = make([]BGPFamilyRouteCount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodePeerStatus.
func (in *CiliumBGPNodePeerStatus) DeepCopy() *CiliumBGPNodePeerStatus {
if in == nil {
return nil
}
out := new(CiliumBGPNodePeerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeSpec) DeepCopyInto(out *CiliumBGPNodeSpec) {
*out = *in
if in.BGPInstances != nil {
in, out := &in.BGPInstances, &out.BGPInstances
*out = make([]CiliumBGPNodeInstance, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeSpec.
func (in *CiliumBGPNodeSpec) DeepCopy() *CiliumBGPNodeSpec {
if in == nil {
return nil
}
out := new(CiliumBGPNodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeStatus) DeepCopyInto(out *CiliumBGPNodeStatus) {
*out = *in
if in.BGPInstances != nil {
in, out := &in.BGPInstances, &out.BGPInstances
*out = make([]CiliumBGPNodeInstanceStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeStatus.
func (in *CiliumBGPNodeStatus) DeepCopy() *CiliumBGPNodeStatus {
if in == nil {
return nil
}
out := new(CiliumBGPNodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeer) DeepCopyInto(out *CiliumBGPPeer) {
*out = *in
if in.PeerAddress != nil {
in, out := &in.PeerAddress, &out.PeerAddress
*out = new(string)
**out = **in
}
if in.PeerASN != nil {
in, out := &in.PeerASN, &out.PeerASN
*out = new(int64)
**out = **in
}
if in.AutoDiscovery != nil {
in, out := &in.AutoDiscovery, &out.AutoDiscovery
*out = new(BGPAutoDiscovery)
(*in).DeepCopyInto(*out)
}
if in.PeerConfigRef != nil {
in, out := &in.PeerConfigRef, &out.PeerConfigRef
*out = new(PeerConfigReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeer.
func (in *CiliumBGPPeer) DeepCopy() *CiliumBGPPeer {
if in == nil {
return nil
}
out := new(CiliumBGPPeer)
in.DeepCopyInto(out)
return out
}
// NOTE(review): generated by deepcopy-gen; regenerate rather than hand-edit.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeerConfig) DeepCopyInto(out *CiliumBGPPeerConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfig.
func (in *CiliumBGPPeerConfig) DeepCopy() *CiliumBGPPeerConfig {
if in == nil {
return nil
}
out := new(CiliumBGPPeerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPPeerConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeerConfigList) DeepCopyInto(out *CiliumBGPPeerConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPPeerConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigList.
func (in *CiliumBGPPeerConfigList) DeepCopy() *CiliumBGPPeerConfigList {
if in == nil {
return nil
}
out := new(CiliumBGPPeerConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPPeerConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeerConfigSpec) DeepCopyInto(out *CiliumBGPPeerConfigSpec) {
*out = *in
if in.Transport != nil {
in, out := &in.Transport, &out.Transport
*out = new(CiliumBGPTransport)
(*in).DeepCopyInto(*out)
}
if in.Timers != nil {
in, out := &in.Timers, &out.Timers
*out = new(CiliumBGPTimers)
(*in).DeepCopyInto(*out)
}
if in.AuthSecretRef != nil {
in, out := &in.AuthSecretRef, &out.AuthSecretRef
*out = new(string)
**out = **in
}
if in.GracefulRestart != nil {
in, out := &in.GracefulRestart, &out.GracefulRestart
*out = new(CiliumBGPNeighborGracefulRestart)
(*in).DeepCopyInto(*out)
}
if in.EBGPMultihop != nil {
in, out := &in.EBGPMultihop, &out.EBGPMultihop
*out = new(int32)
**out = **in
}
if in.Families != nil {
in, out := &in.Families, &out.Families
*out = make([]CiliumBGPFamilyWithAdverts, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigSpec.
func (in *CiliumBGPPeerConfigSpec) DeepCopy() *CiliumBGPPeerConfigSpec {
if in == nil {
return nil
}
out := new(CiliumBGPPeerConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeerConfigStatus) DeepCopyInto(out *CiliumBGPPeerConfigStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigStatus.
func (in *CiliumBGPPeerConfigStatus) DeepCopy() *CiliumBGPPeerConfigStatus {
if in == nil {
return nil
}
out := new(CiliumBGPPeerConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPTimers) DeepCopyInto(out *CiliumBGPTimers) {
	// Shallow-copy every field first; pointer fields are re-allocated below
	// so that out shares no memory with in.
	*out = *in
	if in.ConnectRetryTimeSeconds != nil {
		// Generator idiom: shadow in/out with pointers to the field pair
		// being deep-copied, scoped to this if-block.
		in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.HoldTimeSeconds != nil {
		in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.KeepAliveTimeSeconds != nil {
		in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds
		*out = new(int32)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTimers.
func (in *CiliumBGPTimers) DeepCopy() *CiliumBGPTimers {
	// nil receiver yields nil copy so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(CiliumBGPTimers)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPTimersState) DeepCopyInto(out *CiliumBGPTimersState) {
	*out = *in
	if in.AppliedHoldTimeSeconds != nil {
		in, out := &in.AppliedHoldTimeSeconds, &out.AppliedHoldTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.AppliedKeepaliveSeconds != nil {
		in, out := &in.AppliedKeepaliveSeconds, &out.AppliedKeepaliveSeconds
		*out = new(int32)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTimersState.
func (in *CiliumBGPTimersState) DeepCopy() *CiliumBGPTimersState {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPTimersState)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPTransport) DeepCopyInto(out *CiliumBGPTransport) {
	*out = *in
	if in.PeerPort != nil {
		in, out := &in.PeerPort, &out.PeerPort
		*out = new(int32)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTransport.
func (in *CiliumBGPTransport) DeepCopy() *CiliumBGPTransport {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPTransport)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumCIDRGroup) DeepCopyInto(out *CiliumCIDRGroup) {
	// Shallow-copy, then deep-copy the embedded k8s metadata and the Spec.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroup.
func (in *CiliumCIDRGroup) DeepCopy() *CiliumCIDRGroup {
	if in == nil {
		return nil
	}
	out := new(CiliumCIDRGroup)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumCIDRGroup) DeepCopyObject() runtime.Object {
	// Satisfies the k8s runtime.Object interface by returning the typed copy
	// as an interface value (nil if the receiver is nil).
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumCIDRGroupList) DeepCopyInto(out *CiliumCIDRGroupList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Shadow in/out with pointers to the Items slices for this scoped copy.
		in, out := &in.Items, &out.Items
		*out = make([]CiliumCIDRGroup, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroupList.
func (in *CiliumCIDRGroupList) DeepCopy() *CiliumCIDRGroupList {
	if in == nil {
		return nil
	}
	out := new(CiliumCIDRGroupList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumCIDRGroupList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumCIDRGroupSpec) DeepCopyInto(out *CiliumCIDRGroupSpec) {
	*out = *in
	if in.ExternalCIDRs != nil {
		in, out := &in.ExternalCIDRs, &out.ExternalCIDRs
		// Flat copy: the generator emits copy() when the element type needs
		// no per-element deep copy.
		*out = make([]api.CIDR, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroupSpec.
func (in *CiliumCIDRGroupSpec) DeepCopy() *CiliumCIDRGroupSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumCIDRGroupSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideEnvoyConfig) DeepCopyInto(out *CiliumClusterwideEnvoyConfig) {
	// Shallow-copy, then deep-copy the embedded k8s metadata and the Spec.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideEnvoyConfig.
func (in *CiliumClusterwideEnvoyConfig) DeepCopy() *CiliumClusterwideEnvoyConfig {
	if in == nil {
		return nil
	}
	out := new(CiliumClusterwideEnvoyConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideEnvoyConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopyInto(out *CiliumClusterwideEnvoyConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Shadow in/out with pointers to the Items slices for this scoped copy.
		in, out := &in.Items, &out.Items
		*out = make([]CiliumClusterwideEnvoyConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideEnvoyConfigList.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopy() *CiliumClusterwideEnvoyConfigList {
	if in == nil {
		return nil
	}
	out := new(CiliumClusterwideEnvoyConfigList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideNetworkPolicy) DeepCopyInto(out *CiliumClusterwideNetworkPolicy) {
	// Shallow-copy, then deep-copy metadata, the optional Spec rule, the
	// Specs rule slice (with per-element nil checks), and the Status.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Spec != nil {
		in, out := &in.Spec, &out.Spec
		*out = new(api.Rule)
		(*in).DeepCopyInto(*out)
	}
	if in.Specs != nil {
		in, out := &in.Specs, &out.Specs
		*out = make(api.Rules, len(*in))
		for i := range *in {
			// Elements are pointers and may individually be nil.
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(api.Rule)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideNetworkPolicy.
func (in *CiliumClusterwideNetworkPolicy) DeepCopy() *CiliumClusterwideNetworkPolicy {
	if in == nil {
		return nil
	}
	out := new(CiliumClusterwideNetworkPolicy)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideNetworkPolicy) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopyInto(out *CiliumClusterwideNetworkPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumClusterwideNetworkPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideNetworkPolicyList.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopy() *CiliumClusterwideNetworkPolicyList {
	if in == nil {
		return nil
	}
	out := new(CiliumClusterwideNetworkPolicyList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicy) DeepCopyInto(out *CiliumEgressGatewayPolicy) {
	// Shallow-copy, then deep-copy the embedded k8s metadata and the Spec.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicy.
func (in *CiliumEgressGatewayPolicy) DeepCopy() *CiliumEgressGatewayPolicy {
	if in == nil {
		return nil
	}
	out := new(CiliumEgressGatewayPolicy)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEgressGatewayPolicy) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicyList) DeepCopyInto(out *CiliumEgressGatewayPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumEgressGatewayPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicyList.
func (in *CiliumEgressGatewayPolicyList) DeepCopy() *CiliumEgressGatewayPolicyList {
	if in == nil {
		return nil
	}
	out := new(CiliumEgressGatewayPolicyList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEgressGatewayPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicySpec) DeepCopyInto(out *CiliumEgressGatewayPolicySpec) {
	*out = *in
	if in.Selectors != nil {
		in, out := &in.Selectors, &out.Selectors
		*out = make([]EgressRule, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.DestinationCIDRs != nil {
		in, out := &in.DestinationCIDRs, &out.DestinationCIDRs
		// Flat copy: element type needs no per-element deep copy.
		*out = make([]CIDR, len(*in))
		copy(*out, *in)
	}
	if in.ExcludedCIDRs != nil {
		in, out := &in.ExcludedCIDRs, &out.ExcludedCIDRs
		*out = make([]CIDR, len(*in))
		copy(*out, *in)
	}
	if in.EgressGateway != nil {
		in, out := &in.EgressGateway, &out.EgressGateway
		*out = new(EgressGateway)
		(*in).DeepCopyInto(*out)
	}
	if in.EgressGateways != nil {
		in, out := &in.EgressGateways, &out.EgressGateways
		*out = make([]EgressGateway, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicySpec.
func (in *CiliumEgressGatewayPolicySpec) DeepCopy() *CiliumEgressGatewayPolicySpec {
	if in == nil {
		return nil
	}
	out := new(CiliumEgressGatewayPolicySpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEndpoint) DeepCopyInto(out *CiliumEndpoint) {
	// Shallow-copy, then deep-copy the embedded k8s metadata and the Status.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpoint.
func (in *CiliumEndpoint) DeepCopy() *CiliumEndpoint {
	if in == nil {
		return nil
	}
	out := new(CiliumEndpoint)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEndpoint) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEndpointList) DeepCopyInto(out *CiliumEndpointList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumEndpoint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpointList.
func (in *CiliumEndpointList) DeepCopy() *CiliumEndpointList {
	if in == nil {
		return nil
	}
	out := new(CiliumEndpointList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEndpointList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfig) DeepCopyInto(out *CiliumEnvoyConfig) {
	// Shallow-copy, then deep-copy the embedded k8s metadata and the Spec.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfig.
func (in *CiliumEnvoyConfig) DeepCopy() *CiliumEnvoyConfig {
	if in == nil {
		return nil
	}
	out := new(CiliumEnvoyConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEnvoyConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfigList) DeepCopyInto(out *CiliumEnvoyConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumEnvoyConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfigList.
func (in *CiliumEnvoyConfigList) DeepCopy() *CiliumEnvoyConfigList {
	if in == nil {
		return nil
	}
	out := new(CiliumEnvoyConfigList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEnvoyConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfigSpec) DeepCopyInto(out *CiliumEnvoyConfigSpec) {
	*out = *in
	if in.Services != nil {
		in, out := &in.Services, &out.Services
		*out = make([]*ServiceListener, len(*in))
		for i := range *in {
			// Elements are pointers and may individually be nil.
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(ServiceListener)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	if in.BackendServices != nil {
		in, out := &in.BackendServices, &out.BackendServices
		*out = make([]*Service, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(Service)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make([]XDSResource, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfigSpec.
func (in *CiliumEnvoyConfigSpec) DeepCopy() *CiliumEnvoyConfigSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumEnvoyConfigSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumIdentity) DeepCopyInto(out *CiliumIdentity) {
	// Shallow-copy, then deep-copy metadata and the SecurityLabels map.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.SecurityLabels != nil {
		in, out := &in.SecurityLabels, &out.SecurityLabels
		// Fresh map so out does not alias in's map storage.
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumIdentity.
func (in *CiliumIdentity) DeepCopy() *CiliumIdentity {
	if in == nil {
		return nil
	}
	out := new(CiliumIdentity)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumIdentity) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumIdentityList) DeepCopyInto(out *CiliumIdentityList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumIdentity, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumIdentityList.
func (in *CiliumIdentityList) DeepCopy() *CiliumIdentityList {
	if in == nil {
		return nil
	}
	out := new(CiliumIdentityList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumIdentityList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPool) DeepCopyInto(out *CiliumLoadBalancerIPPool) {
	// Shallow-copy, then deep-copy metadata, Spec, and Status.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPool.
func (in *CiliumLoadBalancerIPPool) DeepCopy() *CiliumLoadBalancerIPPool {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPool)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLoadBalancerIPPool) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolIPBlock) DeepCopyInto(out *CiliumLoadBalancerIPPoolIPBlock) {
	// Value assignment suffices: the type contains no reference fields.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolIPBlock.
func (in *CiliumLoadBalancerIPPoolIPBlock) DeepCopy() *CiliumLoadBalancerIPPoolIPBlock {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPoolIPBlock)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolList) DeepCopyInto(out *CiliumLoadBalancerIPPoolList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumLoadBalancerIPPool, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolList.
func (in *CiliumLoadBalancerIPPoolList) DeepCopy() *CiliumLoadBalancerIPPoolList {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPoolList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLoadBalancerIPPoolList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolSpec) DeepCopyInto(out *CiliumLoadBalancerIPPoolSpec) {
	*out = *in
	if in.ServiceSelector != nil {
		in, out := &in.ServiceSelector, &out.ServiceSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.Blocks != nil {
		in, out := &in.Blocks, &out.Blocks
		// Flat copy: IPBlock elements need no per-element deep copy.
		*out = make([]CiliumLoadBalancerIPPoolIPBlock, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolSpec.
func (in *CiliumLoadBalancerIPPoolSpec) DeepCopy() *CiliumLoadBalancerIPPoolSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPoolSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolStatus) DeepCopyInto(out *CiliumLoadBalancerIPPoolStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolStatus.
func (in *CiliumLoadBalancerIPPoolStatus) DeepCopy() *CiliumLoadBalancerIPPoolStatus {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPoolStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicy) DeepCopyInto(out *CiliumLocalRedirectPolicy) {
	// Shallow-copy, then deep-copy metadata and Spec; Status is assigned by
	// value (it has no reference fields — see its DeepCopyInto below).
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicy.
func (in *CiliumLocalRedirectPolicy) DeepCopy() *CiliumLocalRedirectPolicy {
	if in == nil {
		return nil
	}
	out := new(CiliumLocalRedirectPolicy)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLocalRedirectPolicy) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicyList) DeepCopyInto(out *CiliumLocalRedirectPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumLocalRedirectPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicyList.
func (in *CiliumLocalRedirectPolicyList) DeepCopy() *CiliumLocalRedirectPolicyList {
	if in == nil {
		return nil
	}
	out := new(CiliumLocalRedirectPolicyList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLocalRedirectPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicySpec) DeepCopyInto(out *CiliumLocalRedirectPolicySpec) {
	*out = *in
	in.RedirectFrontend.DeepCopyInto(&out.RedirectFrontend)
	in.RedirectBackend.DeepCopyInto(&out.RedirectBackend)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicySpec.
func (in *CiliumLocalRedirectPolicySpec) DeepCopy() *CiliumLocalRedirectPolicySpec {
	if in == nil {
		return nil
	}
	out := new(CiliumLocalRedirectPolicySpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicyStatus) DeepCopyInto(out *CiliumLocalRedirectPolicyStatus) {
	// Value assignment suffices: the type contains no reference fields.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicyStatus.
func (in *CiliumLocalRedirectPolicyStatus) DeepCopy() *CiliumLocalRedirectPolicyStatus {
	if in == nil {
		return nil
	}
	out := new(CiliumLocalRedirectPolicyStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicy) DeepCopyInto(out *CiliumNetworkPolicy) {
	// Shallow-copy, then deep-copy metadata, the optional Spec rule, the
	// Specs rule slice (with per-element nil checks), and the Status.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Spec != nil {
		in, out := &in.Spec, &out.Spec
		*out = new(api.Rule)
		(*in).DeepCopyInto(*out)
	}
	if in.Specs != nil {
		in, out := &in.Specs, &out.Specs
		*out = make(api.Rules, len(*in))
		for i := range *in {
			// Elements are pointers and may individually be nil.
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(api.Rule)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicy.
func (in *CiliumNetworkPolicy) DeepCopy() *CiliumNetworkPolicy {
	if in == nil {
		return nil
	}
	out := new(CiliumNetworkPolicy)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNetworkPolicy) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyList) DeepCopyInto(out *CiliumNetworkPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumNetworkPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyList.
func (in *CiliumNetworkPolicyList) DeepCopy() *CiliumNetworkPolicyList {
	if in == nil {
		return nil
	}
	out := new(CiliumNetworkPolicyList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNetworkPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyNodeStatus) DeepCopyInto(out *CiliumNetworkPolicyNodeStatus) {
	*out = *in
	in.LastUpdated.DeepCopyInto(&out.LastUpdated)
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		// Fresh map so out does not alias in's map storage.
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyNodeStatus.
func (in *CiliumNetworkPolicyNodeStatus) DeepCopy() *CiliumNetworkPolicyNodeStatus {
	if in == nil {
		return nil
	}
	out := new(CiliumNetworkPolicyNodeStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyStatus) DeepCopyInto(out *CiliumNetworkPolicyStatus) {
	*out = *in
	if in.DerivativePolicies != nil {
		in, out := &in.DerivativePolicies, &out.DerivativePolicies
		*out = make(map[string]CiliumNetworkPolicyNodeStatus, len(*in))
		for key, val := range *in {
			// Map values are structs needing a deep copy; DeepCopy on the
			// iteration copy, then dereference into the new map.
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]NetworkPolicyCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyStatus.
func (in *CiliumNetworkPolicyStatus) DeepCopy() *CiliumNetworkPolicyStatus {
	if in == nil {
		return nil
	}
	out := new(CiliumNetworkPolicyStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNode) DeepCopyInto(out *CiliumNode) {
	// Shallow-copy, then deep-copy metadata, Spec, and Status.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNode.
func (in *CiliumNode) DeepCopy() *CiliumNode {
	if in == nil {
		return nil
	}
	out := new(CiliumNode)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNode) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfig) DeepCopyInto(out *CiliumNodeConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfig.
func (in *CiliumNodeConfig) DeepCopy() *CiliumNodeConfig {
	if in == nil {
		return nil
	}
	out := new(CiliumNodeConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfigList) DeepCopyInto(out *CiliumNodeConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumNodeConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigList.
func (in *CiliumNodeConfigList) DeepCopy() *CiliumNodeConfigList {
	if in == nil {
		return nil
	}
	out := new(CiliumNodeConfigList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfigSpec) DeepCopyInto(out *CiliumNodeConfigSpec) {
	*out = *in
	if in.Defaults != nil {
		in, out := &in.Defaults, &out.Defaults
		// Fresh map so out does not alias in's map storage.
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = new(metav1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigSpec.
func (in *CiliumNodeConfigSpec) DeepCopy() *CiliumNodeConfigSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumNodeConfigSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeList) DeepCopyInto(out *CiliumNodeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumNode, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeList.
func (in *CiliumNodeList) DeepCopy() *CiliumNodeList {
	if in == nil {
		return nil
	}
	out := new(CiliumNodeList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatus) DeepCopyInto(out *ControllerStatus) {
*out = *in
if in.Configuration != nil {
in, out := &in.Configuration, &out.Configuration
*out = new(models.ControllerStatusConfiguration)
**out = **in
}
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatus.
func (in *ControllerStatus) DeepCopy() *ControllerStatus {
if in == nil {
return nil
}
out := new(ControllerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DefaultGateway) DeepCopyInto(out *DefaultGateway) {
// All fields are value types, so a plain assignment is a full deep copy.
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultGateway.
func (in *DefaultGateway) DeepCopy() *DefaultGateway {
if in == nil {
return nil
}
out := new(DefaultGateway)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressGateway) DeepCopyInto(out *EgressGateway) {
// Shallow-copy all fields, then deep-copy the optional NodeSelector.
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressGateway.
func (in *EgressGateway) DeepCopy() *EgressGateway {
if in == nil {
return nil
}
out := new(EgressGateway)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressRule) DeepCopyInto(out *EgressRule) {
// Shallow-copy all fields, then deep-copy each optional label selector.
*out = *in
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.PodSelector != nil {
in, out := &in.PodSelector, &out.PodSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRule.
func (in *EgressRule) DeepCopy() *EgressRule {
if in == nil {
return nil
}
out := new(EgressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionSpec) DeepCopyInto(out *EncryptionSpec) {
// All fields are value types, so a plain assignment is a full deep copy.
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSpec.
func (in *EncryptionSpec) DeepCopy() *EncryptionSpec {
if in == nil {
return nil
}
out := new(EncryptionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointIdentity) DeepCopyInto(out *EndpointIdentity) {
// Shallow-copy all fields, then detach the Labels slice from the source's backing array.
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointIdentity.
func (in *EndpointIdentity) DeepCopy() *EndpointIdentity {
if in == nil {
return nil
}
out := new(EndpointIdentity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointNetworking) DeepCopyInto(out *EndpointNetworking) {
// Shallow-copy all fields, then rebuild the Addressing slice of pointers element by element.
*out = *in
if in.Addressing != nil {
in, out := &in.Addressing, &out.Addressing
*out = make(AddressPairList, len(*in))
for i := range *in {
// nil entries stay nil in the copy; non-nil entries get a fresh AddressPair.
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(AddressPair)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointNetworking.
func (in *EndpointNetworking) DeepCopy() *EndpointNetworking {
if in == nil {
return nil
}
out := new(EndpointNetworking)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPolicy) DeepCopyInto(out *EndpointPolicy) {
// Shallow-copy all fields, then deep-copy the optional Ingress/Egress direction structs.
*out = *in
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = new(EndpointPolicyDirection)
(*in).DeepCopyInto(*out)
}
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = new(EndpointPolicyDirection)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicy.
func (in *EndpointPolicy) DeepCopy() *EndpointPolicy {
if in == nil {
return nil
}
out := new(EndpointPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPolicyDirection) DeepCopyInto(out *EndpointPolicyDirection) {
// Shallow-copy all fields, then deep-copy the four identity-list slices element by element.
*out = *in
if in.Allowed != nil {
in, out := &in.Allowed, &out.Allowed
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Denied != nil {
in, out := &in.Denied, &out.Denied
*out = make(DenyIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Removing != nil {
in, out := &in.Removing, &out.Removing
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Adding != nil {
in, out := &in.Adding, &out.Adding
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicyDirection.
func (in *EndpointPolicyDirection) DeepCopy() *EndpointPolicyDirection {
if in == nil {
return nil
}
out := new(EndpointPolicyDirection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) {
// Shallow-copy all fields, then deep-copy every pointer, slice, and nested struct field.
*out = *in
if in.Controllers != nil {
in, out := &in.Controllers, &out.Controllers
*out = make(ControllerList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExternalIdentifiers != nil {
in, out := &in.ExternalIdentifiers, &out.ExternalIdentifiers
*out = new(models.EndpointIdentifiers)
**out = **in
}
if in.Health != nil {
in, out := &in.Health, &out.Health
*out = new(models.EndpointHealth)
**out = **in
}
if in.Identity != nil {
in, out := &in.Identity, &out.Identity
*out = new(EndpointIdentity)
(*in).DeepCopyInto(*out)
}
if in.Log != nil {
in, out := &in.Log, &out.Log
*out = make([]*models.EndpointStatusChange, len(*in))
for i := range *in {
// nil log entries stay nil; non-nil entries get a fresh value copy.
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(models.EndpointStatusChange)
**out = **in
}
}
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(EndpointNetworking)
(*in).DeepCopyInto(*out)
}
// Encryption holds only value fields, so direct assignment suffices.
out.Encryption = in.Encryption
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(EndpointPolicy)
(*in).DeepCopyInto(*out)
}
if in.NamedPorts != nil {
in, out := &in.NamedPorts, &out.NamedPorts
*out = make(models.NamedPorts, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(models.Port)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus.
func (in *EndpointStatus) DeepCopy() *EndpointStatus {
if in == nil {
return nil
}
out := new(EndpointStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Frontend) DeepCopyInto(out *Frontend) {
// Shallow-copy all fields, then detach the ToPorts slice from the source's backing array.
*out = *in
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Frontend.
func (in *Frontend) DeepCopy() *Frontend {
if in == nil {
return nil
}
out := new(Frontend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthAddressingSpec) DeepCopyInto(out *HealthAddressingSpec) {
// All fields are value types, so a plain assignment is a full deep copy.
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthAddressingSpec.
func (in *HealthAddressingSpec) DeepCopy() *HealthAddressingSpec {
if in == nil {
return nil
}
out := new(HealthAddressingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityTuple) DeepCopyInto(out *IdentityTuple) {
// Shallow-copy all fields, then rebuild the IdentityLabels map so the copy shares no storage.
*out = *in
if in.IdentityLabels != nil {
in, out := &in.IdentityLabels, &out.IdentityLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityTuple.
func (in *IdentityTuple) DeepCopy() *IdentityTuple {
if in == nil {
return nil
}
out := new(IdentityTuple)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyCondition) DeepCopyInto(out *NetworkPolicyCondition) {
// Shallow-copy all fields, then deep-copy the timestamp (metav1.Time needs its own copy).
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyCondition.
func (in *NetworkPolicyCondition) DeepCopy() *NetworkPolicyCondition {
if in == nil {
return nil
}
out := new(NetworkPolicyCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
// All fields are value types, so a plain assignment is a full deep copy.
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
func (in *NodeAddress) DeepCopy() *NodeAddress {
if in == nil {
return nil
}
out := new(NodeAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
// Shallow-copy all fields, then detach the Addresses slice and deep-copy nested provider structs.
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]NodeAddress, len(*in))
copy(*out, *in)
}
// These fields are value-only structs; direct assignment is sufficient.
out.HealthAddressing = in.HealthAddressing
out.IngressAddressing = in.IngressAddressing
out.Encryption = in.Encryption
// These contain reference types and need a recursive deep copy.
in.ENI.DeepCopyInto(&out.ENI)
out.Azure = in.Azure
in.AlibabaCloud.DeepCopyInto(&out.AlibabaCloud)
in.IPAM.DeepCopyInto(&out.IPAM)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
func (in *NodeSpec) DeepCopy() *NodeSpec {
if in == nil {
return nil
}
out := new(NodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
// Shallow-copy all fields, then deep-copy each provider status struct that holds reference types.
*out = *in
in.ENI.DeepCopyInto(&out.ENI)
in.Azure.DeepCopyInto(&out.Azure)
in.IPAM.DeepCopyInto(&out.IPAM)
in.AlibabaCloud.DeepCopyInto(&out.AlibabaCloud)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
func (in *NodeStatus) DeepCopy() *NodeStatus {
if in == nil {
return nil
}
out := new(NodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PeerConfigReference) DeepCopyInto(out *PeerConfigReference) {
// All fields are value types, so a plain assignment is a full deep copy.
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerConfigReference.
func (in *PeerConfigReference) DeepCopy() *PeerConfigReference {
if in == nil {
return nil
}
out := new(PeerConfigReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortInfo) DeepCopyInto(out *PortInfo) {
// All fields are value types, so a plain assignment is a full deep copy.
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortInfo.
func (in *PortInfo) DeepCopy() *PortInfo {
if in == nil {
return nil
}
out := new(PortInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedirectBackend) DeepCopyInto(out *RedirectBackend) {
// Shallow-copy all fields, then deep-copy the selector and detach the ToPorts slice.
*out = *in
in.LocalEndpointSelector.DeepCopyInto(&out.LocalEndpointSelector)
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectBackend.
func (in *RedirectBackend) DeepCopy() *RedirectBackend {
if in == nil {
return nil
}
out := new(RedirectBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedirectFrontend) DeepCopyInto(out *RedirectFrontend) {
// Shallow-copy all fields, then deep-copy the two optional matcher pointers.
*out = *in
if in.AddressMatcher != nil {
in, out := &in.AddressMatcher, &out.AddressMatcher
*out = new(Frontend)
(*in).DeepCopyInto(*out)
}
if in.ServiceMatcher != nil {
in, out := &in.ServiceMatcher, &out.ServiceMatcher
*out = new(ServiceInfo)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectFrontend.
func (in *RedirectFrontend) DeepCopy() *RedirectFrontend {
if in == nil {
return nil
}
out := new(RedirectFrontend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Service) DeepCopyInto(out *Service) {
// Shallow-copy all fields, then detach the Ports slice from the source's backing array.
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
func (in *Service) DeepCopy() *Service {
if in == nil {
return nil
}
out := new(Service)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceInfo) DeepCopyInto(out *ServiceInfo) {
// Shallow-copy all fields, then detach the ToPorts slice from the source's backing array.
*out = *in
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInfo.
func (in *ServiceInfo) DeepCopy() *ServiceInfo {
if in == nil {
return nil
}
out := new(ServiceInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceListener) DeepCopyInto(out *ServiceListener) {
// Shallow-copy all fields, then detach the Ports slice from the source's backing array.
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]uint16, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceListener.
func (in *ServiceListener) DeepCopy() *ServiceListener {
if in == nil {
return nil
}
out := new(ServiceListener)
in.DeepCopyInto(out)
return out
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XDSResource.
func (in *XDSResource) DeepCopy() *XDSResource {
// A nil receiver yields a nil copy; the field-wise copy is delegated to DeepCopyInto
// (defined elsewhere in this package).
if in == nil {
return nil
}
out := new(XDSResource)
in.DeepCopyInto(out)
return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package v2
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AddressPair) DeepEqual(other *AddressPair) bool {
// By convention a non-nil receiver never equals a nil other.
if other == nil {
return false
}
if in.IPV4 != other.IPV4 {
return false
}
if in.IPV6 != other.IPV6 {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AddressPairList) DeepEqual(other *AddressPairList) bool {
if other == nil {
return false
}
// Lists are equal iff they have the same length and element-wise deep equality.
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
// Elements are pointers, so (*other)[i] is passed as-is.
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AllowedIdentityList) DeepEqual(other *AllowedIdentityList) bool {
if other == nil {
return false
}
// Lists are equal iff they have the same length and element-wise deep equality.
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
// Elements are values, so the address of the counterpart is taken.
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *BGPAdvertisement) DeepEqual(other *BGPAdvertisement) bool {
if other == nil {
return false
}
if in.AdvertisementType != other.AdvertisementType {
return false
}
// Pointer fields: a nil/non-nil mismatch is unequal; when both are set, recurse.
if (in.Service == nil) != (other.Service == nil) {
return false
} else if in.Service != nil {
if !in.Service.DeepEqual(other.Service) {
return false
}
}
if (in.Selector == nil) != (other.Selector == nil) {
return false
} else if in.Selector != nil {
if !in.Selector.DeepEqual(other.Selector) {
return false
}
}
if (in.Attributes == nil) != (other.Attributes == nil) {
return false
} else if in.Attributes != nil {
if !in.Attributes.DeepEqual(other.Attributes) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *BGPAttributes) DeepEqual(other *BGPAttributes) bool {
if other == nil {
return false
}
// Pointer fields: a nil/non-nil mismatch is unequal; when both are set, compare contents.
if (in.Communities == nil) != (other.Communities == nil) {
return false
} else if in.Communities != nil {
if !in.Communities.DeepEqual(other.Communities) {
return false
}
}
if (in.LocalPreference == nil) != (other.LocalPreference == nil) {
return false
} else if in.LocalPreference != nil {
if *in.LocalPreference != *other.LocalPreference {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *BGPAutoDiscovery) DeepEqual(other *BGPAutoDiscovery) bool {
if other == nil {
return false
}
if in.Mode != other.Mode {
return false
}
// DefaultGateway pointer: nil/non-nil mismatch is unequal; when both set, recurse.
if (in.DefaultGateway == nil) != (other.DefaultGateway == nil) {
return false
} else if in.DefaultGateway != nil {
if !in.DefaultGateway.DeepEqual(other.DefaultGateway) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *BGPCommunities) DeepEqual(other *BGPCommunities) bool {
if other == nil {
return false
}
// Slice idiom: the guard is true when both slices are set OR exactly one is nil;
// the length comparison below rejects the nil/non-nil mismatch. The shadowed
// `other == nil` check is defensive: it compares a field address and never fires here.
if ((in.Standard != nil) && (other.Standard != nil)) || ((in.Standard == nil) != (other.Standard == nil)) {
in, other := &in.Standard, &other.Standard
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if ((in.WellKnown != nil) && (other.WellKnown != nil)) || ((in.WellKnown == nil) != (other.WellKnown == nil)) {
in, other := &in.WellKnown, &other.WellKnown
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if ((in.Large != nil) && (other.Large != nil)) || ((in.Large == nil) != (other.Large == nil)) {
in, other := &in.Large, &other.Large
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *BGPFamilyRouteCount) DeepEqual(other *BGPFamilyRouteCount) bool {
if other == nil {
return false
}
if in.Afi != other.Afi {
return false
}
if in.Safi != other.Safi {
return false
}
// Pointer fields: nil/non-nil mismatch is unequal; when both set, compare values.
if (in.Received == nil) != (other.Received == nil) {
return false
} else if in.Received != nil {
if *in.Received != *other.Received {
return false
}
}
if (in.Advertised == nil) != (other.Advertised == nil) {
return false
} else if in.Advertised != nil {
if *in.Advertised != *other.Advertised {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *BGPServiceOptions) DeepEqual(other *BGPServiceOptions) bool {
if other == nil {
return false
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
in, other := &in.Addresses, &other.Addresses
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
// Pointer fields: nil/non-nil mismatch is unequal; when both set, compare values.
if (in.AggregationLengthIPv4 == nil) != (other.AggregationLengthIPv4 == nil) {
return false
} else if in.AggregationLengthIPv4 != nil {
if *in.AggregationLengthIPv4 != *other.AggregationLengthIPv4 {
return false
}
}
if (in.AggregationLengthIPv6 == nil) != (other.AggregationLengthIPv6 == nil) {
return false
} else if in.AggregationLengthIPv6 != nil {
if *in.AggregationLengthIPv6 != *other.AggregationLengthIPv6 {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPAdvertisement) DeepEqual(other *CiliumBGPAdvertisement) bool {
if other == nil {
return false
}
// Only the Spec participates in equality; object metadata is intentionally excluded.
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPAdvertisementSpec) DeepEqual(other *CiliumBGPAdvertisementSpec) bool {
if other == nil {
return false
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.Advertisements != nil) && (other.Advertisements != nil)) || ((in.Advertisements == nil) != (other.Advertisements == nil)) {
in, other := &in.Advertisements, &other.Advertisements
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPClusterConfig) DeepEqual(other *CiliumBGPClusterConfig) bool {
if other == nil {
return false
}
// Spec and Status participate in equality; object metadata is intentionally excluded.
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPClusterConfigSpec) DeepEqual(other *CiliumBGPClusterConfigSpec) bool {
if other == nil {
return false
}
// NodeSelector pointer: nil/non-nil mismatch is unequal; when both set, recurse.
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) {
in, other := &in.BGPInstances, &other.BGPInstances
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPClusterConfigStatus) DeepEqual(other *CiliumBGPClusterConfigStatus) bool {
// No fields are compared by the generator; any two non-nil statuses are equal.
if other == nil {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPFamily) DeepEqual(other *CiliumBGPFamily) bool {
// By convention a non-nil receiver never equals a nil other.
if other == nil {
return false
}
if in.Afi != other.Afi {
return false
}
if in.Safi != other.Safi {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPFamilyWithAdverts) DeepEqual(other *CiliumBGPFamilyWithAdverts) bool {
if other == nil {
return false
}
// The embedded CiliumBGPFamily is comparable, so == suffices here.
if in.CiliumBGPFamily != other.CiliumBGPFamily {
return false
}
if (in.Advertisements == nil) != (other.Advertisements == nil) {
return false
} else if in.Advertisements != nil {
if !in.Advertisements.DeepEqual(other.Advertisements) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPInstance) DeepEqual(other *CiliumBGPInstance) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
// Pointer fields: nil/non-nil mismatch is unequal; when both set, compare values.
if (in.LocalASN == nil) != (other.LocalASN == nil) {
return false
} else if in.LocalASN != nil {
if *in.LocalASN != *other.LocalASN {
return false
}
}
if (in.LocalPort == nil) != (other.LocalPort == nil) {
return false
} else if in.LocalPort != nil {
if *in.LocalPort != *other.LocalPort {
return false
}
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) {
in, other := &in.Peers, &other.Peers
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNeighborGracefulRestart) DeepEqual(other *CiliumBGPNeighborGracefulRestart) bool {
if other == nil {
return false
}
if in.Enabled != other.Enabled {
return false
}
// RestartTimeSeconds pointer: nil/non-nil mismatch is unequal; when both set, compare values.
if (in.RestartTimeSeconds == nil) != (other.RestartTimeSeconds == nil) {
return false
} else if in.RestartTimeSeconds != nil {
if *in.RestartTimeSeconds != *other.RestartTimeSeconds {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeConfig) DeepEqual(other *CiliumBGPNodeConfig) bool {
if other == nil {
return false
}
// Spec and Status participate in equality; object metadata is intentionally excluded.
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeConfigInstanceOverride) DeepEqual(other *CiliumBGPNodeConfigInstanceOverride) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
// Pointer fields: nil/non-nil mismatch is unequal; when both set, compare values.
if (in.RouterID == nil) != (other.RouterID == nil) {
return false
} else if in.RouterID != nil {
if *in.RouterID != *other.RouterID {
return false
}
}
if (in.LocalPort == nil) != (other.LocalPort == nil) {
return false
} else if in.LocalPort != nil {
if *in.LocalPort != *other.LocalPort {
return false
}
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) {
in, other := &in.Peers, &other.Peers
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if (in.LocalASN == nil) != (other.LocalASN == nil) {
return false
} else if in.LocalASN != nil {
if *in.LocalASN != *other.LocalASN {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeConfigOverride) DeepEqual(other *CiliumBGPNodeConfigOverride) bool {
if other == nil {
return false
}
// Only the Spec participates in equality; object metadata is intentionally excluded.
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeConfigOverrideSpec) DeepEqual(other *CiliumBGPNodeConfigOverrideSpec) bool {
if other == nil {
return false
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) {
in, other := &in.BGPInstances, &other.BGPInstances
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeConfigPeerOverride) DeepEqual(other *CiliumBGPNodeConfigPeerOverride) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
// Pointer fields: nil/non-nil mismatch is unequal; when both set, compare values.
if (in.LocalAddress == nil) != (other.LocalAddress == nil) {
return false
} else if in.LocalAddress != nil {
if *in.LocalAddress != *other.LocalAddress {
return false
}
}
if (in.LocalPort == nil) != (other.LocalPort == nil) {
return false
} else if in.LocalPort != nil {
if *in.LocalPort != *other.LocalPort {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeInstance) DeepEqual(other *CiliumBGPNodeInstance) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
// Pointer fields: nil/non-nil mismatch is unequal; when both set, compare values.
if (in.LocalASN == nil) != (other.LocalASN == nil) {
return false
} else if in.LocalASN != nil {
if *in.LocalASN != *other.LocalASN {
return false
}
}
if (in.RouterID == nil) != (other.RouterID == nil) {
return false
} else if in.RouterID != nil {
if *in.RouterID != *other.RouterID {
return false
}
}
if (in.LocalPort == nil) != (other.LocalPort == nil) {
return false
} else if in.LocalPort != nil {
if *in.LocalPort != *other.LocalPort {
return false
}
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) {
in, other := &in.Peers, &other.Peers
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeInstanceStatus) DeepEqual(other *CiliumBGPNodeInstanceStatus) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
// LocalASN pointer: nil/non-nil mismatch is unequal; when both set, compare values.
if (in.LocalASN == nil) != (other.LocalASN == nil) {
return false
} else if in.LocalASN != nil {
if *in.LocalASN != *other.LocalASN {
return false
}
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.PeerStatuses != nil) && (other.PeerStatuses != nil)) || ((in.PeerStatuses == nil) != (other.PeerStatuses == nil)) {
in, other := &in.PeerStatuses, &other.PeerStatuses
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodePeer) DeepEqual(other *CiliumBGPNodePeer) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
// Pointer fields: nil/non-nil mismatch is unequal; when both set, compare contents.
if (in.PeerAddress == nil) != (other.PeerAddress == nil) {
return false
} else if in.PeerAddress != nil {
if *in.PeerAddress != *other.PeerAddress {
return false
}
}
if (in.PeerASN == nil) != (other.PeerASN == nil) {
return false
} else if in.PeerASN != nil {
if *in.PeerASN != *other.PeerASN {
return false
}
}
if (in.AutoDiscovery == nil) != (other.AutoDiscovery == nil) {
return false
} else if in.AutoDiscovery != nil {
if !in.AutoDiscovery.DeepEqual(other.AutoDiscovery) {
return false
}
}
if (in.LocalAddress == nil) != (other.LocalAddress == nil) {
return false
} else if in.LocalAddress != nil {
if *in.LocalAddress != *other.LocalAddress {
return false
}
}
if (in.PeerConfigRef == nil) != (other.PeerConfigRef == nil) {
return false
} else if in.PeerConfigRef != nil {
if !in.PeerConfigRef.DeepEqual(other.PeerConfigRef) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodePeerStatus) DeepEqual(other *CiliumBGPNodePeerStatus) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.PeerAddress != other.PeerAddress {
return false
}
// Pointer fields: nil/non-nil mismatch is unequal; when both set, compare contents.
if (in.PeerASN == nil) != (other.PeerASN == nil) {
return false
} else if in.PeerASN != nil {
if *in.PeerASN != *other.PeerASN {
return false
}
}
if (in.PeeringState == nil) != (other.PeeringState == nil) {
return false
} else if in.PeeringState != nil {
if *in.PeeringState != *other.PeeringState {
return false
}
}
if (in.Timers == nil) != (other.Timers == nil) {
return false
} else if in.Timers != nil {
if !in.Timers.DeepEqual(other.Timers) {
return false
}
}
if (in.EstablishedTime == nil) != (other.EstablishedTime == nil) {
return false
} else if in.EstablishedTime != nil {
if *in.EstablishedTime != *other.EstablishedTime {
return false
}
}
// Slice idiom: entered when both slices are set or exactly one is nil; the
// length comparison rejects the mismatch. The inner nil check is defensive.
if ((in.RouteCount != nil) && (other.RouteCount != nil)) || ((in.RouteCount == nil) != (other.RouteCount == nil)) {
in, other := &in.RouteCount, &other.RouteCount
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed slice pointer can never trigger
// (it tests a field's address), and a nil slice compares equal to an empty one.
func (in *CiliumBGPNodeSpec) DeepEqual(other *CiliumBGPNodeSpec) bool {
if other == nil {
return false
}
if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) {
in, other := &in.BGPInstances, &other.BGPInstances
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed slice pointer can never trigger
// (it tests a field's address), and a nil slice compares equal to an empty one.
func (in *CiliumBGPNodeStatus) DeepEqual(other *CiliumBGPNodeStatus) bool {
if other == nil {
return false
}
if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) {
in, other := &in.BGPInstances, &other.BGPInstances
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Pointer fields are equal only when both are nil or both point to equal
// values; nested structs are compared via their own DeepEqual.
func (in *CiliumBGPPeer) DeepEqual(other *CiliumBGPPeer) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if (in.PeerAddress == nil) != (other.PeerAddress == nil) {
return false
} else if in.PeerAddress != nil {
if *in.PeerAddress != *other.PeerAddress {
return false
}
}
if (in.PeerASN == nil) != (other.PeerASN == nil) {
return false
} else if in.PeerASN != nil {
if *in.PeerASN != *other.PeerASN {
return false
}
}
if (in.AutoDiscovery == nil) != (other.AutoDiscovery == nil) {
return false
} else if in.AutoDiscovery != nil {
if !in.AutoDiscovery.DeepEqual(other.AutoDiscovery) {
return false
}
}
if (in.PeerConfigRef == nil) != (other.PeerConfigRef == nil) {
return false
} else if in.PeerConfigRef != nil {
if !in.PeerConfigRef.DeepEqual(other.PeerConfigRef) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Delegates to the Spec and Status DeepEqual implementations.
func (in *CiliumBGPPeerConfig) DeepEqual(other *CiliumBGPPeerConfig) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed slice pointer can never trigger
// (it tests a field's address), and a nil slice compares equal to an empty one.
func (in *CiliumBGPPeerConfigSpec) DeepEqual(other *CiliumBGPPeerConfigSpec) bool {
if other == nil {
return false
}
if (in.Transport == nil) != (other.Transport == nil) {
return false
} else if in.Transport != nil {
if !in.Transport.DeepEqual(other.Transport) {
return false
}
}
if (in.Timers == nil) != (other.Timers == nil) {
return false
} else if in.Timers != nil {
if !in.Timers.DeepEqual(other.Timers) {
return false
}
}
if (in.AuthSecretRef == nil) != (other.AuthSecretRef == nil) {
return false
} else if in.AuthSecretRef != nil {
if *in.AuthSecretRef != *other.AuthSecretRef {
return false
}
}
if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) {
return false
} else if in.GracefulRestart != nil {
if !in.GracefulRestart.DeepEqual(other.GracefulRestart) {
return false
}
}
if (in.EBGPMultihop == nil) != (other.EBGPMultihop == nil) {
return false
} else if in.EBGPMultihop != nil {
if *in.EBGPMultihop != *other.EBGPMultihop {
return false
}
}
if ((in.Families != nil) && (other.Families != nil)) || ((in.Families == nil) != (other.Families == nil)) {
in, other := &in.Families, &other.Families
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// The type currently carries no compared fields, so any two non-nil values
// are considered equal.
func (in *CiliumBGPPeerConfigStatus) DeepEqual(other *CiliumBGPPeerConfigStatus) bool {
if other == nil {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Optional timer fields are equal when both nil or both point to equal values.
func (in *CiliumBGPTimers) DeepEqual(other *CiliumBGPTimers) bool {
if other == nil {
return false
}
if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) {
return false
} else if in.ConnectRetryTimeSeconds != nil {
if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds {
return false
}
}
if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) {
return false
} else if in.HoldTimeSeconds != nil {
if *in.HoldTimeSeconds != *other.HoldTimeSeconds {
return false
}
}
if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) {
return false
} else if in.KeepAliveTimeSeconds != nil {
if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Optional applied-timer fields are equal when both nil or both point to
// equal values.
func (in *CiliumBGPTimersState) DeepEqual(other *CiliumBGPTimersState) bool {
if other == nil {
return false
}
if (in.AppliedHoldTimeSeconds == nil) != (other.AppliedHoldTimeSeconds == nil) {
return false
} else if in.AppliedHoldTimeSeconds != nil {
if *in.AppliedHoldTimeSeconds != *other.AppliedHoldTimeSeconds {
return false
}
}
if (in.AppliedKeepaliveSeconds == nil) != (other.AppliedKeepaliveSeconds == nil) {
return false
} else if in.AppliedKeepaliveSeconds != nil {
if *in.AppliedKeepaliveSeconds != *other.AppliedKeepaliveSeconds {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// PeerPort is equal when both nil or both point to equal values.
func (in *CiliumBGPTransport) DeepEqual(other *CiliumBGPTransport) bool {
if other == nil {
return false
}
if (in.PeerPort == nil) != (other.PeerPort == nil) {
return false
} else if in.PeerPort != nil {
if *in.PeerPort != *other.PeerPort {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed slice pointer can never trigger
// (it tests a field's address), and a nil slice compares equal to an empty one.
func (in *CiliumCIDRGroupSpec) DeepEqual(other *CiliumCIDRGroupSpec) bool {
if other == nil {
return false
}
if ((in.ExternalCIDRs != nil) && (other.ExternalCIDRs != nil)) || ((in.ExternalCIDRs == nil) != (other.ExternalCIDRs == nil)) {
in, other := &in.ExternalCIDRs, &other.ExternalCIDRs
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
// Elements are comparable values; order is significant.
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Only Spec participates in the comparison.
func (in *CiliumClusterwideEnvoyConfig) DeepEqual(other *CiliumClusterwideEnvoyConfig) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Unexported: presumably wrapped by a hand-written DeepEqual elsewhere —
// confirm at the call site. Note: the shadowed nil check below tests a
// field's address and can never trigger.
func (in *CiliumClusterwideNetworkPolicy) deepEqual(other *CiliumClusterwideNetworkPolicy) bool {
if other == nil {
return false
}
if (in.Spec == nil) != (other.Spec == nil) {
return false
} else if in.Spec != nil {
if !in.Spec.DeepEqual(other.Spec) {
return false
}
}
if ((in.Specs != nil) && (other.Specs != nil)) || ((in.Specs == nil) != (other.Specs == nil)) {
in, other := &in.Specs, &other.Specs
if other == nil || !in.DeepEqual(other) {
return false
}
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Only Spec participates in the comparison.
func (in *CiliumEgressGatewayPolicy) DeepEqual(other *CiliumEgressGatewayPolicy) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil checks on the shadowed slice pointers can never trigger
// (they test field addresses), and a nil slice compares equal to an empty one.
func (in *CiliumEgressGatewayPolicySpec) DeepEqual(other *CiliumEgressGatewayPolicySpec) bool {
if other == nil {
return false
}
if ((in.Selectors != nil) && (other.Selectors != nil)) || ((in.Selectors == nil) != (other.Selectors == nil)) {
in, other := &in.Selectors, &other.Selectors
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if ((in.DestinationCIDRs != nil) && (other.DestinationCIDRs != nil)) || ((in.DestinationCIDRs == nil) != (other.DestinationCIDRs == nil)) {
in, other := &in.DestinationCIDRs, &other.DestinationCIDRs
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if ((in.ExcludedCIDRs != nil) && (other.ExcludedCIDRs != nil)) || ((in.ExcludedCIDRs == nil) != (other.ExcludedCIDRs == nil)) {
in, other := &in.ExcludedCIDRs, &other.ExcludedCIDRs
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if (in.EgressGateway == nil) != (other.EgressGateway == nil) {
return false
} else if in.EgressGateway != nil {
if !in.EgressGateway.DeepEqual(other.EgressGateway) {
return false
}
}
if ((in.EgressGateways != nil) && (other.EgressGateways != nil)) || ((in.EgressGateways == nil) != (other.EgressGateways == nil)) {
in, other := &in.EgressGateways, &other.EgressGateways
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Only Status participates in the comparison.
func (in *CiliumEndpoint) DeepEqual(other *CiliumEndpoint) bool {
if other == nil {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Only Spec participates in the comparison.
func (in *CiliumEnvoyConfig) DeepEqual(other *CiliumEnvoyConfig) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil checks on the shadowed slice pointers can never trigger
// (they test field addresses), and a nil slice compares equal to an empty one.
// Services/BackendServices elements are pointers and are passed to DeepEqual
// directly; Resources elements are values and are passed by address.
func (in *CiliumEnvoyConfigSpec) DeepEqual(other *CiliumEnvoyConfigSpec) bool {
if other == nil {
return false
}
if ((in.Services != nil) && (other.Services != nil)) || ((in.Services == nil) != (other.Services == nil)) {
in, other := &in.Services, &other.Services
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if ((in.BackendServices != nil) && (other.BackendServices != nil)) || ((in.BackendServices == nil) != (other.BackendServices == nil)) {
in, other := &in.BackendServices, &other.BackendServices
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if ((in.Resources != nil) && (other.Resources != nil)) || ((in.Resources == nil) != (other.Resources == nil)) {
in, other := &in.Resources, &other.Resources
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed map pointer can never trigger
// (it tests a field's address), and a nil map compares equal to an empty one.
func (in *CiliumIdentity) DeepEqual(other *CiliumIdentity) bool {
if other == nil {
return false
}
if ((in.SecurityLabels != nil) && (other.SecurityLabels != nil)) || ((in.SecurityLabels == nil) != (other.SecurityLabels == nil)) {
in, other := &in.SecurityLabels, &other.SecurityLabels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
// Key-by-key comparison; same lengths plus every in key present and
// equal in other implies map equality.
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Only Spec participates in the comparison.
func (in *CiliumLoadBalancerIPPool) DeepEqual(other *CiliumLoadBalancerIPPool) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Compares the Cidr/Start/Stop fields for strict equality.
func (in *CiliumLoadBalancerIPPoolIPBlock) DeepEqual(other *CiliumLoadBalancerIPPoolIPBlock) bool {
if other == nil {
return false
}
if in.Cidr != other.Cidr {
return false
}
if in.Start != other.Start {
return false
}
if in.Stop != other.Stop {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed slice pointer can never trigger
// (it tests a field's address), and a nil slice compares equal to an empty one.
func (in *CiliumLoadBalancerIPPoolSpec) DeepEqual(other *CiliumLoadBalancerIPPoolSpec) bool {
if other == nil {
return false
}
if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) {
return false
} else if in.ServiceSelector != nil {
if !in.ServiceSelector.DeepEqual(other.ServiceSelector) {
return false
}
}
if in.AllowFirstLastIPs != other.AllowFirstLastIPs {
return false
}
if ((in.Blocks != nil) && (other.Blocks != nil)) || ((in.Blocks == nil) != (other.Blocks == nil)) {
in, other := &in.Blocks, &other.Blocks
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if in.Disabled != other.Disabled {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Only Spec participates in the comparison.
func (in *CiliumLocalRedirectPolicy) DeepEqual(other *CiliumLocalRedirectPolicy) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Nested frontend/backend are compared via their DeepEqual; remaining
// scalar fields use strict equality.
func (in *CiliumLocalRedirectPolicySpec) DeepEqual(other *CiliumLocalRedirectPolicySpec) bool {
if other == nil {
return false
}
if !in.RedirectFrontend.DeepEqual(&other.RedirectFrontend) {
return false
}
if !in.RedirectBackend.DeepEqual(&other.RedirectBackend) {
return false
}
if in.SkipRedirectFromBackend != other.SkipRedirectFromBackend {
return false
}
if in.Description != other.Description {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
func (in *CiliumLocalRedirectPolicyStatus) DeepEqual(other *CiliumLocalRedirectPolicyStatus) bool {
if other == nil {
return false
}
if in.OK != other.OK {
return false
}
return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Unexported: presumably wrapped by a hand-written DeepEqual elsewhere —
// confirm at the call site. Status is not compared here, unlike the
// clusterwide variant. The shadowed nil check tests a field's address and
// can never trigger.
func (in *CiliumNetworkPolicy) deepEqual(other *CiliumNetworkPolicy) bool {
if other == nil {
return false
}
if (in.Spec == nil) != (other.Spec == nil) {
return false
} else if in.Spec != nil {
if !in.Spec.DeepEqual(other.Spec) {
return false
}
}
if ((in.Specs != nil) && (other.Specs != nil)) || ((in.Specs == nil) != (other.Specs == nil)) {
in, other := &in.Specs, &other.Specs
if other == nil || !in.DeepEqual(other) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed map pointer can never trigger
// (it tests a field's address), and a nil map compares equal to an empty one.
func (in *CiliumNetworkPolicyNodeStatus) DeepEqual(other *CiliumNetworkPolicyNodeStatus) bool {
if other == nil {
return false
}
if in.OK != other.OK {
return false
}
if in.Error != other.Error {
return false
}
if !in.LastUpdated.DeepEqual(&other.LastUpdated) {
return false
}
if in.Revision != other.Revision {
return false
}
if in.Enforcing != other.Enforcing {
return false
}
if ((in.Annotations != nil) && (other.Annotations != nil)) || ((in.Annotations == nil) != (other.Annotations == nil)) {
in, other := &in.Annotations, &other.Annotations
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil checks on the shadowed map/slice pointers can never
// trigger (they test field addresses), and nil compares equal to empty.
func (in *CiliumNetworkPolicyStatus) DeepEqual(other *CiliumNetworkPolicyStatus) bool {
if other == nil {
return false
}
if ((in.DerivativePolicies != nil) && (other.DerivativePolicies != nil)) || ((in.DerivativePolicies == nil) != (other.DerivativePolicies == nil)) {
in, other := &in.DerivativePolicies, &other.DerivativePolicies
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
// otherValue is a copy of the map value; compared by address
// since DeepEqual takes a pointer.
if !inValue.DeepEqual(&otherValue) {
return false
}
}
}
}
}
if ((in.Conditions != nil) && (other.Conditions != nil)) || ((in.Conditions == nil) != (other.Conditions == nil)) {
in, other := &in.Conditions, &other.Conditions
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Delegates to the Spec and Status DeepEqual implementations.
func (in *CiliumNode) DeepEqual(other *CiliumNode) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Receiver is a slice type; compares element-wise, order-sensitive.
func (in *ControllerList) DeepEqual(other *ControllerList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Configuration is equal when both nil or both deeply equal.
func (in *ControllerStatus) DeepEqual(other *ControllerStatus) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if (in.Configuration == nil) != (other.Configuration == nil) {
return false
} else if in.Configuration != nil {
if !in.Configuration.DeepEqual(other.Configuration) {
return false
}
}
if in.Status != other.Status {
return false
}
if in.UUID != other.UUID {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Compares all scalar counter/timestamp fields for strict equality.
func (in *ControllerStatusStatus) DeepEqual(other *ControllerStatusStatus) bool {
if other == nil {
return false
}
if in.ConsecutiveFailureCount != other.ConsecutiveFailureCount {
return false
}
if in.FailureCount != other.FailureCount {
return false
}
if in.LastFailureMsg != other.LastFailureMsg {
return false
}
if in.LastFailureTimestamp != other.LastFailureTimestamp {
return false
}
if in.LastSuccessTimestamp != other.LastSuccessTimestamp {
return false
}
if in.SuccessCount != other.SuccessCount {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
func (in *DefaultGateway) DeepEqual(other *DefaultGateway) bool {
if other == nil {
return false
}
if in.AddressFamily != other.AddressFamily {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Receiver is a slice type; compares element-wise, order-sensitive.
func (in *DenyIdentityList) DeepEqual(other *DenyIdentityList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// NodeSelector is equal when both nil or both deeply equal.
func (in *EgressGateway) DeepEqual(other *EgressGateway) bool {
if other == nil {
return false
}
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
if in.Interface != other.Interface {
return false
}
if in.EgressIP != other.EgressIP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Each selector pointer is equal when both nil or both deeply equal.
func (in *EgressRule) DeepEqual(other *EgressRule) bool {
if other == nil {
return false
}
if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) {
return false
} else if in.NamespaceSelector != nil {
if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) {
return false
}
}
if (in.PodSelector == nil) != (other.PodSelector == nil) {
return false
} else if in.PodSelector != nil {
if !in.PodSelector.DeepEqual(other.PodSelector) {
return false
}
}
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
func (in *EncryptionSpec) DeepEqual(other *EncryptionSpec) bool {
if other == nil {
return false
}
if in.Key != other.Key {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed slice pointer can never trigger
// (it tests a field's address), and a nil slice compares equal to an empty one.
func (in *EndpointIdentity) DeepEqual(other *EndpointIdentity) bool {
if other == nil {
return false
}
if in.ID != other.ID {
return false
}
if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
in, other := &in.Labels, &other.Labels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Addressing is a type with its own DeepEqual; the shadowed nil check tests
// a field's address and can never trigger.
func (in *EndpointNetworking) DeepEqual(other *EndpointNetworking) bool {
if other == nil {
return false
}
if ((in.Addressing != nil) && (other.Addressing != nil)) || ((in.Addressing == nil) != (other.Addressing == nil)) {
in, other := &in.Addressing, &other.Addressing
if other == nil || !in.DeepEqual(other) {
return false
}
}
if in.NodeIP != other.NodeIP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Ingress/Egress are equal when both nil or both deeply equal.
func (in *EndpointPolicy) DeepEqual(other *EndpointPolicy) bool {
if other == nil {
return false
}
if (in.Ingress == nil) != (other.Ingress == nil) {
return false
} else if in.Ingress != nil {
if !in.Ingress.DeepEqual(other.Ingress) {
return false
}
}
if (in.Egress == nil) != (other.Egress == nil) {
return false
} else if in.Egress != nil {
if !in.Egress.DeepEqual(other.Egress) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Allowed/Denied/Removing/Adding are types with their own DeepEqual; the
// shadowed nil checks test field addresses and can never trigger.
func (in *EndpointPolicyDirection) DeepEqual(other *EndpointPolicyDirection) bool {
if other == nil {
return false
}
if in.Enforcing != other.Enforcing {
return false
}
if ((in.Allowed != nil) && (other.Allowed != nil)) || ((in.Allowed == nil) != (other.Allowed == nil)) {
in, other := &in.Allowed, &other.Allowed
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Denied != nil) && (other.Denied != nil)) || ((in.Denied == nil) != (other.Denied == nil)) {
in, other := &in.Denied, &other.Denied
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Removing != nil) && (other.Removing != nil)) || ((in.Removing == nil) != (other.Removing == nil)) {
in, other := &in.Removing, &other.Removing
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Adding != nil) && (other.Adding != nil)) || ((in.Adding == nil) != (other.Adding == nil)) {
in, other := &in.Adding, &other.Adding
if other == nil || !in.DeepEqual(other) {
return false
}
}
if in.State != other.State {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Mixed comparison strategies: pointer fields (both-nil or both deeply
// equal), slice fields (length + element-wise; nil equals empty), and
// scalar fields (strict equality). Log elements are pointers and are
// passed to DeepEqual directly.
func (in *EndpointStatus) DeepEqual(other *EndpointStatus) bool {
if other == nil {
return false
}
if in.ID != other.ID {
return false
}
if ((in.Controllers != nil) && (other.Controllers != nil)) || ((in.Controllers == nil) != (other.Controllers == nil)) {
in, other := &in.Controllers, &other.Controllers
if other == nil || !in.DeepEqual(other) {
return false
}
}
if (in.ExternalIdentifiers == nil) != (other.ExternalIdentifiers == nil) {
return false
} else if in.ExternalIdentifiers != nil {
if !in.ExternalIdentifiers.DeepEqual(other.ExternalIdentifiers) {
return false
}
}
if (in.Health == nil) != (other.Health == nil) {
return false
} else if in.Health != nil {
if !in.Health.DeepEqual(other.Health) {
return false
}
}
if (in.Identity == nil) != (other.Identity == nil) {
return false
} else if in.Identity != nil {
if !in.Identity.DeepEqual(other.Identity) {
return false
}
}
if ((in.Log != nil) && (other.Log != nil)) || ((in.Log == nil) != (other.Log == nil)) {
in, other := &in.Log, &other.Log
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if (in.Networking == nil) != (other.Networking == nil) {
return false
} else if in.Networking != nil {
if !in.Networking.DeepEqual(other.Networking) {
return false
}
}
if in.Encryption != other.Encryption {
return false
}
if (in.Policy == nil) != (other.Policy == nil) {
return false
} else if in.Policy != nil {
if !in.Policy.DeepEqual(other.Policy) {
return false
}
}
if in.State != other.State {
return false
}
if ((in.NamedPorts != nil) && (other.NamedPorts != nil)) || ((in.NamedPorts == nil) != (other.NamedPorts == nil)) {
in, other := &in.NamedPorts, &other.NamedPorts
if other == nil || !in.DeepEqual(other) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed slice pointer can never trigger
// (it tests a field's address), and a nil slice compares equal to an empty one.
func (in *Frontend) DeepEqual(other *Frontend) bool {
if other == nil {
return false
}
if in.IP != other.IP {
return false
}
if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
in, other := &in.ToPorts, &other.ToPorts
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
func (in *HealthAddressingSpec) DeepEqual(other *HealthAddressingSpec) bool {
if other == nil {
return false
}
if in.IPv4 != other.IPv4 {
return false
}
if in.IPv6 != other.IPv6 {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Receiver is a slice type; compares element-wise, order-sensitive.
func (in *IdentityList) DeepEqual(other *IdentityList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: the inner nil check on the shadowed map pointer can never trigger
// (it tests a field's address), and a nil map compares equal to an empty one.
func (in *IdentityTuple) DeepEqual(other *IdentityTuple) bool {
if other == nil {
return false
}
if in.Identity != other.Identity {
return false
}
if ((in.IdentityLabels != nil) && (other.IdentityLabels != nil)) || ((in.IdentityLabels == nil) != (other.IdentityLabels == nil)) {
in, other := &in.IdentityLabels, &other.IdentityLabels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
if in.DestPort != other.DestPort {
return false
}
if in.Protocol != other.Protocol {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// LastTransitionTime is compared via its own DeepEqual; other fields use
// strict equality.
func (in *NetworkPolicyCondition) DeepEqual(other *NetworkPolicyCondition) bool {
if other == nil {
return false
}
if in.Type != other.Type {
return false
}
if in.Status != other.Status {
return false
}
if !in.LastTransitionTime.DeepEqual(&other.LastTransitionTime) {
return false
}
if in.Reason != other.Reason {
return false
}
if in.Message != other.Message {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
func (in *NodeAddress) DeepEqual(other *NodeAddress) bool {
if other == nil {
return false
}
if in.Type != other.Type {
return false
}
if in.IP != other.IP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Note: HealthAddressing/IngressAddressing/Encryption/Azure are compared
// with == (their types must be comparable), while ENI/AlibabaCloud/IPAM use
// DeepEqual. The shadowed nil check tests a field's address and can never
// trigger; a nil Addresses slice compares equal to an empty one.
func (in *NodeSpec) DeepEqual(other *NodeSpec) bool {
if other == nil {
return false
}
if in.InstanceID != other.InstanceID {
return false
}
if in.BootID != other.BootID {
return false
}
if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
in, other := &in.Addresses, &other.Addresses
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if in.HealthAddressing != other.HealthAddressing {
return false
}
if in.IngressAddressing != other.IngressAddressing {
return false
}
if in.Encryption != other.Encryption {
return false
}
if !in.ENI.DeepEqual(&other.ENI) {
return false
}
if in.Azure != other.Azure {
return false
}
if !in.AlibabaCloud.DeepEqual(&other.AlibabaCloud) {
return false
}
if !in.IPAM.DeepEqual(&other.IPAM) {
return false
}
if in.NodeIdentity != other.NodeIdentity {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Generated code — prefer changing the generator over hand edits.
// Delegates to the per-provider status DeepEqual implementations.
func (in *NodeStatus) DeepEqual(other *NodeStatus) bool {
if other == nil {
return false
}
if !in.ENI.DeepEqual(&other.ENI) {
return false
}
if !in.Azure.DeepEqual(&other.Azure) {
return false
}
if !in.IPAM.DeepEqual(&other.IPAM) {
return false
}
if !in.AlibabaCloud.DeepEqual(&other.AlibabaCloud) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PeerConfigReference) DeepEqual(other *PeerConfigReference) bool {
	// A non-nil receiver never equals a nil argument.
	if other == nil {
		return false
	}
	// Name is the only field of the reference.
	if in.Name != other.Name {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortInfo) DeepEqual(other *PortInfo) bool {
	// A non-nil receiver never equals a nil argument.
	if other == nil {
		return false
	}
	// All fields are directly comparable scalars/strings.
	if in.Port != other.Port {
		return false
	}
	if in.Protocol != other.Protocol {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *RedirectBackend) DeepEqual(other *RedirectBackend) bool {
	// A non-nil receiver never equals a nil argument.
	if other == nil {
		return false
	}
	if !in.LocalEndpointSelector.DeepEqual(&other.LocalEndpointSelector) {
		return false
	}
	// Element-wise comparison of ToPorts; `in`/`other` are shadowed with
	// pointers to the slice fields for the scope below. NOTE(review): a nil
	// slice and an empty non-nil slice compare as equal (both length 0).
	if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
		in, other := &in.ToPorts, &other.ToPorts
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *RedirectFrontend) DeepEqual(other *RedirectFrontend) bool {
	// A non-nil receiver never equals a nil argument.
	if other == nil {
		return false
	}
	// Pointer fields: unequal when exactly one side is nil; when both are
	// non-nil, delegate to the field's DeepEqual.
	if (in.AddressMatcher == nil) != (other.AddressMatcher == nil) {
		return false
	} else if in.AddressMatcher != nil {
		if !in.AddressMatcher.DeepEqual(other.AddressMatcher) {
			return false
		}
	}
	if (in.ServiceMatcher == nil) != (other.ServiceMatcher == nil) {
		return false
	} else if in.ServiceMatcher != nil {
		if !in.ServiceMatcher.DeepEqual(other.ServiceMatcher) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Service) DeepEqual(other *Service) bool {
	// A non-nil receiver never equals a nil argument.
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.Namespace != other.Namespace {
		return false
	}
	// Element-wise comparison of Ports using `!=` (elements are comparable).
	// `in`/`other` are shadowed with pointers to the slice fields below.
	// NOTE(review): a nil slice and an empty non-nil slice compare as equal.
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ServiceInfo) DeepEqual(other *ServiceInfo) bool {
	// A non-nil receiver never equals a nil argument.
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.Namespace != other.Namespace {
		return false
	}
	// Element-wise comparison of ToPorts via each element's DeepEqual.
	// `in`/`other` are shadowed with pointers to the slice fields below.
	// NOTE(review): a nil slice and an empty non-nil slice compare as equal.
	if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
		in, other := &in.ToPorts, &other.ToPorts
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ServiceListener) DeepEqual(other *ServiceListener) bool {
	// A non-nil receiver never equals a nil argument.
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.Namespace != other.Namespace {
		return false
	}
	// Element-wise comparison of Ports using `!=` (elements are comparable).
	// `in`/`other` are shadowed with pointers to the slice fields below.
	// NOTE(review): a nil slice and an empty non-nil slice compare as equal.
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.Listener != other.Listener {
		return false
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumBGPPeerConfigList is a list of CiliumBGPPeerConfig objects.
type CiliumBGPPeerConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumBGPPeerConfig.
	Items []CiliumBGPPeerConfig `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgppeerconfig",path="ciliumbgppeerconfigs",scope="Cluster",shortName={cbgppeer}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion
// CiliumBGPPeerConfig is a cluster-scoped resource carrying BGP peer session
// settings (transport, timers, authentication, graceful restart, multihop
// and address families).
type CiliumBGPPeerConfig struct {
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Spec is the specification of the desired behavior of the CiliumBGPPeerConfig.
	Spec CiliumBGPPeerConfigSpec `json:"spec"`
	// Status is the running status of the CiliumBGPPeerConfig
	//
	// +kubebuilder:validation:Optional
	Status CiliumBGPPeerConfigStatus `json:"status"`
}
// CiliumBGPPeerConfigSpec describes the desired BGP session parameters for a
// peer: transport, timers, authentication, graceful restart, eBGP multihop
// TTL and the address families to negotiate.
type CiliumBGPPeerConfigSpec struct {
	// Transport defines the BGP transport parameters for the peer.
	//
	// If not specified, the default transport parameters are used.
	//
	// +kubebuilder:validation:Optional
	Transport *CiliumBGPTransport `json:"transport,omitempty"`
	// Timers defines the BGP timers for the peer.
	//
	// If not specified, the default timers are used.
	//
	// +kubebuilder:validation:Optional
	Timers *CiliumBGPTimers `json:"timers,omitempty"`
	// AuthSecretRef is the name of the secret to use to fetch a TCP
	// authentication password for this peer.
	//
	// If not specified, no authentication is used.
	//
	// +kubebuilder:validation:Optional
	AuthSecretRef *string `json:"authSecretRef,omitempty"`
	// GracefulRestart defines graceful restart parameters which are negotiated
	// with this peer.
	//
	// If not specified, the graceful restart capability is disabled.
	//
	// +kubebuilder:validation:Optional
	GracefulRestart *CiliumBGPNeighborGracefulRestart `json:"gracefulRestart,omitempty"`
	// EBGPMultihopTTL controls the multi-hop feature for eBGP peers.
	// Its value defines the Time To Live (TTL) value used in BGP
	// packets sent to the peer.
	//
	// If not specified, EBGP multihop is disabled. This field is ignored for iBGP neighbors.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=255
	// +kubebuilder:default=1
	EBGPMultihop *int32 `json:"ebgpMultihop,omitempty"`
	// Families, if provided, defines a set of AFI/SAFIs the speaker will
	// negotiate with its peer.
	//
	// If not specified, the default families of IPv6/unicast and IPv4/unicast will be created.
	//
	// +kubebuilder:validation:Optional
	Families []CiliumBGPFamilyWithAdverts `json:"families,omitempty"`
}
// CiliumBGPPeerConfigStatus is the observed status of a CiliumBGPPeerConfig.
type CiliumBGPPeerConfigStatus struct {
	// The current conditions of the CiliumBGPPeerConfig
	//
	// +optional
	// +listType=map
	// +listMapKey=type
	// +deepequal-gen=false
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// Conditions for CiliumBGPPeerConfig. When you add a new condition, don't
// forget to update the below AllBGPPeerConfigConditions list as well.
const (
	// BGPPeerConfigConditionMissingAuthSecret is set when the referenced
	// auth secret is missing.
	BGPPeerConfigConditionMissingAuthSecret = "cilium.io/MissingAuthSecret"
)

// AllBGPPeerConfigConditions lists every condition type that may appear in a
// CiliumBGPPeerConfigStatus.
var AllBGPPeerConfigConditions = []string{
	BGPPeerConfigConditionMissingAuthSecret,
}
// CiliumBGPFamily represents an AFI/SAFI address family pair.
type CiliumBGPFamily struct {
	// Afi is the Address Family Identifier (AFI) of the family.
	//
	// +kubebuilder:validation:Enum=ipv4;ipv6;l2vpn;ls;opaque
	// +kubebuilder:validation:Required
	Afi string `json:"afi"`
	// Safi is the Subsequent Address Family Identifier (SAFI) of the family.
	//
	// +kubebuilder:validation:Enum=unicast;multicast;mpls_label;encapsulation;vpls;evpn;ls;sr_policy;mup;mpls_vpn;mpls_vpn_multicast;route_target_constraints;flowspec_unicast;flowspec_vpn;key_value
	// +kubebuilder:validation:Required
	Safi string `json:"safi"`
}
// CiliumBGPFamilyWithAdverts represents an AFI/SAFI address family pair along
// with a reference to BGP Advertisements.
type CiliumBGPFamilyWithAdverts struct {
	CiliumBGPFamily `json:",inline"`
	// Advertisements selects group of BGP Advertisement(s) to advertise for this family.
	//
	// If not specified, no advertisements are sent for this family.
	//
	// This field is ignored in CiliumBGPNeighbor which is used in CiliumBGPPeeringPolicy.
	// Use CiliumBGPPeeringPolicy advertisement options instead.
	//
	// +kubebuilder:validation:Optional
	Advertisements *slimv1.LabelSelector `json:"advertisements,omitempty"`
}
// CiliumBGPTransport defines the BGP transport parameters for the peer.
type CiliumBGPTransport struct {
	// Deprecated
	// LocalPort is the local port to be used for the BGP session.
	//
	// If not specified, ephemeral port will be picked to initiate a connection.
	//
	// This field is deprecated and will be removed in a future release.
	// Local port configuration is unnecessary and is not recommended.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	LocalPort *int32 `json:"localPort,omitempty"`
	// PeerPort is the peer port to be used for the BGP session.
	//
	// If not specified, defaults to TCP port 179 (the standard BGP port).
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=179
	PeerPort *int32 `json:"peerPort,omitempty"`
}
// SetDefaults replaces nil or zero-valued transport ports with the package
// defaults (DefaultBGPPeerLocalPort / DefaultBGPPeerPort).
func (t *CiliumBGPTransport) SetDefaults() {
	orDefault := func(port **int32, def int32) {
		if *port == nil || **port == 0 {
			*port = ptr.To[int32](def)
		}
	}
	orDefault(&t.LocalPort, DefaultBGPPeerLocalPort)
	orDefault(&t.PeerPort, DefaultBGPPeerPort)
}
// CiliumBGPTimers defines timers configuration for a BGP peer.
//
// +kubebuilder:validation:XValidation:rule="self.keepAliveTimeSeconds <= self.holdTimeSeconds", message="keepAliveTimeSeconds can not be larger than holdTimeSeconds"
type CiliumBGPTimers struct {
	// ConnectRetryTimeSeconds defines the initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8).
	//
	// If not specified, defaults to 120 seconds.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=2147483647
	// +kubebuilder:default=120
	ConnectRetryTimeSeconds *int32 `json:"connectRetryTimeSeconds,omitempty"`
	// HoldTimeSeconds defines the initial value for the BGP HoldTimer (RFC 4271, Section 4.2).
	// Updating this value will cause a session reset.
	//
	// If not specified, defaults to 90 seconds.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=3
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=90
	HoldTimeSeconds *int32 `json:"holdTimeSeconds,omitempty"`
	// KeepaliveTimeSeconds defines the initial value for the BGP KeepaliveTimer (RFC 4271, Section 8).
	// It can not be larger than HoldTimeSeconds. Updating this value will cause a session reset.
	//
	// If not specified, defaults to 30 seconds.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=30
	KeepAliveTimeSeconds *int32 `json:"keepAliveTimeSeconds,omitempty"`
}
// SetDefaults replaces nil or zero-valued timers with the package defaults
// (DefaultBGPConnectRetryTimeSeconds / DefaultBGPHoldTimeSeconds /
// DefaultBGPKeepAliveTimeSeconds).
func (t *CiliumBGPTimers) SetDefaults() {
	setIfUnset := func(timer **int32, def int32) {
		if *timer == nil || **timer == 0 {
			*timer = ptr.To[int32](def)
		}
	}
	setIfUnset(&t.ConnectRetryTimeSeconds, DefaultBGPConnectRetryTimeSeconds)
	setIfUnset(&t.HoldTimeSeconds, DefaultBGPHoldTimeSeconds)
	setIfUnset(&t.KeepAliveTimeSeconds, DefaultBGPKeepAliveTimeSeconds)
}
// SetDefaults fills in every unset field of the spec with its default:
// transport, timers and graceful-restart sub-structs are allocated and
// defaulted, EBGPMultihop falls back to DefaultBGPEBGPMultihopTTL, and an
// empty Families list becomes IPv6/unicast followed by IPv4/unicast.
// A nil receiver is a no-op.
func (p *CiliumBGPPeerConfigSpec) SetDefaults() {
	if p == nil {
		return
	}

	// Allocate missing sub-structs, then let each apply its own defaults.
	if p.Transport == nil {
		p.Transport = new(CiliumBGPTransport)
	}
	p.Transport.SetDefaults()

	if p.Timers == nil {
		p.Timers = new(CiliumBGPTimers)
	}
	p.Timers.SetDefaults()

	if p.EBGPMultihop == nil {
		p.EBGPMultihop = ptr.To[int32](DefaultBGPEBGPMultihopTTL)
	}

	if p.GracefulRestart == nil {
		p.GracefulRestart = new(CiliumBGPNeighborGracefulRestart)
	}
	p.GracefulRestart.SetDefaults()

	// Default families: IPv6/unicast first, then IPv4/unicast.
	if len(p.Families) == 0 {
		for _, afi := range []string{"ipv6", "ipv4"} {
			p.Families = append(p.Families, CiliumBGPFamilyWithAdverts{
				CiliumBGPFamily: CiliumBGPFamily{Afi: afi, Safi: "unicast"},
			})
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2alpha1
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
)
const (
	// DefaultBGPExportPodCIDR defines the default value for ExportPodCIDR determining whether to export the Node's private CIDR block.
	DefaultBGPExportPodCIDR = false
	// DefaultBGPPeerLocalPort defines the default value for the local port over which to connect to the peer.
	// By default, BGP control plane will not set this value, and the kernel will pick a random source port.
	DefaultBGPPeerLocalPort = 0
	// DefaultBGPPeerPort defines the TCP port number of a CiliumBGPNeighbor when PeerPort is unspecified.
	DefaultBGPPeerPort = 179
	// DefaultBGPEBGPMultihopTTL defines the default value for the TTL value used in BGP packets sent to the eBGP neighbors.
	DefaultBGPEBGPMultihopTTL = 1
	// DefaultBGPConnectRetryTimeSeconds defines the default initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8).
	DefaultBGPConnectRetryTimeSeconds = 120
	// DefaultBGPHoldTimeSeconds defines the default initial value for the BGP HoldTimer (RFC 4271, Section 4.2).
	DefaultBGPHoldTimeSeconds = 90
	// DefaultBGPKeepAliveTimeSeconds defines the default initial value for the BGP KeepaliveTimer (RFC 4271, Section 8).
	DefaultBGPKeepAliveTimeSeconds = 30
	// DefaultBGPGRRestartTimeSeconds defines default Restart Time for graceful restart (RFC 4724, section 4.2)
	DefaultBGPGRRestartTimeSeconds = 120
	// BGPLoadBalancerClass defines the BGP Control Plane load balancer class for Services.
	BGPLoadBalancerClass = "io.cilium/bgp-control-plane"
	// PodCIDRSelectorName defines the name for a selector matching Pod CIDRs
	// (standard cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs).
	PodCIDRSelectorName = "PodCIDR"
	// CiliumLoadBalancerIPPoolSelectorName defines the name for a selector matching CiliumLoadBalancerIPPool resources.
	CiliumLoadBalancerIPPoolSelectorName = "CiliumLoadBalancerIPPool"
	// CiliumPodIPPoolSelectorName defines the name for a selector matching CiliumPodIPPool resources.
	CiliumPodIPPoolSelectorName = CPIPKindDefinition
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgppeeringpolicy",path="ciliumbgppeeringpolicies",scope="Cluster",shortName={bgpp}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// +kubebuilder:storageversion
// +kubebuilder:deprecatedversion:warning="cilium.io/v2alpha1 CiliumBGPPeeringPolicy is deprecated; use cilium.io/v2 CRDs (CiliumBGPClusterConfig, CiliumBGPPeerConfig, CiliumBGPAdvertisement, CiliumBGPNodeConfigOverride) to configure BGP."
// CiliumBGPPeeringPolicy is a Kubernetes third-party resource for instructing
// Cilium's BGP control plane to create virtual BGP routers.
type CiliumBGPPeeringPolicy struct {
	// +deepequal-gen=false
	metav1.TypeMeta `json:",inline"`
	// +deepequal-gen=false
	metav1.ObjectMeta `json:"metadata"`
	// Spec is a human readable description of a BGP peering policy.
	//
	// +kubebuilder:validation:Optional
	Spec CiliumBGPPeeringPolicySpec `json:"spec,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumBGPPeeringPolicyList is a list of
// CiliumBGPPeeringPolicy objects.
type CiliumBGPPeeringPolicyList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	// Items is a list of CiliumBGPPeeringPolicies.
	Items []CiliumBGPPeeringPolicy `json:"items"`
}
// CiliumBGPPeeringPolicySpec specifies one or more CiliumBGPVirtualRouter(s)
// to apply to nodes matching its label selector.
type CiliumBGPPeeringPolicySpec struct {
	// NodeSelector selects a group of nodes where this BGP Peering
	// Policy applies.
	//
	// If empty / nil this policy applies to all nodes.
	//
	// +kubebuilder:validation:Optional
	NodeSelector *slimv1.LabelSelector `json:"nodeSelector,omitempty"`
	// A list of CiliumBGPVirtualRouter(s) which instructs
	// the BGP control plane how to instantiate virtual BGP routers.
	//
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:MinItems=1
	VirtualRouters []CiliumBGPVirtualRouter `json:"virtualRouters"`
}
// CiliumBGPNeighborGracefulRestart holds the graceful restart parameters
// negotiated with a BGP peer (RFC 4724).
type CiliumBGPNeighborGracefulRestart struct {
	// Enabled flag, when set enables graceful restart capability.
	//
	// +kubebuilder:validation:Required
	Enabled bool `json:"enabled"`
	// RestartTimeSeconds is the estimated time it will take for the BGP
	// session to be re-established with peer after a restart.
	// After this period, peer will remove stale routes. This is
	// described in RFC 4724 section 4.2.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=4095
	// +kubebuilder:default=120
	RestartTimeSeconds *int32 `json:"restartTimeSeconds,omitempty"`
}
// SetDefaults replaces a nil or zero RestartTimeSeconds with
// DefaultBGPGRRestartTimeSeconds.
func (gr *CiliumBGPNeighborGracefulRestart) SetDefaults() {
	rt := gr.RestartTimeSeconds
	if rt == nil || *rt == 0 {
		gr.RestartTimeSeconds = ptr.To[int32](DefaultBGPGRRestartTimeSeconds)
	}
}
// BGPStandardCommunity type represents a value of the "standard" 32-bit BGP Communities Attribute (RFC 1997)
// as a 4-byte decimal number or two 2-byte decimal numbers separated by a colon (<0-65535>:<0-65535>).
// For example, no-export community value is 65535:65281.
// +kubebuilder:validation:Pattern=`^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$|^([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5]):([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
type BGPStandardCommunity string
// BGPWellKnownCommunity type represents a value of the "standard" 32-bit BGP Communities Attribute (RFC 1997)
// as a well-known string alias to its numeric value. Allowed values and their mapping to the numeric values:
//
//	internet                   = 0x00000000 (0:0)
//	planned-shut               = 0xffff0000 (65535:0)
//	accept-own                 = 0xffff0001 (65535:1)
//	route-filter-translated-v4 = 0xffff0002 (65535:2)
//	route-filter-v4            = 0xffff0003 (65535:3)
//	route-filter-translated-v6 = 0xffff0004 (65535:4)
//	route-filter-v6            = 0xffff0005 (65535:5)
//	llgr-stale                 = 0xffff0006 (65535:6)
//	no-llgr                    = 0xffff0007 (65535:7)
//	blackhole                  = 0xffff029a (65535:666)
//	no-export                  = 0xffffff01 (65535:65281)
//	no-advertise               = 0xffffff02 (65535:65282)
//	no-export-subconfed        = 0xffffff03 (65535:65283)
//	no-peer                    = 0xffffff04 (65535:65284)
//
// +kubebuilder:validation:Enum=internet;planned-shut;accept-own;route-filter-translated-v4;route-filter-v4;route-filter-translated-v6;route-filter-v6;llgr-stale;no-llgr;blackhole;no-export;no-advertise;no-export-subconfed;no-peer
type BGPWellKnownCommunity string
// BGPLargeCommunity type represents a value of the BGP Large Communities Attribute (RFC 8092),
// as three 4-byte decimal numbers separated by colons (<0-4294967295>:<0-4294967295>:<0-4294967295>).
// +kubebuilder:validation:Pattern=`^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$`
type BGPLargeCommunity string
// BGPCommunities holds community values of the supported BGP community path attributes.
type BGPCommunities struct {
	// Standard holds a list of "standard" 32-bit BGP Communities Attribute (RFC 1997) values defined as numeric values.
	//
	// +kubebuilder:validation:Optional
	Standard []BGPStandardCommunity `json:"standard,omitempty"`
	// WellKnown holds a list "standard" 32-bit BGP Communities Attribute (RFC 1997) values defined as
	// well-known string aliases to their numeric values.
	//
	// +kubebuilder:validation:Optional
	WellKnown []BGPWellKnownCommunity `json:"wellKnown,omitempty"`
	// Large holds a list of the BGP Large Communities Attribute (RFC 8092) values.
	//
	// +kubebuilder:validation:Optional
	Large []BGPLargeCommunity `json:"large,omitempty"`
}
// CiliumBGPPathAttributes can be used to apply additional path attributes
// to matched routes when advertising them to a BGP peer.
type CiliumBGPPathAttributes struct {
	// SelectorType defines the object type on which the Selector applies:
	// - For "PodCIDR" the Selector matches k8s CiliumNode resources
	//   (path attributes apply to routes announced for PodCIDRs of selected CiliumNodes.
	//   Only affects routes of cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs.
	// - For "CiliumLoadBalancerIPPool" the Selector matches CiliumLoadBalancerIPPool custom resources
	//   (path attributes apply to routes announced for selected CiliumLoadBalancerIPPools).
	// - For "CiliumPodIPPool" the Selector matches CiliumPodIPPool custom resources
	//   (path attributes apply to routes announced for allocated CIDRs of selected CiliumPodIPPools).
	//
	// +kubebuilder:validation:Enum=PodCIDR;CiliumLoadBalancerIPPool;CiliumPodIPPool
	// +kubebuilder:validation:Required
	SelectorType string `json:"selectorType"`
	// Selector selects a group of objects of the SelectorType
	// resulting into routes that will be announced with the configured Attributes.
	// If nil / not set, all objects of the SelectorType are selected.
	//
	// +kubebuilder:validation:Optional
	Selector *slimv1.LabelSelector `json:"selector,omitempty"`
	// Communities defines a set of community values advertised in the supported BGP Communities path attributes.
	// If nil / not set, no BGP Communities path attribute will be advertised.
	//
	// +kubebuilder:validation:Optional
	Communities *BGPCommunities `json:"communities,omitempty"`
	// LocalPreference defines the preference value advertised in the BGP Local Preference path attribute.
	// As Local Preference is only valid for iBGP peers, this value will be ignored for eBGP peers
	// (no Local Preference path attribute will be advertised).
	// If nil / not set, the default Local Preference of 100 will be advertised in
	// the Local Preference path attribute for iBGP peers.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=4294967295
	LocalPreference *int64 `json:"localPreference,omitempty"`
}
// CiliumBGPNeighbor is a neighboring peer for use in a
// CiliumBGPVirtualRouter configuration.
type CiliumBGPNeighbor struct {
	// PeerAddress is the IP address of the peer.
	// This must be in CIDR notation and use a /32 to express
	// a single host.
	//
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Format=cidr
	PeerAddress string `json:"peerAddress"`
	// PeerPort is the TCP port of the peer. 1-65535 is the range of
	// valid port numbers that can be specified. If unset, defaults to 179.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=179
	PeerPort *int32 `json:"peerPort,omitempty"`
	// PeerASN is the ASN of the peer BGP router.
	// Supports extended 32bit ASNs.
	//
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=4294967295
	PeerASN int64 `json:"peerASN"`
	// AuthSecretRef is the name of the secret to use to fetch a TCP
	// authentication password for this peer.
	// +kubebuilder:validation:Optional
	AuthSecretRef *string `json:"authSecretRef,omitempty"`
	// EBGPMultihopTTL controls the multi-hop feature for eBGP peers.
	// Its value defines the Time To Live (TTL) value used in BGP packets sent to the neighbor.
	// The value 1 implies that eBGP multi-hop feature is disabled (only a single hop is allowed).
	// This field is ignored for iBGP peers.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=255
	// +kubebuilder:default=1
	EBGPMultihopTTL *int32 `json:"eBGPMultihopTTL,omitempty"`
	// ConnectRetryTimeSeconds defines the initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8).
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=2147483647
	// +kubebuilder:default=120
	ConnectRetryTimeSeconds *int32 `json:"connectRetryTimeSeconds,omitempty"`
	// HoldTimeSeconds defines the initial value for the BGP HoldTimer (RFC 4271, Section 4.2).
	// Updating this value will cause a session reset.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=3
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=90
	HoldTimeSeconds *int32 `json:"holdTimeSeconds,omitempty"`
	// KeepaliveTimeSeconds defines the initial value for the BGP KeepaliveTimer (RFC 4271, Section 8).
	// It can not be larger than HoldTimeSeconds. Updating this value will cause a session reset.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:default=30
	KeepAliveTimeSeconds *int32 `json:"keepAliveTimeSeconds,omitempty"`
	// GracefulRestart defines graceful restart parameters which are negotiated
	// with this neighbor. If empty / nil, the graceful restart capability is disabled.
	//
	// +kubebuilder:validation:Optional
	GracefulRestart *CiliumBGPNeighborGracefulRestart `json:"gracefulRestart,omitempty"`
	// Families, if provided, defines a set of AFI/SAFIs the speaker will
	// negotiate with its peer.
	//
	// If this slice is not provided the default families of IPv6 and IPv4 will
	// be provided.
	//
	// +kubebuilder:validation:Optional
	Families []CiliumBGPFamily `json:"families"`
	// AdvertisedPathAttributes can be used to apply additional path attributes
	// to selected routes when advertising them to the peer.
	// If empty / nil, no additional path attributes are advertised.
	//
	// +kubebuilder:validation:Optional
	AdvertisedPathAttributes []CiliumBGPPathAttributes `json:"advertisedPathAttributes,omitempty"`
}
// CiliumBGPVirtualRouter defines a discrete BGP virtual router configuration.
type CiliumBGPVirtualRouter struct {
	// LocalASN is the ASN of this virtual router.
	// Supports extended 32bit ASNs.
	//
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=4294967295
	LocalASN int64 `json:"localASN"`
	// ExportPodCIDR determines whether to export the Node's private CIDR block
	// to the configured neighbors.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:default=false
	ExportPodCIDR *bool `json:"exportPodCIDR,omitempty"`
	// PodIPPoolSelector selects CiliumPodIPPools based on labels. The virtual
	// router will announce allocated CIDRs of matching CiliumPodIPPools.
	//
	// If empty / nil no CiliumPodIPPools will be announced.
	//
	// +kubebuilder:validation:Optional
	PodIPPoolSelector *slimv1.LabelSelector `json:"podIPPoolSelector,omitempty"`
	// ServiceSelector selects a group of load balancer services which this
	// virtual router will announce. The loadBalancerClass for a service must
	// be nil or specify a class supported by Cilium, e.g. "io.cilium/bgp-control-plane".
	// Refer to the following document for additional details regarding load balancer
	// classes:
	//
	//   https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
	//
	// If empty / nil no services will be announced.
	//
	// +kubebuilder:validation:Optional
	ServiceSelector *slimv1.LabelSelector `json:"serviceSelector,omitempty"`
	// ServiceAdvertisements selects a group of BGP Advertisement(s) to advertise
	// for the selected services.
	//
	// If not specified, defaults to advertising the LoadBalancer IP only.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:default={LoadBalancerIP}
	ServiceAdvertisements []BGPServiceAddressType `json:"serviceAdvertisements,omitempty"`
	// Neighbors is a list of neighboring BGP peers for this virtual router.
	//
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:MinItems=1
	Neighbors []CiliumBGPNeighbor `json:"neighbors"`
}
// SetDefaults applies default values on the CiliumBGPPeeringPolicy.
// This is normally done by kube-apiserver for fields with explicit static defaults,
// the main use of this method is to avoid the need for nil-checks in the controller code.
func (p *CiliumBGPPeeringPolicy) SetDefaults() {
	// Default every virtual router in place (index-based so mutations stick).
	routers := p.Spec.VirtualRouters
	for i := 0; i < len(routers); i++ {
		routers[i].SetDefaults()
	}
}
// SetDefaults applies default values on the CiliumBGPVirtualRouter.
// This is normally done by kube-apiserver for fields with explicit static defaults,
// the main use of this method is to avoid the need for nil-checks in the controller code.
func (r *CiliumBGPVirtualRouter) SetDefaults() {
	if r.ExportPodCIDR == nil {
		r.ExportPodCIDR = ptr.To[bool](DefaultBGPExportPodCIDR)
	}
	// An unset advertisement list defaults to LoadBalancer IP advertisement.
	if r.ServiceAdvertisements == nil {
		r.ServiceAdvertisements = []BGPServiceAddressType{BGPLoadBalancerIPAddr}
	}
	// Default every neighbor in place (index-based so mutations stick).
	for i := 0; i < len(r.Neighbors); i++ {
		r.Neighbors[i].SetDefaults()
	}
}
// SetDefaults applies default values on the CiliumBGPNeighbor.
// This is normally done by kube-apiserver for fields with explicit static defaults,
// the main use of this method is to avoid the need for nil-checks in the controller code.
func (n *CiliumBGPNeighbor) SetDefaults() {
	// applyDefault fills a timer/port field that is nil or zero.
	applyDefault := func(field **int32, def int32) {
		if *field == nil || **field == 0 {
			*field = ptr.To[int32](def)
		}
	}
	applyDefault(&n.PeerPort, DefaultBGPPeerPort)
	applyDefault(&n.ConnectRetryTimeSeconds, DefaultBGPConnectRetryTimeSeconds)
	applyDefault(&n.HoldTimeSeconds, DefaultBGPHoldTimeSeconds)
	applyDefault(&n.KeepAliveTimeSeconds, DefaultBGPKeepAliveTimeSeconds)

	// EBGPMultihopTTL is only defaulted when unset; an explicit zero is kept.
	if n.EBGPMultihopTTL == nil {
		n.EBGPMultihopTTL = ptr.To[int32](DefaultBGPEBGPMultihopTTL)
	}

	// The restart time is only defaulted when graceful restart is enabled.
	if gr := n.GracefulRestart; gr != nil && gr.Enabled {
		applyDefault(&gr.RestartTimeSeconds, DefaultBGPGRRestartTimeSeconds)
	}

	// Default families: IPv4/unicast first, then IPv6/unicast.
	if len(n.Families) == 0 {
		n.Families = []CiliumBGPFamily{
			{Afi: "ipv4", Safi: "unicast"},
			{Afi: "ipv6", Safi: "unicast"},
		}
	}
}
// Validate validates CiliumBGPNeighbor's configuration constraints
// that can not be expressed using the kubebuilder validation markers:
// the (defaulted) keepalive interval must not exceed the hold time.
func (n *CiliumBGPNeighbor) Validate() error {
	var (
		keepalive = ptr.Deref[int32](n.KeepAliveTimeSeconds, DefaultBGPKeepAliveTimeSeconds)
		hold      = ptr.Deref[int32](n.HoldTimeSeconds, DefaultBGPHoldTimeSeconds)
	)
	if keepalive <= hold {
		return nil
	}
	return fmt.Errorf("KeepAliveTimeSeconds larger than HoldTimeSeconds for peer ASN:%d IP:%s", n.PeerASN, n.PeerAddress)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
)
const (
	// CustomResourceDefinitionGroup is the name of the third party resource group
	CustomResourceDefinitionGroup = k8sconst.CustomResourceDefinitionGroup
	// CustomResourceDefinitionVersion is the current version of the resource
	CustomResourceDefinitionVersion = "v2alpha1"

	// Cilium Endpoint Slice (CES)

	// CESPluralName is the plural name of Cilium Endpoint Slice
	CESPluralName = "ciliumendpointslices"
	// CESKindDefinition is the kind name of Cilium Endpoint Slice
	CESKindDefinition = "CiliumEndpointSlice"
	// CESName is the full name of Cilium Endpoint Slice
	CESName = CESPluralName + "." + CustomResourceDefinitionGroup

	// Cilium BGP Peering Policy (BGPP)

	// BGPPPluralName is the plural name of Cilium BGP Peering Policy
	BGPPPluralName = "ciliumbgppeeringpolicies"
	// BGPPKindDefinition is the kind name of Cilium BGP Peering Policy
	BGPPKindDefinition = "CiliumBGPPeeringPolicy"
	// BGPPName is the full name of Cilium BGP Peering Policy
	BGPPName = BGPPPluralName + "." + CustomResourceDefinitionGroup

	// BGPClusterConfig (BGPCC)

	// BGPCCPluralName is the plural name of Cilium BGP Cluster Config
	BGPCCPluralName = "ciliumbgpclusterconfigs"
	// BGPCCKindDefinition is the kind name of Cilium BGP Cluster Config
	BGPCCKindDefinition = "CiliumBGPClusterConfig"
	// BGPCCName is the full name of Cilium BGP Cluster Config
	BGPCCName = BGPCCPluralName + "." + CustomResourceDefinitionGroup

	// BGPPeerConfig (BGPPC)

	// BGPPCPluralName is the plural name of Cilium BGP Peer Config
	BGPPCPluralName = "ciliumbgppeerconfigs"
	// BGPPCKindDefinition is the kind name of Cilium BGP Peer Config
	BGPPCKindDefinition = "CiliumBGPPeerConfig"
	// BGPPCName is the full name of Cilium BGP Peer Config
	BGPPCName = BGPPCPluralName + "." + CustomResourceDefinitionGroup

	// BGPAdvertisement (BGPA)

	// BGPAPluralName is the plural name of Cilium BGP Advertisement
	BGPAPluralName = "ciliumbgpadvertisements"
	// BGPAKindDefinition is the kind name of Cilium BGP Advertisement
	BGPAKindDefinition = "CiliumBGPAdvertisement"
	// BGPAName is the full name of Cilium BGP Advertisement
	BGPAName = BGPAPluralName + "." + CustomResourceDefinitionGroup

	// BGPNodeConfig (BGPNC)

	// BGPNCPluralName is the plural name of Cilium BGP Node Config
	BGPNCPluralName = "ciliumbgpnodeconfigs"
	// BGPNCKindDefinition is the kind name of Cilium BGP Node Config
	BGPNCKindDefinition = "CiliumBGPNodeConfig"
	// BGPNCName is the full name of Cilium BGP Node Config
	BGPNCName = BGPNCPluralName + "." + CustomResourceDefinitionGroup

	// BGPNodeConfigOverride (BGPNCO)

	// BGPNCOPluralName is the plural name of Cilium BGP Node Config Override
	BGPNCOPluralName = "ciliumbgpnodeconfigoverrides"
	// BGPNCOKindDefinition is the kind name of Cilium BGP Node Config Override
	BGPNCOKindDefinition = "CiliumBGPNodeConfigOverride"
	// BGPNCOName is the full name of Cilium BGP Node Config Override
	BGPNCOName = BGPNCOPluralName + "." + CustomResourceDefinitionGroup

	// Cilium Load Balancer IP Pool (IPPool)

	// PoolPluralName is the plural name of Cilium Load Balancer IP Pool
	PoolPluralName = "ciliumloadbalancerippools"
	// PoolKindDefinition is the kind name of Cilium Load Balancer IP Pool
	PoolKindDefinition = "CiliumLoadBalancerIPPool"
	// LBIPPoolName is the full name of Cilium Load Balancer IP Pool
	LBIPPoolName = PoolPluralName + "." + CustomResourceDefinitionGroup

	// CiliumNodeConfig (CNC)

	// CNCPluralName is the plural name of Cilium Node Config
	CNCPluralName = "ciliumnodeconfigs"
	// CNCKindDefinition is the kind name of Cilium Node Config
	CNCKindDefinition = "CiliumNodeConfig"
	// CNCName is the full name of Cilium Node Config
	CNCName = CNCPluralName + "." + CustomResourceDefinitionGroup

	// Cilium L2 Announcement policy

	// L2AnnouncementSingularName is the singular name of Cilium L2 announcement policy
	L2AnnouncementSingularName = "ciliuml2announcementpolicy"
	// L2AnnouncementPluralName is the plural name of Cilium L2 announcement policy
	L2AnnouncementPluralName = "ciliuml2announcementpolicies"
	// L2AnnouncementKindDefinition is the kind name of Cilium L2 announcement policy
	L2AnnouncementKindDefinition = "CiliumL2AnnouncementPolicy"
	// L2AnnouncementName is the full name of Cilium L2 announcement policy
	L2AnnouncementName = L2AnnouncementPluralName + "." + CustomResourceDefinitionGroup

	// CiliumPodIPPool (CPIP)

	// CPIPPluralName is the plural name of Cilium Pod IP Pool
	CPIPPluralName = "ciliumpodippools"
	// CPIPKindDefinition is the kind name of Cilium Pod IP Pool
	CPIPKindDefinition = "CiliumPodIPPool"
	// CPIPName is the full name of Cilium Pod IP Pool
	CPIPName = CPIPPluralName + "." + CustomResourceDefinitionGroup

	// CiliumGatewayClassConfig (CGCC)

	// CGCCPluralName is the plural name of Cilium Gateway Class Config
	CGCCPluralName = "ciliumgatewayclassconfigs"
	// CGCCListName is the plural list name of Cilium Gateway Class Config
	CGCCListName = "ciliumgatewayclassconfiglists"
	// CGCCKindDefinition is the kind name of Cilium Gateway Class Config
	CGCCKindDefinition = "CiliumGatewayClassConfig"
	// CGCCName is the full name of Cilium Gateway Class Config
	CGCCName = CGCCPluralName + "." + CustomResourceDefinitionGroup
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{
	// Group and Version are sourced from the package-level CRD constants.
	Group:   CustomResourceDefinitionGroup,
	Version: CustomResourceDefinitionVersion,
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	// Qualify the resource name with this package's group/version, then drop
	// the version to yield the group-qualified resource.
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
	// SchemeBuilder is needed by DeepCopy generator.
	// It collects the functions that register this package's types on a scheme.
	SchemeBuilder runtime.SchemeBuilder
	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme adds all types of this clientset into the given scheme.
	// This allows composition of clientsets, like in:
	//
	//	import (
	//	  "k8s.io/client-go/kubernetes"
	//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
	//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
	//	)
	//
	//	kclientset, _ := kubernetes.NewForConfig(c)
	//	aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
	AddToScheme = localSchemeBuilder.AddToScheme
)
// init wires the hand-written registration function into the scheme builder.
func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes)
}
// addKnownTypes adds the list of known types to the given scheme (api.Scheme).
// Every object and its corresponding List type must be registered here so the
// scheme can encode/decode them for this group/version.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&CiliumEndpointSlice{},
		&CiliumEndpointSliceList{},
		&CiliumBGPPeeringPolicy{},
		&CiliumBGPPeeringPolicyList{},
		&CiliumLoadBalancerIPPool{},
		&CiliumLoadBalancerIPPoolList{},
		&CiliumL2AnnouncementPolicy{},
		&CiliumL2AnnouncementPolicyList{},
		&CiliumPodIPPool{},
		&CiliumPodIPPoolList{},
		&CiliumNodeConfig{},
		&CiliumNodeConfigList{},
		// new BGP types
		&CiliumBGPClusterConfig{},
		&CiliumBGPClusterConfigList{},
		&CiliumBGPPeerConfig{},
		&CiliumBGPPeerConfigList{},
		&CiliumBGPAdvertisement{},
		&CiliumBGPAdvertisementList{},
		&CiliumBGPNodeConfig{},
		&CiliumBGPNodeConfigList{},
		&CiliumBGPNodeConfigOverride{},
		&CiliumBGPNodeConfigOverrideList{},
		// new Gateway API types
		&CiliumGatewayClassConfig{},
		&CiliumGatewayClassConfigList{},
	)
	// Also register the shared meta types (ListOptions, WatchEvent, ...) for
	// this group/version.
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v2alpha1
import (
models "github.com/cilium/cilium/api/v1/models"
v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
api "github.com/cilium/cilium/pkg/policy/api"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPAdvertisement) DeepCopyInto(out *BGPAdvertisement) {
	// Shallow-copy all value fields first, then deep-copy each pointer field
	// so the copy shares no memory with the receiver.
	*out = *in
	if in.Service != nil {
		in, out := &in.Service, &out.Service
		*out = new(BGPServiceOptions)
		(*in).DeepCopyInto(*out)
	}
	if in.Selector != nil {
		in, out := &in.Selector, &out.Selector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = new(BGPAttributes)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPAdvertisement.
func (in *BGPAdvertisement) DeepCopy() *BGPAdvertisement {
	if in == nil {
		return nil
	}
	out := new(BGPAdvertisement)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPAttributes) DeepCopyInto(out *BGPAttributes) {
*out = *in
if in.Communities != nil {
in, out := &in.Communities, &out.Communities
*out = new(BGPCommunities)
(*in).DeepCopyInto(*out)
}
if in.LocalPreference != nil {
in, out := &in.LocalPreference, &out.LocalPreference
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPAttributes.
func (in *BGPAttributes) DeepCopy() *BGPAttributes {
if in == nil {
return nil
}
out := new(BGPAttributes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPCommunities) DeepCopyInto(out *BGPCommunities) {
*out = *in
if in.Standard != nil {
in, out := &in.Standard, &out.Standard
*out = make([]BGPStandardCommunity, len(*in))
copy(*out, *in)
}
if in.WellKnown != nil {
in, out := &in.WellKnown, &out.WellKnown
*out = make([]BGPWellKnownCommunity, len(*in))
copy(*out, *in)
}
if in.Large != nil {
in, out := &in.Large, &out.Large
*out = make([]BGPLargeCommunity, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPCommunities.
func (in *BGPCommunities) DeepCopy() *BGPCommunities {
if in == nil {
return nil
}
out := new(BGPCommunities)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPFamilyRouteCount) DeepCopyInto(out *BGPFamilyRouteCount) {
*out = *in
if in.Received != nil {
in, out := &in.Received, &out.Received
*out = new(int32)
**out = **in
}
if in.Advertised != nil {
in, out := &in.Advertised, &out.Advertised
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPFamilyRouteCount.
func (in *BGPFamilyRouteCount) DeepCopy() *BGPFamilyRouteCount {
if in == nil {
return nil
}
out := new(BGPFamilyRouteCount)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BGPServiceOptions) DeepCopyInto(out *BGPServiceOptions) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]BGPServiceAddressType, len(*in))
copy(*out, *in)
}
if in.AggregationLengthIPv4 != nil {
in, out := &in.AggregationLengthIPv4, &out.AggregationLengthIPv4
*out = new(int16)
**out = **in
}
if in.AggregationLengthIPv6 != nil {
in, out := &in.AggregationLengthIPv6, &out.AggregationLengthIPv6
*out = new(int16)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPServiceOptions.
func (in *BGPServiceOptions) DeepCopy() *BGPServiceOptions {
if in == nil {
return nil
}
out := new(BGPServiceOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPAdvertisement) DeepCopyInto(out *CiliumBGPAdvertisement) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisement.
func (in *CiliumBGPAdvertisement) DeepCopy() *CiliumBGPAdvertisement {
if in == nil {
return nil
}
out := new(CiliumBGPAdvertisement)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPAdvertisement) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPAdvertisementList) DeepCopyInto(out *CiliumBGPAdvertisementList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPAdvertisement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisementList.
func (in *CiliumBGPAdvertisementList) DeepCopy() *CiliumBGPAdvertisementList {
if in == nil {
return nil
}
out := new(CiliumBGPAdvertisementList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPAdvertisementList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPAdvertisementSpec) DeepCopyInto(out *CiliumBGPAdvertisementSpec) {
*out = *in
if in.Advertisements != nil {
in, out := &in.Advertisements, &out.Advertisements
*out = make([]BGPAdvertisement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisementSpec.
func (in *CiliumBGPAdvertisementSpec) DeepCopy() *CiliumBGPAdvertisementSpec {
if in == nil {
return nil
}
out := new(CiliumBGPAdvertisementSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPClusterConfig) DeepCopyInto(out *CiliumBGPClusterConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfig.
func (in *CiliumBGPClusterConfig) DeepCopy() *CiliumBGPClusterConfig {
if in == nil {
return nil
}
out := new(CiliumBGPClusterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPClusterConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPClusterConfigList) DeepCopyInto(out *CiliumBGPClusterConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPClusterConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigList.
func (in *CiliumBGPClusterConfigList) DeepCopy() *CiliumBGPClusterConfigList {
if in == nil {
return nil
}
out := new(CiliumBGPClusterConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPClusterConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPClusterConfigSpec) DeepCopyInto(out *CiliumBGPClusterConfigSpec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.BGPInstances != nil {
in, out := &in.BGPInstances, &out.BGPInstances
*out = make([]CiliumBGPInstance, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigSpec.
func (in *CiliumBGPClusterConfigSpec) DeepCopy() *CiliumBGPClusterConfigSpec {
if in == nil {
return nil
}
out := new(CiliumBGPClusterConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPClusterConfigStatus) DeepCopyInto(out *CiliumBGPClusterConfigStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigStatus.
func (in *CiliumBGPClusterConfigStatus) DeepCopy() *CiliumBGPClusterConfigStatus {
if in == nil {
return nil
}
out := new(CiliumBGPClusterConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto copies the receiver into out; out must be non-nil.
// CiliumBGPFamily contains only value fields, so a plain struct assignment
// is already a complete deep copy.
func (in *CiliumBGPFamily) DeepCopyInto(out *CiliumBGPFamily) {
	*out = *in
}

// DeepCopy returns a freshly allocated copy of the receiver, or nil when the
// receiver itself is nil.
func (in *CiliumBGPFamily) DeepCopy() *CiliumBGPFamily {
	if in == nil {
		return nil
	}
	copied := new(CiliumBGPFamily)
	in.DeepCopyInto(copied)
	return copied
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPFamilyWithAdverts) DeepCopyInto(out *CiliumBGPFamilyWithAdverts) {
*out = *in
out.CiliumBGPFamily = in.CiliumBGPFamily
if in.Advertisements != nil {
in, out := &in.Advertisements, &out.Advertisements
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPFamilyWithAdverts.
func (in *CiliumBGPFamilyWithAdverts) DeepCopy() *CiliumBGPFamilyWithAdverts {
if in == nil {
return nil
}
out := new(CiliumBGPFamilyWithAdverts)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPInstance) DeepCopyInto(out *CiliumBGPInstance) {
*out = *in
if in.LocalASN != nil {
in, out := &in.LocalASN, &out.LocalASN
*out = new(int64)
**out = **in
}
if in.LocalPort != nil {
in, out := &in.LocalPort, &out.LocalPort
*out = new(int32)
**out = **in
}
if in.Peers != nil {
in, out := &in.Peers, &out.Peers
*out = make([]CiliumBGPPeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPInstance.
func (in *CiliumBGPInstance) DeepCopy() *CiliumBGPInstance {
if in == nil {
return nil
}
out := new(CiliumBGPInstance)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNeighbor) DeepCopyInto(out *CiliumBGPNeighbor) {
	// Shallow-copy the value fields, then allocate fresh storage for every
	// pointer/slice field so the copy is fully independent of the receiver.
	*out = *in
	if in.PeerPort != nil {
		in, out := &in.PeerPort, &out.PeerPort
		*out = new(int32)
		**out = **in
	}
	if in.AuthSecretRef != nil {
		in, out := &in.AuthSecretRef, &out.AuthSecretRef
		*out = new(string)
		**out = **in
	}
	if in.EBGPMultihopTTL != nil {
		in, out := &in.EBGPMultihopTTL, &out.EBGPMultihopTTL
		*out = new(int32)
		**out = **in
	}
	if in.ConnectRetryTimeSeconds != nil {
		in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.HoldTimeSeconds != nil {
		in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.KeepAliveTimeSeconds != nil {
		in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.GracefulRestart != nil {
		in, out := &in.GracefulRestart, &out.GracefulRestart
		*out = new(CiliumBGPNeighborGracefulRestart)
		(*in).DeepCopyInto(*out)
	}
	// Families elements are value-only structs, so copy() suffices here.
	if in.Families != nil {
		in, out := &in.Families, &out.Families
		*out = make([]CiliumBGPFamily, len(*in))
		copy(*out, *in)
	}
	// AdvertisedPathAttributes elements contain pointers and need per-element
	// deep copies.
	if in.AdvertisedPathAttributes != nil {
		in, out := &in.AdvertisedPathAttributes, &out.AdvertisedPathAttributes
		*out = make([]CiliumBGPPathAttributes, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighbor.
func (in *CiliumBGPNeighbor) DeepCopy() *CiliumBGPNeighbor {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPNeighbor)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNeighborGracefulRestart) DeepCopyInto(out *CiliumBGPNeighborGracefulRestart) {
	// Value fields (Enabled) are covered by the struct assignment; the only
	// pointer field gets its own allocation.
	*out = *in
	if in.RestartTimeSeconds != nil {
		in, out := &in.RestartTimeSeconds, &out.RestartTimeSeconds
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighborGracefulRestart.
func (in *CiliumBGPNeighborGracefulRestart) DeepCopy() *CiliumBGPNeighborGracefulRestart {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPNeighborGracefulRestart)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfig) DeepCopyInto(out *CiliumBGPNodeConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfig.
func (in *CiliumBGPNodeConfig) DeepCopy() *CiliumBGPNodeConfig {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPNodeConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigInstanceOverride) DeepCopyInto(out *CiliumBGPNodeConfigInstanceOverride) {
*out = *in
if in.RouterID != nil {
in, out := &in.RouterID, &out.RouterID
*out = new(string)
**out = **in
}
if in.LocalPort != nil {
in, out := &in.LocalPort, &out.LocalPort
*out = new(int32)
**out = **in
}
if in.Peers != nil {
in, out := &in.Peers, &out.Peers
*out = make([]CiliumBGPNodeConfigPeerOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.LocalASN != nil {
in, out := &in.LocalASN, &out.LocalASN
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigInstanceOverride.
func (in *CiliumBGPNodeConfigInstanceOverride) DeepCopy() *CiliumBGPNodeConfigInstanceOverride {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigInstanceOverride)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigList) DeepCopyInto(out *CiliumBGPNodeConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPNodeConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigList.
func (in *CiliumBGPNodeConfigList) DeepCopy() *CiliumBGPNodeConfigList {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPNodeConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigOverride) DeepCopyInto(out *CiliumBGPNodeConfigOverride) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverride.
func (in *CiliumBGPNodeConfigOverride) DeepCopy() *CiliumBGPNodeConfigOverride {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigOverride)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPNodeConfigOverride) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigOverrideList) DeepCopyInto(out *CiliumBGPNodeConfigOverrideList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumBGPNodeConfigOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverrideList.
func (in *CiliumBGPNodeConfigOverrideList) DeepCopy() *CiliumBGPNodeConfigOverrideList {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigOverrideList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPNodeConfigOverrideList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigOverrideSpec) DeepCopyInto(out *CiliumBGPNodeConfigOverrideSpec) {
*out = *in
if in.BGPInstances != nil {
in, out := &in.BGPInstances, &out.BGPInstances
*out = make([]CiliumBGPNodeConfigInstanceOverride, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverrideSpec.
func (in *CiliumBGPNodeConfigOverrideSpec) DeepCopy() *CiliumBGPNodeConfigOverrideSpec {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigOverrideSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeConfigPeerOverride) DeepCopyInto(out *CiliumBGPNodeConfigPeerOverride) {
*out = *in
if in.LocalAddress != nil {
in, out := &in.LocalAddress, &out.LocalAddress
*out = new(string)
**out = **in
}
if in.LocalPort != nil {
in, out := &in.LocalPort, &out.LocalPort
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigPeerOverride.
func (in *CiliumBGPNodeConfigPeerOverride) DeepCopy() *CiliumBGPNodeConfigPeerOverride {
if in == nil {
return nil
}
out := new(CiliumBGPNodeConfigPeerOverride)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeInstance) DeepCopyInto(out *CiliumBGPNodeInstance) {
*out = *in
if in.LocalASN != nil {
in, out := &in.LocalASN, &out.LocalASN
*out = new(int64)
**out = **in
}
if in.RouterID != nil {
in, out := &in.RouterID, &out.RouterID
*out = new(string)
**out = **in
}
if in.LocalPort != nil {
in, out := &in.LocalPort, &out.LocalPort
*out = new(int32)
**out = **in
}
if in.Peers != nil {
in, out := &in.Peers, &out.Peers
*out = make([]CiliumBGPNodePeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeInstance.
func (in *CiliumBGPNodeInstance) DeepCopy() *CiliumBGPNodeInstance {
if in == nil {
return nil
}
out := new(CiliumBGPNodeInstance)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeInstanceStatus) DeepCopyInto(out *CiliumBGPNodeInstanceStatus) {
*out = *in
if in.LocalASN != nil {
in, out := &in.LocalASN, &out.LocalASN
*out = new(int64)
**out = **in
}
if in.PeerStatuses != nil {
in, out := &in.PeerStatuses, &out.PeerStatuses
*out = make([]CiliumBGPNodePeerStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeInstanceStatus.
func (in *CiliumBGPNodeInstanceStatus) DeepCopy() *CiliumBGPNodeInstanceStatus {
if in == nil {
return nil
}
out := new(CiliumBGPNodeInstanceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodePeer) DeepCopyInto(out *CiliumBGPNodePeer) {
*out = *in
if in.PeerAddress != nil {
in, out := &in.PeerAddress, &out.PeerAddress
*out = new(string)
**out = **in
}
if in.PeerASN != nil {
in, out := &in.PeerASN, &out.PeerASN
*out = new(int64)
**out = **in
}
if in.LocalAddress != nil {
in, out := &in.LocalAddress, &out.LocalAddress
*out = new(string)
**out = **in
}
if in.PeerConfigRef != nil {
in, out := &in.PeerConfigRef, &out.PeerConfigRef
*out = new(PeerConfigReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodePeer.
func (in *CiliumBGPNodePeer) DeepCopy() *CiliumBGPNodePeer {
if in == nil {
return nil
}
out := new(CiliumBGPNodePeer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodePeerStatus) DeepCopyInto(out *CiliumBGPNodePeerStatus) {
*out = *in
if in.PeerASN != nil {
in, out := &in.PeerASN, &out.PeerASN
*out = new(int64)
**out = **in
}
if in.PeeringState != nil {
in, out := &in.PeeringState, &out.PeeringState
*out = new(string)
**out = **in
}
if in.Timers != nil {
in, out := &in.Timers, &out.Timers
*out = new(CiliumBGPTimersState)
(*in).DeepCopyInto(*out)
}
if in.EstablishedTime != nil {
in, out := &in.EstablishedTime, &out.EstablishedTime
*out = new(string)
**out = **in
}
if in.RouteCount != nil {
in, out := &in.RouteCount, &out.RouteCount
*out = make([]BGPFamilyRouteCount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodePeerStatus.
func (in *CiliumBGPNodePeerStatus) DeepCopy() *CiliumBGPNodePeerStatus {
if in == nil {
return nil
}
out := new(CiliumBGPNodePeerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeSpec) DeepCopyInto(out *CiliumBGPNodeSpec) {
*out = *in
if in.BGPInstances != nil {
in, out := &in.BGPInstances, &out.BGPInstances
*out = make([]CiliumBGPNodeInstance, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeSpec.
func (in *CiliumBGPNodeSpec) DeepCopy() *CiliumBGPNodeSpec {
if in == nil {
return nil
}
out := new(CiliumBGPNodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPNodeStatus) DeepCopyInto(out *CiliumBGPNodeStatus) {
*out = *in
if in.BGPInstances != nil {
in, out := &in.BGPInstances, &out.BGPInstances
*out = make([]CiliumBGPNodeInstanceStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeStatus.
func (in *CiliumBGPNodeStatus) DeepCopy() *CiliumBGPNodeStatus {
if in == nil {
return nil
}
out := new(CiliumBGPNodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPathAttributes) DeepCopyInto(out *CiliumBGPPathAttributes) {
	*out = *in
	// Pointer fields are re-allocated so the copy aliases none of in's memory.
	if in.Selector != nil {
		in, out := &in.Selector, &out.Selector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.Communities != nil {
		in, out := &in.Communities, &out.Communities
		*out = new(BGPCommunities)
		(*in).DeepCopyInto(*out)
	}
	if in.LocalPreference != nil {
		in, out := &in.LocalPreference, &out.LocalPreference
		// Plain scalar: a dereferenced assignment is a full copy.
		*out = new(int64)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPathAttributes.
func (in *CiliumBGPPathAttributes) DeepCopy() *CiliumBGPPathAttributes {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPathAttributes)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeer) DeepCopyInto(out *CiliumBGPPeer) {
	*out = *in
	if in.PeerAddress != nil {
		in, out := &in.PeerAddress, &out.PeerAddress
		*out = new(string)
		**out = **in
	}
	if in.PeerASN != nil {
		in, out := &in.PeerASN, &out.PeerASN
		*out = new(int64)
		**out = **in
	}
	if in.PeerConfigRef != nil {
		in, out := &in.PeerConfigRef, &out.PeerConfigRef
		*out = new(PeerConfigReference)
		// Copied by plain dereference; no per-field deep copy is emitted here.
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeer.
func (in *CiliumBGPPeer) DeepCopy() *CiliumBGPPeer {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPeer)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeerConfig) DeepCopyInto(out *CiliumBGPPeerConfig) {
	*out = *in
	// TypeMeta holds only value fields; ObjectMeta/Spec/Status carry
	// reference fields and need their own deep copies.
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfig.
func (in *CiliumBGPPeerConfig) DeepCopy() *CiliumBGPPeerConfig {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPeerConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPPeerConfig) DeepCopyObject() runtime.Object {
	// Return an untyped nil for a nil copy so callers never see a
	// non-nil interface wrapping a typed nil pointer.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeerConfigList) DeepCopyInto(out *CiliumBGPPeerConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumBGPPeerConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigList.
func (in *CiliumBGPPeerConfigList) DeepCopy() *CiliumBGPPeerConfigList {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPeerConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPPeerConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeerConfigSpec) DeepCopyInto(out *CiliumBGPPeerConfigSpec) {
	*out = *in
	if in.Transport != nil {
		in, out := &in.Transport, &out.Transport
		*out = new(CiliumBGPTransport)
		(*in).DeepCopyInto(*out)
	}
	if in.Timers != nil {
		in, out := &in.Timers, &out.Timers
		*out = new(CiliumBGPTimers)
		(*in).DeepCopyInto(*out)
	}
	if in.AuthSecretRef != nil {
		in, out := &in.AuthSecretRef, &out.AuthSecretRef
		*out = new(string)
		**out = **in
	}
	if in.GracefulRestart != nil {
		in, out := &in.GracefulRestart, &out.GracefulRestart
		*out = new(CiliumBGPNeighborGracefulRestart)
		(*in).DeepCopyInto(*out)
	}
	if in.EBGPMultihop != nil {
		in, out := &in.EBGPMultihop, &out.EBGPMultihop
		*out = new(int32)
		**out = **in
	}
	if in.Families != nil {
		in, out := &in.Families, &out.Families
		*out = make([]CiliumBGPFamilyWithAdverts, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigSpec.
func (in *CiliumBGPPeerConfigSpec) DeepCopy() *CiliumBGPPeerConfigSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPeerConfigSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeerConfigStatus) DeepCopyInto(out *CiliumBGPPeerConfigStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigStatus.
func (in *CiliumBGPPeerConfigStatus) DeepCopy() *CiliumBGPPeerConfigStatus {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPeerConfigStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeeringPolicy) DeepCopyInto(out *CiliumBGPPeeringPolicy) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	// No Status deep copy is emitted for this type.
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeeringPolicy.
func (in *CiliumBGPPeeringPolicy) DeepCopy() *CiliumBGPPeeringPolicy {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPeeringPolicy)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPPeeringPolicy) DeepCopyObject() runtime.Object {
	// Untyped nil on a nil copy avoids a typed-nil-in-interface result.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeeringPolicyList) DeepCopyInto(out *CiliumBGPPeeringPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumBGPPeeringPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeeringPolicyList.
func (in *CiliumBGPPeeringPolicyList) DeepCopy() *CiliumBGPPeeringPolicyList {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPeeringPolicyList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumBGPPeeringPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPPeeringPolicySpec) DeepCopyInto(out *CiliumBGPPeeringPolicySpec) {
	*out = *in
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.VirtualRouters != nil {
		in, out := &in.VirtualRouters, &out.VirtualRouters
		*out = make([]CiliumBGPVirtualRouter, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeeringPolicySpec.
func (in *CiliumBGPPeeringPolicySpec) DeepCopy() *CiliumBGPPeeringPolicySpec {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPPeeringPolicySpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPTimers) DeepCopyInto(out *CiliumBGPTimers) {
	*out = *in
	// Each optional scalar is re-allocated so the copy owns its own pointers.
	if in.ConnectRetryTimeSeconds != nil {
		in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.HoldTimeSeconds != nil {
		in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.KeepAliveTimeSeconds != nil {
		in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTimers.
func (in *CiliumBGPTimers) DeepCopy() *CiliumBGPTimers {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPTimers)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPTimersState) DeepCopyInto(out *CiliumBGPTimersState) {
	*out = *in
	if in.AppliedHoldTimeSeconds != nil {
		in, out := &in.AppliedHoldTimeSeconds, &out.AppliedHoldTimeSeconds
		*out = new(int32)
		**out = **in
	}
	if in.AppliedKeepaliveSeconds != nil {
		in, out := &in.AppliedKeepaliveSeconds, &out.AppliedKeepaliveSeconds
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTimersState.
func (in *CiliumBGPTimersState) DeepCopy() *CiliumBGPTimersState {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPTimersState)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPTransport) DeepCopyInto(out *CiliumBGPTransport) {
	*out = *in
	if in.LocalPort != nil {
		in, out := &in.LocalPort, &out.LocalPort
		*out = new(int32)
		**out = **in
	}
	if in.PeerPort != nil {
		in, out := &in.PeerPort, &out.PeerPort
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTransport.
func (in *CiliumBGPTransport) DeepCopy() *CiliumBGPTransport {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPTransport)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBGPVirtualRouter) DeepCopyInto(out *CiliumBGPVirtualRouter) {
	*out = *in
	if in.ExportPodCIDR != nil {
		in, out := &in.ExportPodCIDR, &out.ExportPodCIDR
		*out = new(bool)
		**out = **in
	}
	if in.PodIPPoolSelector != nil {
		in, out := &in.PodIPPoolSelector, &out.PodIPPoolSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.ServiceSelector != nil {
		in, out := &in.ServiceSelector, &out.ServiceSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.ServiceAdvertisements != nil {
		in, out := &in.ServiceAdvertisements, &out.ServiceAdvertisements
		*out = make([]BGPServiceAddressType, len(*in))
		// Plain value elements: a bulk copy() is sufficient.
		copy(*out, *in)
	}
	if in.Neighbors != nil {
		in, out := &in.Neighbors, &out.Neighbors
		*out = make([]CiliumBGPNeighbor, len(*in))
		// Elements need per-entry deep copies.
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPVirtualRouter.
func (in *CiliumBGPVirtualRouter) DeepCopy() *CiliumBGPVirtualRouter {
	if in == nil {
		return nil
	}
	out := new(CiliumBGPVirtualRouter)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumCIDRGroup) DeepCopyInto(out *CiliumCIDRGroup) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroup.
func (in *CiliumCIDRGroup) DeepCopy() *CiliumCIDRGroup {
	if in == nil {
		return nil
	}
	out := new(CiliumCIDRGroup)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumCIDRGroup) DeepCopyObject() runtime.Object {
	// Untyped nil on a nil copy avoids a typed-nil-in-interface result.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumCIDRGroupList) DeepCopyInto(out *CiliumCIDRGroupList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumCIDRGroup, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroupList.
func (in *CiliumCIDRGroupList) DeepCopy() *CiliumCIDRGroupList {
	if in == nil {
		return nil
	}
	out := new(CiliumCIDRGroupList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumCIDRGroupList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumCIDRGroupSpec) DeepCopyInto(out *CiliumCIDRGroupSpec) {
	*out = *in
	if in.ExternalCIDRs != nil {
		in, out := &in.ExternalCIDRs, &out.ExternalCIDRs
		*out = make([]api.CIDR, len(*in))
		// Plain value elements: a bulk copy() is sufficient.
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroupSpec.
func (in *CiliumCIDRGroupSpec) DeepCopy() *CiliumCIDRGroupSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumCIDRGroupSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEndpointSlice) DeepCopyInto(out *CiliumEndpointSlice) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	// CiliumEndpointSlice carries its endpoints directly (no Spec/Status).
	if in.Endpoints != nil {
		in, out := &in.Endpoints, &out.Endpoints
		*out = make([]CoreCiliumEndpoint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpointSlice.
func (in *CiliumEndpointSlice) DeepCopy() *CiliumEndpointSlice {
	if in == nil {
		return nil
	}
	out := new(CiliumEndpointSlice)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEndpointSlice) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEndpointSliceList) DeepCopyInto(out *CiliumEndpointSliceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumEndpointSlice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpointSliceList.
func (in *CiliumEndpointSliceList) DeepCopy() *CiliumEndpointSliceList {
	if in == nil {
		return nil
	}
	out := new(CiliumEndpointSliceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEndpointSliceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumGatewayClassConfig) DeepCopyInto(out *CiliumGatewayClassConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumGatewayClassConfig.
func (in *CiliumGatewayClassConfig) DeepCopy() *CiliumGatewayClassConfig {
	if in == nil {
		return nil
	}
	out := new(CiliumGatewayClassConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumGatewayClassConfig) DeepCopyObject() runtime.Object {
	// Untyped nil on a nil copy avoids a typed-nil-in-interface result.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumGatewayClassConfigList) DeepCopyInto(out *CiliumGatewayClassConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumGatewayClassConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumGatewayClassConfigList.
func (in *CiliumGatewayClassConfigList) DeepCopy() *CiliumGatewayClassConfigList {
	if in == nil {
		return nil
	}
	out := new(CiliumGatewayClassConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumGatewayClassConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumGatewayClassConfigSpec) DeepCopyInto(out *CiliumGatewayClassConfigSpec) {
	*out = *in
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Service != nil {
		in, out := &in.Service, &out.Service
		*out = new(ServiceConfig)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumGatewayClassConfigSpec.
func (in *CiliumGatewayClassConfigSpec) DeepCopy() *CiliumGatewayClassConfigSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumGatewayClassConfigSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumGatewayClassConfigStatus) DeepCopyInto(out *CiliumGatewayClassConfigStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumGatewayClassConfigStatus.
func (in *CiliumGatewayClassConfigStatus) DeepCopy() *CiliumGatewayClassConfigStatus {
	if in == nil {
		return nil
	}
	out := new(CiliumGatewayClassConfigStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumL2AnnouncementPolicy) DeepCopyInto(out *CiliumL2AnnouncementPolicy) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumL2AnnouncementPolicy.
func (in *CiliumL2AnnouncementPolicy) DeepCopy() *CiliumL2AnnouncementPolicy {
	if in == nil {
		return nil
	}
	out := new(CiliumL2AnnouncementPolicy)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumL2AnnouncementPolicy) DeepCopyObject() runtime.Object {
	// Untyped nil on a nil copy avoids a typed-nil-in-interface result.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumL2AnnouncementPolicyList) DeepCopyInto(out *CiliumL2AnnouncementPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumL2AnnouncementPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumL2AnnouncementPolicyList.
func (in *CiliumL2AnnouncementPolicyList) DeepCopy() *CiliumL2AnnouncementPolicyList {
	if in == nil {
		return nil
	}
	out := new(CiliumL2AnnouncementPolicyList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumL2AnnouncementPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumL2AnnouncementPolicySpec) DeepCopyInto(out *CiliumL2AnnouncementPolicySpec) {
	*out = *in
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.ServiceSelector != nil {
		in, out := &in.ServiceSelector, &out.ServiceSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.Interfaces != nil {
		in, out := &in.Interfaces, &out.Interfaces
		*out = make([]string, len(*in))
		// Strings are immutable; a bulk copy() is a full deep copy.
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumL2AnnouncementPolicySpec.
func (in *CiliumL2AnnouncementPolicySpec) DeepCopy() *CiliumL2AnnouncementPolicySpec {
	if in == nil {
		return nil
	}
	out := new(CiliumL2AnnouncementPolicySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumL2AnnouncementPolicyStatus) DeepCopyInto(out *CiliumL2AnnouncementPolicyStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumL2AnnouncementPolicyStatus.
func (in *CiliumL2AnnouncementPolicyStatus) DeepCopy() *CiliumL2AnnouncementPolicyStatus {
	if in == nil {
		return nil
	}
	out := new(CiliumL2AnnouncementPolicyStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPool) DeepCopyInto(out *CiliumLoadBalancerIPPool) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPool.
func (in *CiliumLoadBalancerIPPool) DeepCopy() *CiliumLoadBalancerIPPool {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPool)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLoadBalancerIPPool) DeepCopyObject() runtime.Object {
	// Untyped nil on a nil copy avoids a typed-nil-in-interface result.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolIPBlock) DeepCopyInto(out *CiliumLoadBalancerIPPoolIPBlock) {
	// The struct holds only value fields, so a shallow assignment is a
	// complete deep copy.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolIPBlock.
func (in *CiliumLoadBalancerIPPoolIPBlock) DeepCopy() *CiliumLoadBalancerIPPoolIPBlock {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPoolIPBlock)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolList) DeepCopyInto(out *CiliumLoadBalancerIPPoolList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumLoadBalancerIPPool, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolList.
func (in *CiliumLoadBalancerIPPoolList) DeepCopy() *CiliumLoadBalancerIPPoolList {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPoolList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLoadBalancerIPPoolList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolSpec) DeepCopyInto(out *CiliumLoadBalancerIPPoolSpec) {
	*out = *in
	if in.ServiceSelector != nil {
		in, out := &in.ServiceSelector, &out.ServiceSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.Blocks != nil {
		in, out := &in.Blocks, &out.Blocks
		*out = make([]CiliumLoadBalancerIPPoolIPBlock, len(*in))
		// IPBlock is value-only (see its DeepCopyInto), so copy() suffices.
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolSpec.
func (in *CiliumLoadBalancerIPPoolSpec) DeepCopy() *CiliumLoadBalancerIPPoolSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPoolSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolStatus) DeepCopyInto(out *CiliumLoadBalancerIPPoolStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolStatus.
func (in *CiliumLoadBalancerIPPoolStatus) DeepCopy() *CiliumLoadBalancerIPPoolStatus {
	if in == nil {
		return nil
	}
	out := new(CiliumLoadBalancerIPPoolStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfig) DeepCopyInto(out *CiliumNodeConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfig.
func (in *CiliumNodeConfig) DeepCopy() *CiliumNodeConfig {
	if in == nil {
		return nil
	}
	out := new(CiliumNodeConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeConfig) DeepCopyObject() runtime.Object {
	// Untyped nil on a nil copy avoids a typed-nil-in-interface result.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfigList) DeepCopyInto(out *CiliumNodeConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumNodeConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigList.
func (in *CiliumNodeConfigList) DeepCopy() *CiliumNodeConfigList {
	if in == nil {
		return nil
	}
	out := new(CiliumNodeConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfigSpec) DeepCopyInto(out *CiliumNodeConfigSpec) {
	*out = *in
	if in.Defaults != nil {
		in, out := &in.Defaults, &out.Defaults
		// Maps are reference types: a fresh map is allocated and string
		// entries (immutable) are copied one by one.
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = new(metav1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigSpec.
func (in *CiliumNodeConfigSpec) DeepCopy() *CiliumNodeConfigSpec {
	if in == nil {
		return nil
	}
	out := new(CiliumNodeConfigSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumPodIPPool) DeepCopyInto(out *CiliumPodIPPool) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumPodIPPool.
func (in *CiliumPodIPPool) DeepCopy() *CiliumPodIPPool {
	if in == nil {
		return nil
	}
	out := new(CiliumPodIPPool)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumPodIPPool) DeepCopyObject() runtime.Object {
	// Untyped nil on a nil copy avoids a typed-nil-in-interface result.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumPodIPPoolList) DeepCopyInto(out *CiliumPodIPPoolList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CiliumPodIPPool, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumPodIPPoolList.
func (in *CiliumPodIPPoolList) DeepCopy() *CiliumPodIPPoolList {
	if in == nil {
		return nil
	}
	out := new(CiliumPodIPPoolList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumPodIPPoolList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreCiliumEndpoint) DeepCopyInto(out *CoreCiliumEndpoint) {
	*out = *in
	if in.Networking != nil {
		in, out := &in.Networking, &out.Networking
		*out = new(v2.EndpointNetworking)
		(*in).DeepCopyInto(*out)
	}
	// Encryption is copied by plain assignment; no deep copy is emitted for it.
	out.Encryption = in.Encryption
	if in.NamedPorts != nil {
		in, out := &in.NamedPorts, &out.NamedPorts
		*out = make(models.NamedPorts, len(*in))
		// Slice of pointers: nil entries stay nil, non-nil entries get a
		// freshly allocated models.Port copied by dereference. Note the
		// inner in/out shadow the outer (already shadowed) pair.
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(models.Port)
				**out = **in
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreCiliumEndpoint.
func (in *CoreCiliumEndpoint) DeepCopy() *CoreCiliumEndpoint {
	if in == nil {
		return nil
	}
	out := new(CoreCiliumEndpoint)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressRule) DeepCopyInto(out *EgressRule) {
	*out = *in
	// Both selectors are optional pointers; each is re-allocated and
	// deep-copied so the copy aliases neither.
	if in.NamespaceSelector != nil {
		in, out := &in.NamespaceSelector, &out.NamespaceSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.PodSelector != nil {
		in, out := &in.PodSelector, &out.PodSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRule.
func (in *EgressRule) DeepCopy() *EgressRule {
	if in == nil {
		return nil
	}
	out := new(EgressRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPPoolSpec) DeepCopyInto(out *IPPoolSpec) {
	*out = *in
	// IPv4/IPv6 pool specs are pointers; clone each only when set.
	if in.IPv4 != nil {
		in, out := &in.IPv4, &out.IPv4
		*out = new(IPv4PoolSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.IPv6 != nil {
		in, out := &in.IPv6, &out.IPv6
		*out = new(IPv6PoolSpec)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolSpec.
// Returns nil for a nil receiver.
func (in *IPPoolSpec) DeepCopy() *IPPoolSpec {
	if in == nil {
		return nil
	}
	out := new(IPPoolSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPv4PoolSpec) DeepCopyInto(out *IPv4PoolSpec) {
	*out = *in
	if in.CIDRs != nil {
		// PoolCIDR elements are copied with the builtin copy; a plain
		// element copy suffices here (no per-element DeepCopyInto call).
		in, out := &in.CIDRs, &out.CIDRs
		*out = make([]PoolCIDR, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4PoolSpec.
// Returns nil for a nil receiver.
func (in *IPv4PoolSpec) DeepCopy() *IPv4PoolSpec {
	if in == nil {
		return nil
	}
	out := new(IPv4PoolSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPv6PoolSpec) DeepCopyInto(out *IPv6PoolSpec) {
	*out = *in
	if in.CIDRs != nil {
		// Fresh backing array so the copy does not alias the source slice.
		in, out := &in.CIDRs, &out.CIDRs
		*out = make([]PoolCIDR, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6PoolSpec.
// Returns nil for a nil receiver.
func (in *IPv6PoolSpec) DeepCopy() *IPv6PoolSpec {
	if in == nil {
		return nil
	}
	out := new(IPv6PoolSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain struct assignment is sufficient: the generator emitted no
// per-field copies, i.e. the type carries no reference fields needing them.
func (in *PeerConfigReference) DeepCopyInto(out *PeerConfigReference) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerConfigReference.
// Returns nil for a nil receiver.
func (in *PeerConfigReference) DeepCopy() *PeerConfigReference {
	if in == nil {
		return nil
	}
	out := new(PeerConfigReference)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Every pointer field is reallocated and every slice gets a fresh backing
// array, so the copy shares no mutable state with the source.
func (in *ServiceConfig) DeepCopyInto(out *ServiceConfig) {
	*out = *in
	if in.LoadBalancerClass != nil {
		in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
		*out = new(string)
		**out = **in
	}
	if in.IPFamilies != nil {
		in, out := &in.IPFamilies, &out.IPFamilies
		*out = make([]corev1.IPFamily, len(*in))
		copy(*out, *in)
	}
	if in.IPFamilyPolicy != nil {
		in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy
		*out = new(corev1.IPFamilyPolicy)
		**out = **in
	}
	if in.AllocateLoadBalancerNodePorts != nil {
		in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts
		*out = new(bool)
		**out = **in
	}
	if in.LoadBalancerSourceRanges != nil {
		in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.TrafficDistribution != nil {
		in, out := &in.TrafficDistribution, &out.TrafficDistribution
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConfig.
// Returns nil for a nil receiver.
func (in *ServiceConfig) DeepCopy() *ServiceConfig {
	if in == nil {
		return nil
	}
	out := new(ServiceConfig)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package v2alpha1
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Pointer fields are unequal when exactly one side is nil; when both are
// set, equality is delegated to the field's own DeepEqual.
func (in *BGPAdvertisement) DeepEqual(other *BGPAdvertisement) bool {
	if other == nil {
		return false
	}
	if in.AdvertisementType != other.AdvertisementType {
		return false
	}
	if (in.Service == nil) != (other.Service == nil) {
		return false
	} else if in.Service != nil {
		if !in.Service.DeepEqual(other.Service) {
			return false
		}
	}
	if (in.Selector == nil) != (other.Selector == nil) {
		return false
	} else if in.Selector != nil {
		if !in.Selector.DeepEqual(other.Selector) {
			return false
		}
	}
	if (in.Attributes == nil) != (other.Attributes == nil) {
		return false
	} else if in.Attributes != nil {
		if !in.Attributes.DeepEqual(other.Attributes) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *BGPAttributes) DeepEqual(other *BGPAttributes) bool {
	if other == nil {
		return false
	}
	if (in.Communities == nil) != (other.Communities == nil) {
		return false
	} else if in.Communities != nil {
		if !in.Communities.DeepEqual(other.Communities) {
			return false
		}
	}
	// LocalPreference is a pointer to a scalar: equal when both nil or both
	// point to the same value.
	if (in.LocalPreference == nil) != (other.LocalPreference == nil) {
		return false
	} else if in.LocalPreference != nil {
		if *in.LocalPreference != *other.LocalPreference {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Slice fields are compared element-wise by length and value. Note that as
// written, a nil slice and an empty non-nil slice compare as equal: the
// mixed nil-ness branch falls through to the length comparison, and both
// lengths are zero.
func (in *BGPCommunities) DeepEqual(other *BGPCommunities) bool {
	if other == nil {
		return false
	}
	if ((in.Standard != nil) && (other.Standard != nil)) || ((in.Standard == nil) != (other.Standard == nil)) {
		in, other := &in.Standard, &other.Standard
		// The shadowed 'other' is the address of a struct field and can
		// never be nil; this guard is vestigial generator output.
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.WellKnown != nil) && (other.WellKnown != nil)) || ((in.WellKnown == nil) != (other.WellKnown == nil)) {
		in, other := &in.WellKnown, &other.WellKnown
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.Large != nil) && (other.Large != nil)) || ((in.Large == nil) != (other.Large == nil)) {
		in, other := &in.Large, &other.Large
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *BGPFamilyRouteCount) DeepEqual(other *BGPFamilyRouteCount) bool {
	if other == nil {
		return false
	}
	if in.Afi != other.Afi {
		return false
	}
	if in.Safi != other.Safi {
		return false
	}
	// Received/Advertised are scalar pointers: equal when both nil or both
	// dereference to the same value.
	if (in.Received == nil) != (other.Received == nil) {
		return false
	} else if in.Received != nil {
		if *in.Received != *other.Received {
			return false
		}
	}
	if (in.Advertised == nil) != (other.Advertised == nil) {
		return false
	} else if in.Advertised != nil {
		if *in.Advertised != *other.Advertised {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Addresses is compared element-wise; nil vs. empty non-nil slices compare
// equal (both branches reach the zero-length comparison).
func (in *BGPServiceOptions) DeepEqual(other *BGPServiceOptions) bool {
	if other == nil {
		return false
	}
	if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
		in, other := &in.Addresses, &other.Addresses
		// Vestigial: 'other' is the address of a field and is never nil.
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if (in.AggregationLengthIPv4 == nil) != (other.AggregationLengthIPv4 == nil) {
		return false
	} else if in.AggregationLengthIPv4 != nil {
		if *in.AggregationLengthIPv4 != *other.AggregationLengthIPv4 {
			return false
		}
	}
	if (in.AggregationLengthIPv6 == nil) != (other.AggregationLengthIPv6 == nil) {
		return false
	} else if in.AggregationLengthIPv6 != nil {
		if *in.AggregationLengthIPv6 != *other.AggregationLengthIPv6 {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Only Spec participates in the comparison; object metadata is ignored.
func (in *CiliumBGPAdvertisement) DeepEqual(other *CiliumBGPAdvertisement) bool {
	if other == nil {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Advertisements is compared element-wise via each element's DeepEqual;
// nil vs. empty non-nil slices compare equal.
func (in *CiliumBGPAdvertisementSpec) DeepEqual(other *CiliumBGPAdvertisementSpec) bool {
	if other == nil {
		return false
	}
	if ((in.Advertisements != nil) && (other.Advertisements != nil)) || ((in.Advertisements == nil) != (other.Advertisements == nil)) {
		in, other := &in.Advertisements, &other.Advertisements
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares Spec and Status; object metadata is ignored.
func (in *CiliumBGPClusterConfig) DeepEqual(other *CiliumBGPClusterConfig) bool {
	if other == nil {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	if !in.Status.DeepEqual(&other.Status) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPClusterConfigSpec) DeepEqual(other *CiliumBGPClusterConfigSpec) bool {
	if other == nil {
		return false
	}
	if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
		return false
	} else if in.NodeSelector != nil {
		if !in.NodeSelector.DeepEqual(other.NodeSelector) {
			return false
		}
	}
	// BGPInstances: element-wise DeepEqual; nil and empty slices compare equal.
	if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) {
		in, other := &in.BGPInstances, &other.BGPInstances
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// The generator emitted no field comparisons, so any two non-nil values
// compare equal.
func (in *CiliumBGPClusterConfigStatus) DeepEqual(other *CiliumBGPClusterConfigStatus) bool {
	if other == nil {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Afi and Safi are plain scalar fields compared directly.
func (in *CiliumBGPFamily) DeepEqual(other *CiliumBGPFamily) bool {
	if other == nil {
		return false
	}
	if in.Afi != other.Afi {
		return false
	}
	if in.Safi != other.Safi {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPFamilyWithAdverts) DeepEqual(other *CiliumBGPFamilyWithAdverts) bool {
	if other == nil {
		return false
	}
	// The embedded CiliumBGPFamily is compared with == (it must be a
	// comparable struct for this to compile).
	if in.CiliumBGPFamily != other.CiliumBGPFamily {
		return false
	}
	if (in.Advertisements == nil) != (other.Advertisements == nil) {
		return false
	} else if in.Advertisements != nil {
		if !in.Advertisements.DeepEqual(other.Advertisements) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPInstance) DeepEqual(other *CiliumBGPInstance) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	// LocalASN/LocalPort are scalar pointers: equal when both nil or both
	// dereference to the same value.
	if (in.LocalASN == nil) != (other.LocalASN == nil) {
		return false
	} else if in.LocalASN != nil {
		if *in.LocalASN != *other.LocalASN {
			return false
		}
	}
	if (in.LocalPort == nil) != (other.LocalPort == nil) {
		return false
	} else if in.LocalPort != nil {
		if *in.LocalPort != *other.LocalPort {
			return false
		}
	}
	// Peers: element-wise DeepEqual; nil and empty slices compare equal.
	if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) {
		in, other := &in.Peers, &other.Peers
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Scalar pointer fields (PeerPort, AuthSecretRef, EBGPMultihopTTL, timer
// seconds) are equal when both nil or both dereference to the same value;
// GracefulRestart delegates to its own DeepEqual; the two slice fields are
// compared element-wise (nil and empty slices compare equal).
func (in *CiliumBGPNeighbor) DeepEqual(other *CiliumBGPNeighbor) bool {
	if other == nil {
		return false
	}
	if in.PeerAddress != other.PeerAddress {
		return false
	}
	if (in.PeerPort == nil) != (other.PeerPort == nil) {
		return false
	} else if in.PeerPort != nil {
		if *in.PeerPort != *other.PeerPort {
			return false
		}
	}
	if in.PeerASN != other.PeerASN {
		return false
	}
	if (in.AuthSecretRef == nil) != (other.AuthSecretRef == nil) {
		return false
	} else if in.AuthSecretRef != nil {
		if *in.AuthSecretRef != *other.AuthSecretRef {
			return false
		}
	}
	if (in.EBGPMultihopTTL == nil) != (other.EBGPMultihopTTL == nil) {
		return false
	} else if in.EBGPMultihopTTL != nil {
		if *in.EBGPMultihopTTL != *other.EBGPMultihopTTL {
			return false
		}
	}
	if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) {
		return false
	} else if in.ConnectRetryTimeSeconds != nil {
		if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds {
			return false
		}
	}
	if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) {
		return false
	} else if in.HoldTimeSeconds != nil {
		if *in.HoldTimeSeconds != *other.HoldTimeSeconds {
			return false
		}
	}
	if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) {
		return false
	} else if in.KeepAliveTimeSeconds != nil {
		if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds {
			return false
		}
	}
	if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) {
		return false
	} else if in.GracefulRestart != nil {
		if !in.GracefulRestart.DeepEqual(other.GracefulRestart) {
			return false
		}
	}
	if ((in.Families != nil) && (other.Families != nil)) || ((in.Families == nil) != (other.Families == nil)) {
		in, other := &in.Families, &other.Families
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.AdvertisedPathAttributes != nil) && (other.AdvertisedPathAttributes != nil)) || ((in.AdvertisedPathAttributes == nil) != (other.AdvertisedPathAttributes == nil)) {
		in, other := &in.AdvertisedPathAttributes, &other.AdvertisedPathAttributes
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNeighborGracefulRestart) DeepEqual(other *CiliumBGPNeighborGracefulRestart) bool {
	if other == nil {
		return false
	}
	if in.Enabled != other.Enabled {
		return false
	}
	// RestartTimeSeconds: equal when both nil or both point to the same value.
	if (in.RestartTimeSeconds == nil) != (other.RestartTimeSeconds == nil) {
		return false
	} else if in.RestartTimeSeconds != nil {
		if *in.RestartTimeSeconds != *other.RestartTimeSeconds {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares Spec and Status; object metadata is ignored.
func (in *CiliumBGPNodeConfig) DeepEqual(other *CiliumBGPNodeConfig) bool {
	if other == nil {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	if !in.Status.DeepEqual(&other.Status) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeConfigInstanceOverride) DeepEqual(other *CiliumBGPNodeConfigInstanceOverride) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	// RouterID/LocalPort/LocalASN are scalar pointers: equal when both nil
	// or both dereference to the same value.
	if (in.RouterID == nil) != (other.RouterID == nil) {
		return false
	} else if in.RouterID != nil {
		if *in.RouterID != *other.RouterID {
			return false
		}
	}
	if (in.LocalPort == nil) != (other.LocalPort == nil) {
		return false
	} else if in.LocalPort != nil {
		if *in.LocalPort != *other.LocalPort {
			return false
		}
	}
	if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) {
		in, other := &in.Peers, &other.Peers
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if (in.LocalASN == nil) != (other.LocalASN == nil) {
		return false
	} else if in.LocalASN != nil {
		if *in.LocalASN != *other.LocalASN {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Only Spec participates; object metadata is ignored.
func (in *CiliumBGPNodeConfigOverride) DeepEqual(other *CiliumBGPNodeConfigOverride) bool {
	if other == nil {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// BGPInstances: element-wise DeepEqual; nil and empty slices compare equal.
func (in *CiliumBGPNodeConfigOverrideSpec) DeepEqual(other *CiliumBGPNodeConfigOverrideSpec) bool {
	if other == nil {
		return false
	}
	if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) {
		in, other := &in.BGPInstances, &other.BGPInstances
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeConfigPeerOverride) DeepEqual(other *CiliumBGPNodeConfigPeerOverride) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	// LocalAddress/LocalPort are scalar pointers: equal when both nil or
	// both dereference to the same value.
	if (in.LocalAddress == nil) != (other.LocalAddress == nil) {
		return false
	} else if in.LocalAddress != nil {
		if *in.LocalAddress != *other.LocalAddress {
			return false
		}
	}
	if (in.LocalPort == nil) != (other.LocalPort == nil) {
		return false
	} else if in.LocalPort != nil {
		if *in.LocalPort != *other.LocalPort {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeInstance) DeepEqual(other *CiliumBGPNodeInstance) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	// LocalASN/RouterID/LocalPort are scalar pointers: equal when both nil
	// or both dereference to the same value.
	if (in.LocalASN == nil) != (other.LocalASN == nil) {
		return false
	} else if in.LocalASN != nil {
		if *in.LocalASN != *other.LocalASN {
			return false
		}
	}
	if (in.RouterID == nil) != (other.RouterID == nil) {
		return false
	} else if in.RouterID != nil {
		if *in.RouterID != *other.RouterID {
			return false
		}
	}
	if (in.LocalPort == nil) != (other.LocalPort == nil) {
		return false
	} else if in.LocalPort != nil {
		if *in.LocalPort != *other.LocalPort {
			return false
		}
	}
	// Peers: element-wise DeepEqual; nil and empty slices compare equal.
	if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) {
		in, other := &in.Peers, &other.Peers
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodeInstanceStatus) DeepEqual(other *CiliumBGPNodeInstanceStatus) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if (in.LocalASN == nil) != (other.LocalASN == nil) {
		return false
	} else if in.LocalASN != nil {
		if *in.LocalASN != *other.LocalASN {
			return false
		}
	}
	// PeerStatuses: element-wise DeepEqual; nil and empty slices compare equal.
	if ((in.PeerStatuses != nil) && (other.PeerStatuses != nil)) || ((in.PeerStatuses == nil) != (other.PeerStatuses == nil)) {
		in, other := &in.PeerStatuses, &other.PeerStatuses
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodePeer) DeepEqual(other *CiliumBGPNodePeer) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	// Scalar-pointer fields: equal when both nil or both point to the same value.
	if (in.PeerAddress == nil) != (other.PeerAddress == nil) {
		return false
	} else if in.PeerAddress != nil {
		if *in.PeerAddress != *other.PeerAddress {
			return false
		}
	}
	if (in.PeerASN == nil) != (other.PeerASN == nil) {
		return false
	} else if in.PeerASN != nil {
		if *in.PeerASN != *other.PeerASN {
			return false
		}
	}
	if (in.LocalAddress == nil) != (other.LocalAddress == nil) {
		return false
	} else if in.LocalAddress != nil {
		if *in.LocalAddress != *other.LocalAddress {
			return false
		}
	}
	if (in.PeerConfigRef == nil) != (other.PeerConfigRef == nil) {
		return false
	} else if in.PeerConfigRef != nil {
		if !in.PeerConfigRef.DeepEqual(other.PeerConfigRef) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPNodePeerStatus) DeepEqual(other *CiliumBGPNodePeerStatus) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.PeerAddress != other.PeerAddress {
		return false
	}
	// Scalar-pointer fields: equal when both nil or both point to the same value.
	if (in.PeerASN == nil) != (other.PeerASN == nil) {
		return false
	} else if in.PeerASN != nil {
		if *in.PeerASN != *other.PeerASN {
			return false
		}
	}
	if (in.PeeringState == nil) != (other.PeeringState == nil) {
		return false
	} else if in.PeeringState != nil {
		if *in.PeeringState != *other.PeeringState {
			return false
		}
	}
	if (in.Timers == nil) != (other.Timers == nil) {
		return false
	} else if in.Timers != nil {
		if !in.Timers.DeepEqual(other.Timers) {
			return false
		}
	}
	if (in.EstablishedTime == nil) != (other.EstablishedTime == nil) {
		return false
	} else if in.EstablishedTime != nil {
		if *in.EstablishedTime != *other.EstablishedTime {
			return false
		}
	}
	// RouteCount: element-wise DeepEqual; nil and empty slices compare equal.
	if ((in.RouteCount != nil) && (other.RouteCount != nil)) || ((in.RouteCount == nil) != (other.RouteCount == nil)) {
		in, other := &in.RouteCount, &other.RouteCount
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// BGPInstances: element-wise DeepEqual; nil and empty slices compare equal.
func (in *CiliumBGPNodeSpec) DeepEqual(other *CiliumBGPNodeSpec) bool {
	if other == nil {
		return false
	}
	if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) {
		in, other := &in.BGPInstances, &other.BGPInstances
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// BGPInstances: element-wise DeepEqual; nil and empty slices compare equal.
func (in *CiliumBGPNodeStatus) DeepEqual(other *CiliumBGPNodeStatus) bool {
	if other == nil {
		return false
	}
	if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) {
		in, other := &in.BGPInstances, &other.BGPInstances
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPPathAttributes) DeepEqual(other *CiliumBGPPathAttributes) bool {
	if other == nil {
		return false
	}
	if in.SelectorType != other.SelectorType {
		return false
	}
	// Pointer fields: unequal when exactly one side is nil; otherwise the
	// field's own DeepEqual (or dereferenced comparison) decides.
	if (in.Selector == nil) != (other.Selector == nil) {
		return false
	} else if in.Selector != nil {
		if !in.Selector.DeepEqual(other.Selector) {
			return false
		}
	}
	if (in.Communities == nil) != (other.Communities == nil) {
		return false
	} else if in.Communities != nil {
		if !in.Communities.DeepEqual(other.Communities) {
			return false
		}
	}
	if (in.LocalPreference == nil) != (other.LocalPreference == nil) {
		return false
	} else if in.LocalPreference != nil {
		if *in.LocalPreference != *other.LocalPreference {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPPeer) DeepEqual(other *CiliumBGPPeer) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	// Scalar-pointer fields: equal when both nil or both point to the same value.
	if (in.PeerAddress == nil) != (other.PeerAddress == nil) {
		return false
	} else if in.PeerAddress != nil {
		if *in.PeerAddress != *other.PeerAddress {
			return false
		}
	}
	if (in.PeerASN == nil) != (other.PeerASN == nil) {
		return false
	} else if in.PeerASN != nil {
		if *in.PeerASN != *other.PeerASN {
			return false
		}
	}
	if (in.PeerConfigRef == nil) != (other.PeerConfigRef == nil) {
		return false
	} else if in.PeerConfigRef != nil {
		if !in.PeerConfigRef.DeepEqual(other.PeerConfigRef) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares Spec and Status; object metadata is ignored.
func (in *CiliumBGPPeerConfig) DeepEqual(other *CiliumBGPPeerConfig) bool {
	if other == nil {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	if !in.Status.DeepEqual(&other.Status) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPPeerConfigSpec) DeepEqual(other *CiliumBGPPeerConfigSpec) bool {
	if other == nil {
		return false
	}
	// Pointer struct fields delegate to their own DeepEqual; scalar
	// pointers compare the dereferenced values. Exactly one side nil means
	// unequal in all cases.
	if (in.Transport == nil) != (other.Transport == nil) {
		return false
	} else if in.Transport != nil {
		if !in.Transport.DeepEqual(other.Transport) {
			return false
		}
	}
	if (in.Timers == nil) != (other.Timers == nil) {
		return false
	} else if in.Timers != nil {
		if !in.Timers.DeepEqual(other.Timers) {
			return false
		}
	}
	if (in.AuthSecretRef == nil) != (other.AuthSecretRef == nil) {
		return false
	} else if in.AuthSecretRef != nil {
		if *in.AuthSecretRef != *other.AuthSecretRef {
			return false
		}
	}
	if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) {
		return false
	} else if in.GracefulRestart != nil {
		if !in.GracefulRestart.DeepEqual(other.GracefulRestart) {
			return false
		}
	}
	if (in.EBGPMultihop == nil) != (other.EBGPMultihop == nil) {
		return false
	} else if in.EBGPMultihop != nil {
		if *in.EBGPMultihop != *other.EBGPMultihop {
			return false
		}
	}
	// Families: element-wise DeepEqual; nil and empty slices compare equal.
	if ((in.Families != nil) && (other.Families != nil)) || ((in.Families == nil) != (other.Families == nil)) {
		in, other := &in.Families, &other.Families
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// No field comparisons were generated, so any two non-nil values are equal.
func (in *CiliumBGPPeerConfigStatus) DeepEqual(other *CiliumBGPPeerConfigStatus) bool {
	if other == nil {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Only Spec participates; object metadata is ignored.
func (in *CiliumBGPPeeringPolicy) DeepEqual(other *CiliumBGPPeeringPolicy) bool {
	if other == nil {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPPeeringPolicySpec) DeepEqual(other *CiliumBGPPeeringPolicySpec) bool {
	if other == nil {
		return false
	}
	if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
		return false
	} else if in.NodeSelector != nil {
		if !in.NodeSelector.DeepEqual(other.NodeSelector) {
			return false
		}
	}
	// VirtualRouters: element-wise DeepEqual; nil and empty slices compare equal.
	if ((in.VirtualRouters != nil) && (other.VirtualRouters != nil)) || ((in.VirtualRouters == nil) != (other.VirtualRouters == nil)) {
		in, other := &in.VirtualRouters, &other.VirtualRouters
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// All three timer fields are scalar pointers: equal when both nil or both
// dereference to the same value.
func (in *CiliumBGPTimers) DeepEqual(other *CiliumBGPTimers) bool {
	if other == nil {
		return false
	}
	if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) {
		return false
	} else if in.ConnectRetryTimeSeconds != nil {
		if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds {
			return false
		}
	}
	if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) {
		return false
	} else if in.HoldTimeSeconds != nil {
		if *in.HoldTimeSeconds != *other.HoldTimeSeconds {
			return false
		}
	}
	if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) {
		return false
	} else if in.KeepAliveTimeSeconds != nil {
		if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Both fields are scalar pointers: equal when both nil or both dereference
// to the same value.
func (in *CiliumBGPTimersState) DeepEqual(other *CiliumBGPTimersState) bool {
	if other == nil {
		return false
	}
	if (in.AppliedHoldTimeSeconds == nil) != (other.AppliedHoldTimeSeconds == nil) {
		return false
	} else if in.AppliedHoldTimeSeconds != nil {
		if *in.AppliedHoldTimeSeconds != *other.AppliedHoldTimeSeconds {
			return false
		}
	}
	if (in.AppliedKeepaliveSeconds == nil) != (other.AppliedKeepaliveSeconds == nil) {
		return false
	} else if in.AppliedKeepaliveSeconds != nil {
		if *in.AppliedKeepaliveSeconds != *other.AppliedKeepaliveSeconds {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Both ports are scalar pointers: equal when both nil or both dereference
// to the same value.
func (in *CiliumBGPTransport) DeepEqual(other *CiliumBGPTransport) bool {
	if other == nil {
		return false
	}
	if (in.LocalPort == nil) != (other.LocalPort == nil) {
		return false
	} else if in.LocalPort != nil {
		if *in.LocalPort != *other.LocalPort {
			return false
		}
	}
	if (in.PeerPort == nil) != (other.PeerPort == nil) {
		return false
	} else if in.PeerPort != nil {
		if *in.PeerPort != *other.PeerPort {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumBGPVirtualRouter) DeepEqual(other *CiliumBGPVirtualRouter) bool {
	if other == nil {
		return false
	}
	if in.LocalASN != other.LocalASN {
		return false
	}
	if (in.ExportPodCIDR == nil) != (other.ExportPodCIDR == nil) {
		return false
	} else if in.ExportPodCIDR != nil {
		if *in.ExportPodCIDR != *other.ExportPodCIDR {
			return false
		}
	}
	if (in.PodIPPoolSelector == nil) != (other.PodIPPoolSelector == nil) {
		return false
	} else if in.PodIPPoolSelector != nil {
		if !in.PodIPPoolSelector.DeepEqual(other.PodIPPoolSelector) {
			return false
		}
	}
	if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) {
		return false
	} else if in.ServiceSelector != nil {
		if !in.ServiceSelector.DeepEqual(other.ServiceSelector) {
			return false
		}
	}
	// ServiceAdvertisements elements are compared with !=; Neighbors
	// elements with their own DeepEqual. In both cases nil and empty
	// slices compare equal.
	if ((in.ServiceAdvertisements != nil) && (other.ServiceAdvertisements != nil)) || ((in.ServiceAdvertisements == nil) != (other.ServiceAdvertisements == nil)) {
		in, other := &in.ServiceAdvertisements, &other.ServiceAdvertisements
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.Neighbors != nil) && (other.Neighbors != nil)) || ((in.Neighbors == nil) != (other.Neighbors == nil)) {
		in, other := &in.Neighbors, &other.Neighbors
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumCIDRGroupSpec) DeepEqual(other *CiliumCIDRGroupSpec) bool {
	if other == nil {
		return false
	}

	// Slices compare equal when both are nil, or both are non-nil with equal
	// length and element-wise equal contents.
	if ((in.ExternalCIDRs != nil) && (other.ExternalCIDRs != nil)) || ((in.ExternalCIDRs == nil) != (other.ExternalCIDRs == nil)) {
		in, other := &in.ExternalCIDRs, &other.ExternalCIDRs
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEndpointSlice) DeepEqual(other *CiliumEndpointSlice) bool {
	if other == nil {
		return false
	}

	if in.Namespace != other.Namespace {
		return false
	}
	if ((in.Endpoints != nil) && (other.Endpoints != nil)) || ((in.Endpoints == nil) != (other.Endpoints == nil)) {
		in, other := &in.Endpoints, &other.Endpoints
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			// Elements are structs; recurse via their DeepEqual.
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumGatewayClassConfig) DeepEqual(other *CiliumGatewayClassConfig) bool {
	if other == nil {
		return false
	}

	// Only Spec participates in semantic equality; ObjectMeta is ignored.
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumGatewayClassConfigSpec) DeepEqual(other *CiliumGatewayClassConfigSpec) bool {
	if other == nil {
		return false
	}

	if (in.Description == nil) != (other.Description == nil) {
		return false
	} else if in.Description != nil {
		if *in.Description != *other.Description {
			return false
		}
	}

	if (in.Service == nil) != (other.Service == nil) {
		return false
	} else if in.Service != nil {
		if !in.Service.DeepEqual(other.Service) {
			return false
		}
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumL2AnnouncementPolicy) DeepEqual(other *CiliumL2AnnouncementPolicy) bool {
	if other == nil {
		return false
	}

	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumL2AnnouncementPolicySpec) DeepEqual(other *CiliumL2AnnouncementPolicySpec) bool {
	if other == nil {
		return false
	}

	if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
		return false
	} else if in.NodeSelector != nil {
		if !in.NodeSelector.DeepEqual(other.NodeSelector) {
			return false
		}
	}

	if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) {
		return false
	} else if in.ServiceSelector != nil {
		if !in.ServiceSelector.DeepEqual(other.ServiceSelector) {
			return false
		}
	}

	if in.LoadBalancerIPs != other.LoadBalancerIPs {
		return false
	}
	if in.ExternalIPs != other.ExternalIPs {
		return false
	}
	if ((in.Interfaces != nil) && (other.Interfaces != nil)) || ((in.Interfaces == nil) != (other.Interfaces == nil)) {
		in, other := &in.Interfaces, &other.Interfaces
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLoadBalancerIPPool) DeepEqual(other *CiliumLoadBalancerIPPool) bool {
	if other == nil {
		return false
	}

	// Only Spec participates in semantic equality; ObjectMeta is ignored.
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolIPBlock) DeepEqual(other *CiliumLoadBalancerIPPoolIPBlock) bool {
	if other == nil {
		return false
	}

	if in.Cidr != other.Cidr {
		return false
	}
	if in.Start != other.Start {
		return false
	}
	if in.Stop != other.Stop {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLoadBalancerIPPoolSpec) DeepEqual(other *CiliumLoadBalancerIPPoolSpec) bool {
	if other == nil {
		return false
	}

	if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) {
		return false
	} else if in.ServiceSelector != nil {
		if !in.ServiceSelector.DeepEqual(other.ServiceSelector) {
			return false
		}
	}

	if in.AllowFirstLastIPs != other.AllowFirstLastIPs {
		return false
	}
	if ((in.Blocks != nil) && (other.Blocks != nil)) || ((in.Blocks == nil) != (other.Blocks == nil)) {
		in, other := &in.Blocks, &other.Blocks
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	if in.Disabled != other.Disabled {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumPodIPPool) DeepEqual(other *CiliumPodIPPool) bool {
	if other == nil {
		return false
	}

	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CoreCiliumEndpoint) DeepEqual(other *CoreCiliumEndpoint) bool {
	if other == nil {
		return false
	}

	if in.Name != other.Name {
		return false
	}
	if in.IdentityID != other.IdentityID {
		return false
	}
	if (in.Networking == nil) != (other.Networking == nil) {
		return false
	} else if in.Networking != nil {
		if !in.Networking.DeepEqual(other.Networking) {
			return false
		}
	}

	if in.Encryption != other.Encryption {
		return false
	}
	// NamedPorts is compared via its own DeepEqual; the shadowed in/other
	// alias the field values.
	if ((in.NamedPorts != nil) && (other.NamedPorts != nil)) || ((in.NamedPorts == nil) != (other.NamedPorts == nil)) {
		in, other := &in.NamedPorts, &other.NamedPorts
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EgressRule) DeepEqual(other *EgressRule) bool {
	if other == nil {
		return false
	}

	if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) {
		return false
	} else if in.NamespaceSelector != nil {
		if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) {
			return false
		}
	}

	if (in.PodSelector == nil) != (other.PodSelector == nil) {
		return false
	} else if in.PodSelector != nil {
		if !in.PodSelector.DeepEqual(other.PodSelector) {
			return false
		}
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPPoolSpec) DeepEqual(other *IPPoolSpec) bool {
	if other == nil {
		return false
	}

	if (in.IPv4 == nil) != (other.IPv4 == nil) {
		return false
	} else if in.IPv4 != nil {
		if !in.IPv4.DeepEqual(other.IPv4) {
			return false
		}
	}

	if (in.IPv6 == nil) != (other.IPv6 == nil) {
		return false
	} else if in.IPv6 != nil {
		if !in.IPv6.DeepEqual(other.IPv6) {
			return false
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPv4PoolSpec) DeepEqual(other *IPv4PoolSpec) bool {
	if other == nil {
		return false
	}

	// CIDRs compare equal when both are nil, or both non-nil with equal
	// length and element-wise equal contents.
	if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
		in, other := &in.CIDRs, &other.CIDRs
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if in.MaskSize != other.MaskSize {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IPv6PoolSpec) DeepEqual(other *IPv6PoolSpec) bool {
	if other == nil {
		return false
	}

	if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
		in, other := &in.CIDRs, &other.CIDRs
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if in.MaskSize != other.MaskSize {
		return false
	}

	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PeerConfigReference) DeepEqual(other *PeerConfigReference) bool {
	if other == nil {
		return false
	}

	if in.Group != other.Group {
		return false
	}
	if in.Kind != other.Kind {
		return false
	}
	if in.Name != other.Name {
		return false
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ServiceConfig) DeepEqual(other *ServiceConfig) bool {
	if other == nil {
		return false
	}

	if in.Type != other.Type {
		return false
	}
	if in.ExternalTrafficPolicy != other.ExternalTrafficPolicy {
		return false
	}
	// Optional pointer fields: nil-ness must match, then pointed-to values.
	if (in.LoadBalancerClass == nil) != (other.LoadBalancerClass == nil) {
		return false
	} else if in.LoadBalancerClass != nil {
		if *in.LoadBalancerClass != *other.LoadBalancerClass {
			return false
		}
	}

	if ((in.IPFamilies != nil) && (other.IPFamilies != nil)) || ((in.IPFamilies == nil) != (other.IPFamilies == nil)) {
		in, other := &in.IPFamilies, &other.IPFamilies
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if (in.IPFamilyPolicy == nil) != (other.IPFamilyPolicy == nil) {
		return false
	} else if in.IPFamilyPolicy != nil {
		if *in.IPFamilyPolicy != *other.IPFamilyPolicy {
			return false
		}
	}

	if (in.AllocateLoadBalancerNodePorts == nil) != (other.AllocateLoadBalancerNodePorts == nil) {
		return false
	} else if in.AllocateLoadBalancerNodePorts != nil {
		if *in.AllocateLoadBalancerNodePorts != *other.AllocateLoadBalancerNodePorts {
			return false
		}
	}

	if ((in.LoadBalancerSourceRanges != nil) && (other.LoadBalancerSourceRanges != nil)) || ((in.LoadBalancerSourceRanges == nil) != (other.LoadBalancerSourceRanges == nil)) {
		in, other := &in.LoadBalancerSourceRanges, &other.LoadBalancerSourceRanges
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	if in.LoadBalancerSourceRangesPolicy != other.LoadBalancerSourceRangesPolicy {
		return false
	}
	if (in.TrafficDistribution == nil) != (other.TrafficDistribution == nil) {
		return false
	} else if in.TrafficDistribution != nil {
		if *in.TrafficDistribution != *other.TrafficDistribution {
			return false
		}
	}

	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
"os"
"time"
"github.com/cilium/hive/cell"
apiext_clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/connrotation"
mcsapi_clientset "sigs.k8s.io/mcs-api/pkg/client/clientset/versioned"
"github.com/cilium/cilium/pkg/controller"
cilium_clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
k8smetrics "github.com/cilium/cilium/pkg/k8s/metrics"
slim_apiextclientsetscheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme"
slim_apiext_clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
slim_metav1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1"
slim_clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned"
k8sversion "github.com/cilium/cilium/pkg/k8s/version"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// client.Cell provides Clientset, a composition of clientsets to Kubernetes resources
// used by Cilium.
var Cell = cell.Module(
	"k8s-client",
	"Kubernetes Client",

	cell.Config(defaultSharedConfig),
	cell.Config(defaultClientParams),
	cell.Provide(NewClientConfig),
	cell.Provide(newClientset),
	cell.Invoke(registerMappingsUpdater),
)

// client.ClientBuilderCell provides a function to create a new composite Clientset,
// allowing a controller to use its own Clientset with a different user agent.
var ClientBuilderCell = cell.Module(
	"k8s-client-builder",
	"Kubernetes Client Builder",

	cell.Config(defaultSharedConfig),
	cell.Provide(NewClientConfig),
	cell.Provide(NewClientBuilder),
)

var (
	k8sHeartbeatControllerGroup = controller.NewGroup("k8s-heartbeat")

	// connTimeout is the overall budget for establishing the initial apiserver
	// connection; connRetryInterval is the pause between attempts.
	connTimeout       = time.Minute
	connRetryInterval = 5 * time.Second
)

// Type aliases for the clientsets to avoid name collision on 'Clientset' when composing them.
type (
	MCSAPIClientset     = mcsapi_clientset.Clientset
	KubernetesClientset = kubernetes.Clientset
	SlimClientset       = slim_clientset.Clientset
	APIExtClientset     = slim_apiext_clientset.Clientset
	CiliumClientset     = cilium_clientset.Clientset
)
// Clientset is a composition of the different client sets used by Cilium.
type Clientset interface {
	mcsapi_clientset.Interface
	kubernetes.Interface
	apiext_clientset.Interface
	cilium_clientset.Interface
	Getters

	// Slim returns the slim client, which contains some of the same APIs as the
	// normal kubernetes client, but with slimmed down messages to reduce memory
	// usage. Prefer the slim version when caching messages.
	Slim() slim_clientset.Interface

	// IsEnabled returns true if Kubernetes support is enabled and the
	// clientset can be used.
	IsEnabled() bool

	// Disable disables the client. Panics if called after the clientset has been
	// started.
	Disable()

	// Config returns the configuration used to create this client.
	Config() Config

	// RestConfig returns the deep copy of rest configuration.
	RestConfig() *rest.Config
}

// compositeClientset implements the Clientset using real clients.
type compositeClientset struct {
	started  bool
	disabled bool

	// Embedded typed clientsets; their methods are promoted onto Clientset.
	*MCSAPIClientset
	*KubernetesClientset
	*APIExtClientset
	*CiliumClientset
	ClientsetGetters

	controller        *controller.Manager
	slim              *SlimClientset
	config            Config
	logger            *slog.Logger
	closeAllConns     func()
	restConfigManager *restConfigManager
}

// ConfigureK8sClientsetDialer provides an optional extension point
// to configure the dialer used by the clientset.
type ConfigureK8sClientsetDialer interface {
	ConfigureK8sClientsetDialer(dialer *net.Dialer)
}

// compositeClientsetParams carries the hive-injected dependencies needed to
// construct the composite clientset.
type compositeClientsetParams struct {
	cell.In

	Logger    *slog.Logger
	Lifecycle cell.Lifecycle
	Config    Config

	// ConfigureK8sClientsetDialer is optional; when absent the default
	// net.Dialer settings are used.
	ConfigureK8sClientsetDialer ConfigureK8sClientsetDialer `optional:"true"`
}
// newClientset constructs the default composite clientset with no user-agent
// suffix.
func newClientset(params compositeClientsetParams) (Clientset, *restConfigManager, error) {
	return newClientsetForUserAgent(params, "")
}

// newClientsetForUserAgent builds the composite clientset, appending name to
// the user agent. It prepares the REST config and a shared HTTP client, wires
// up each typed clientset, and registers start/stop lifecycle hooks. When
// Kubernetes support is disabled an inert (disabled) clientset is returned.
func newClientsetForUserAgent(params compositeClientsetParams, name string) (Clientset, *restConfigManager, error) {
	if !params.Config.isEnabled() {
		return &compositeClientset{disabled: true}, nil, nil
	}

	client := compositeClientset{
		logger:     params.Logger,
		controller: controller.NewManager(),
		config:     params.Config,
	}

	var err error
	client.restConfigManager, err = restConfigManagerInit(params.Config, name, params.Logger)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create k8s client rest configuration: %w", err)
	}
	rc := client.restConfigManager.getConfig()

	defaultCloseAllConns := params.setDialer(rc)

	httpClient, err := rest.HTTPClientFor(rc)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create k8s REST client: %w", err)
	}

	// We are implementing the same logic as Kubelet, see
	// https://github.com/kubernetes/kubernetes/blob/v1.24.0-beta.0/cmd/kubelet/app/server.go#L852.
	if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
		client.closeAllConns = defaultCloseAllConns
	} else {
		client.closeAllConns = func() {
			utilnet.CloseIdleConnectionsFor(rc.Transport)
		}
	}

	// Slim and K8s clients use protobuf marshalling.
	rc.ContentConfig.ContentType = `application/vnd.kubernetes.protobuf`

	client.slim, err = slim_clientset.NewForConfigAndClient(rc, httpClient)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create slim k8s client: %w", err)
	}

	client.APIExtClientset, err = slim_apiext_clientset.NewForConfigAndClient(rc, httpClient)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create apiext k8s client: %w", err)
	}

	client.MCSAPIClientset, err = mcsapi_clientset.NewForConfigAndClient(rc, httpClient)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create mcsapi k8s client: %w", err)
	}

	client.KubernetesClientset, err = kubernetes.NewForConfigAndClient(rc, httpClient)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create k8s client: %w", err)
	}

	client.ClientsetGetters = ClientsetGetters{&client}

	// The cilium client uses JSON marshalling. Note this mutates the shared
	// rest config, so it must happen after the protobuf-based clients above
	// have been created.
	rc.ContentConfig.ContentType = `application/json`
	client.CiliumClientset, err = cilium_clientset.NewForConfigAndClient(rc, httpClient)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create cilium k8s client: %w", err)
	}

	params.Lifecycle.Append(cell.Hook{
		OnStart: client.onStart,
		OnStop:  client.onStop,
	})

	return &client, client.restConfigManager, nil
}
// Slim returns the slim clientset, which mirrors parts of the Kubernetes API
// with reduced-size objects to lower memory usage.
func (c *compositeClientset) Slim() slim_clientset.Interface {
	return c.slim
}

// Discovery returns the discovery interface of the full Kubernetes clientset.
func (c *compositeClientset) Discovery() discovery.DiscoveryInterface {
	return c.KubernetesClientset.Discovery()
}
// IsEnabled reports whether Kubernetes support is enabled and the clientset
// has not been disabled. It is safe to call on a nil receiver.
func (c *compositeClientset) IsEnabled() bool {
	if c == nil || c.disabled {
		return false
	}
	return c.config.isEnabled()
}
// Disable turns the clientset off. It must be called before the clientset
// has been started; calling it afterwards is a programming error and panics.
func (c *compositeClientset) Disable() {
	if c.started {
		panic("Clientset.Disable() called after it had been started")
	}
	c.disabled = true
}

// Config returns the configuration used to create this client.
func (c *compositeClientset) Config() Config {
	return c.config
}

// RestConfig returns the rest configuration managed by the restConfigManager.
func (c *compositeClientset) RestConfig() *rest.Config {
	return c.restConfigManager.getConfig()
}
// onStart establishes the apiserver connection, starts the heartbeat
// controller, and records the server version and capabilities. It fails when
// the apiserver cannot be reached or its version is below the supported
// minimum.
func (c *compositeClientset) onStart(startCtx cell.HookContext) error {
	if !c.IsEnabled() {
		return nil
	}

	if err := c.waitForConn(startCtx); err != nil {
		return err
	}
	c.startHeartbeat()

	// Update the global K8s clients, K8s version and the capabilities.
	if err := k8sversion.Update(c.logger, c, c.config.EnableK8sAPIDiscovery); err != nil {
		return err
	}

	if !k8sversion.Capabilities().MinimalVersionMet {
		return fmt.Errorf("k8s version (%v) is not meeting the minimal requirement (%v)",
			k8sversion.Version(), k8sversion.MinimalVersionConstraint)
	}

	c.started = true

	return nil
}
// onStop tears the clientset down: when enabled, it removes all controllers
// (waiting for them to finish) and closes every open apiserver connection.
// In all cases the clientset is marked as no longer started.
func (c *compositeClientset) onStop(stopCtx cell.HookContext) error {
	if !c.IsEnabled() {
		c.started = false
		return nil
	}
	c.controller.RemoveAllAndWait()
	c.closeAllConns()
	c.started = false
	return nil
}
// startHeartbeat registers a periodic controller that verifies apiserver
// liveness via a GET on /healthz. It is a no-op when the heartbeat timeout is
// zero. On heartbeat failure all client connections are closed and, when
// possible, the apiserver URL is rotated.
func (c *compositeClientset) startHeartbeat() {
	restClient := c.KubernetesClientset.RESTClient()

	timeout := c.config.K8sHeartbeatTimeout
	if timeout == 0 {
		return
	}

	heartBeat := func(ctx context.Context) error {
		// Kubernetes does a get node of the node that kubelet is running [0]. This seems excessive in
		// our case because the amount of data transferred is bigger than doing a Get of /healthz.
		// For this reason we have picked to perform a get on `/healthz` instead a get of a node.
		//
		// [0] https://github.com/kubernetes/kubernetes/blob/v1.17.3/pkg/kubelet/kubelet_node_status.go#L423
		res := restClient.Get().Resource("healthz").Do(ctx)
		return res.Error()
	}

	// rotateAPIServer switches to the next configured apiserver URL, if any.
	rotateAPIServer := func() {
		if c.restConfigManager.canRotateAPIServerURL() {
			c.restConfigManager.rotateAPIServerURL()
		}
	}

	c.controller.UpdateController("k8s-heartbeat",
		controller.ControllerParams{
			Group: k8sHeartbeatControllerGroup,
			DoFunc: func(context.Context) error {
				runHeartbeat(
					c.logger,
					heartBeat,
					timeout,
					c.closeAllConns,
					rotateAPIServer,
				)
				return nil
			},
			RunInterval: timeout,
		})
}
// waitForConn blocks until a connection to the apiserver is established, the
// connTimeout budget elapses, or ctx is cancelled. On a failed attempt it
// immediately retries against a rotated apiserver URL when one is available;
// otherwise it waits connRetryInterval before the next attempt.
func (c *compositeClientset) waitForConn(ctx context.Context) error {
	stop := make(chan struct{})
	timeout := time.NewTimer(connTimeout)
	defer timeout.Stop()
	var err error
	wait.Until(func() {
	retry:
		c.logger.Info("Establishing connection to apiserver",
			logfields.IPAddr, c.restConfigManager.getConfig().Host,
		)
		err = isConnReady(c)
		if err == nil {
			// Connected: closing stop ends the wait.Until loop.
			close(stop)
			return
		}

		select {
		case <-ctx.Done():
		case <-timeout.C:
		default:
			// Neither cancelled nor timed out yet: retry right away on a
			// rotated URL if possible, otherwise wait for the next tick.
			if c.restConfigManager.canRotateAPIServerURL() {
				c.restConfigManager.rotateAPIServerURL()
				goto retry
			}
			return
		}

		// Cancelled or out of time: give up and stop the loop.
		c.logger.Error("Unable to contact k8s api-server",
			logfields.IPAddr, c.restConfigManager.getConfig().Host,
			logfields.Error, err,
		)
		close(stop)
	}, connRetryInterval, stop)
	if err == nil {
		c.logger.Info("Connected to apiserver")
	}
	return err
}
// setDialer installs a custom dialer on restConfig and returns a function
// that closes all connections created through it. When either the connection
// timeout or the keep-alive interval is zero, connection rotation is disabled
// and the returned closer is a no-op.
func (p *compositeClientsetParams) setDialer(restConfig *rest.Config) func() {
	base := &net.Dialer{
		Timeout:   p.Config.K8sClientConnectionTimeout,
		KeepAlive: p.Config.K8sClientConnectionKeepAlive,
	}
	if hook := p.ConfigureK8sClientsetDialer; hook != nil {
		hook.ConfigureK8sClientsetDialer(base)
	}
	dialContext := base.DialContext

	if p.Config.K8sClientConnectionTimeout == 0 || p.Config.K8sClientConnectionKeepAlive == 0 {
		restConfig.Dial = dialContext
		return func() {}
	}

	rotating := connrotation.NewDialer(dialContext)
	restConfig.Dial = rotating.DialContext
	return rotating.CloseAll
}
// runHeartbeat performs a single liveness probe against kube-apiserver with
// the given timeout. The probe is skipped entirely if a successful apiserver
// interaction happened within the last 'timeout'. When the probe errors or
// times out, every onFailure callback is invoked (e.g. closing connections,
// rotating the apiserver URL).
func runHeartbeat(logger *slog.Logger, heartBeat func(context.Context) error, timeout time.Duration, onFailure ...func()) {
	expireDate := time.Now().Add(-timeout)
	// Don't even perform a health check if we have received a successful
	// k8s event in the last 'timeout' duration
	if k8smetrics.LastSuccessInteraction.Time().After(expireDate) {
		return
	}

	done := make(chan error)
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	go func() {
		// If we have reached up to this point to perform a heartbeat to
		// kube-apiserver then we should close the connections if we receive
		// any error at all except if we receive a http.StatusTooManyRequests
		// which means the server is overloaded and only for this reason we
		// will not close all connections.
		err := heartBeat(ctx)
		if err != nil {
			statusError := &k8sErrors.StatusError{}
			if !errors.As(err, &statusError) ||
				statusError.ErrStatus.Code != http.StatusTooManyRequests {
				done <- err
			}
		}
		close(done)
	}()

	select {
	case err := <-done:
		if err != nil {
			logger.Warn("Network status error received, restarting client connections", logfields.Error, err)
			for _, fn := range onFailure {
				fn()
			}
		}
	case <-ctx.Done():
		// The probe goroutine is abandoned; ctx cancellation unblocks it.
		logger.Warn("Heartbeat timed out, restarting client connections")
		for _, fn := range onFailure {
			fn()
		}
	}
}
// isConnReady probes the apiserver by fetching the kube-system namespace and
// reports the resulting error (nil when the connection is healthy).
func isConnReady(c kubernetes.Interface) error {
	namespaces := c.CoreV1().Namespaces()
	if _, err := namespaces.Get(context.TODO(), "kube-system", metav1.GetOptions{}); err != nil {
		return err
	}
	return nil
}
// ClientBuilderFunc creates a new Clientset whose user agent is suffixed with
// the given name.
type ClientBuilderFunc func(name string) (Clientset, error)

// NewClientBuilder returns a function that creates a new Clientset with the given
// name appended to the user agent, or returns an error if the Clientset cannot be
// created.
func NewClientBuilder(params compositeClientsetParams) ClientBuilderFunc {
	return func(name string) (Clientset, error) {
		c, _, err := newClientsetForUserAgent(params, name)
		if err != nil {
			return nil, err
		}
		return c, nil
	}
}

func init() {
	// Register the metav1.Table and metav1.PartialObjectMetadata for the
	// apiextclientset.
	utilruntime.Must(slim_metav1.AddMetaToScheme(slim_apiextclientsetscheme.Scheme))
	utilruntime.Must(slim_metav1beta1.AddMetaToScheme(slim_apiextclientsetscheme.Scheme))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
fmt "fmt"
http "net/http"
ciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2"
ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
// Interface lists the typed clients bundled in this clientset, one per API
// group/version, plus discovery.
type Interface interface {
	Discovery() discovery.DiscoveryInterface
	CiliumV2() ciliumv2.CiliumV2Interface
	CiliumV2alpha1() ciliumv2alpha1.CiliumV2alpha1Interface
}

// Clientset contains the clients for groups.
type Clientset struct {
	*discovery.DiscoveryClient
	ciliumV2       *ciliumv2.CiliumV2Client
	ciliumV2alpha1 *ciliumv2alpha1.CiliumV2alpha1Client
}

// CiliumV2 retrieves the CiliumV2Client
func (c *Clientset) CiliumV2() ciliumv2.CiliumV2Interface {
	return c.ciliumV2
}

// CiliumV2alpha1 retrieves the CiliumV2alpha1Client
func (c *Clientset) CiliumV2alpha1() ciliumv2alpha1.CiliumV2alpha1Interface {
	return c.ciliumV2alpha1
}

// Discovery retrieves the DiscoveryClient. Returns nil on a nil receiver.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	if c == nil {
		return nil
	}
	return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
	configShallowCopy := *c

	if configShallowCopy.UserAgent == "" {
		configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	// share the transport between all clients
	httpClient, err := rest.HTTPClientFor(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	return NewForConfigAndClient(&configShallowCopy, httpClient)
}

// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
	configShallowCopy := *c
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		if configShallowCopy.Burst <= 0 {
			return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
		}
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}

	var cs Clientset
	var err error
	cs.ciliumV2, err = ciliumv2.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.ciliumV2alpha1, err = ciliumv2alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	return &cs, nil
}

// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	cs, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return cs
}

// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	var cs Clientset
	cs.ciliumV2 = ciliumv2.New(c)
	cs.ciliumV2alpha1 = ciliumv2alpha1.New(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
	return &cs
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// Scheme holds all API types known to this clientset.
var Scheme = runtime.NewScheme()

// Codecs provides serializers and deserializers for types in Scheme.
var Codecs = serializer.NewCodecFactory(Scheme)

// ParameterCodec converts query parameters for types in Scheme.
var ParameterCodec = runtime.NewParameterCodec(Scheme)

// localSchemeBuilder aggregates the per-group registration functions.
var localSchemeBuilder = runtime.SchemeBuilder{
	ciliumv2.AddToScheme,
	ciliumv2alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//	import (
//	  "k8s.io/client-go/kubernetes"
//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//	)
//
//	kclientset, _ := kubernetes.NewForConfig(c)
//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(Scheme))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
http "net/http"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// CiliumV2Interface bundles the per-resource getters of the cilium.io/v2 API
// group together with access to the underlying REST client.
type CiliumV2Interface interface {
	RESTClient() rest.Interface
	CiliumBGPAdvertisementsGetter
	CiliumBGPClusterConfigsGetter
	CiliumBGPNodeConfigsGetter
	CiliumBGPNodeConfigOverridesGetter
	CiliumBGPPeerConfigsGetter
	CiliumCIDRGroupsGetter
	CiliumClusterwideEnvoyConfigsGetter
	CiliumClusterwideNetworkPoliciesGetter
	CiliumEgressGatewayPoliciesGetter
	CiliumEndpointsGetter
	CiliumEnvoyConfigsGetter
	CiliumIdentitiesGetter
	CiliumLoadBalancerIPPoolsGetter
	CiliumLocalRedirectPoliciesGetter
	CiliumNetworkPoliciesGetter
	CiliumNodesGetter
	CiliumNodeConfigsGetter
}

// CiliumV2Client is used to interact with features provided by the cilium.io group.
type CiliumV2Client struct {
	restClient rest.Interface
}
// The following accessors return resource-scoped clients for the cilium.io/v2
// API group. Cluster-scoped resources take no arguments; namespaced resources
// take the target namespace.
func (c *CiliumV2Client) CiliumBGPAdvertisements() CiliumBGPAdvertisementInterface {
	return newCiliumBGPAdvertisements(c)
}

func (c *CiliumV2Client) CiliumBGPClusterConfigs() CiliumBGPClusterConfigInterface {
	return newCiliumBGPClusterConfigs(c)
}

func (c *CiliumV2Client) CiliumBGPNodeConfigs() CiliumBGPNodeConfigInterface {
	return newCiliumBGPNodeConfigs(c)
}

func (c *CiliumV2Client) CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInterface {
	return newCiliumBGPNodeConfigOverrides(c)
}

func (c *CiliumV2Client) CiliumBGPPeerConfigs() CiliumBGPPeerConfigInterface {
	return newCiliumBGPPeerConfigs(c)
}

func (c *CiliumV2Client) CiliumCIDRGroups() CiliumCIDRGroupInterface {
	return newCiliumCIDRGroups(c)
}

func (c *CiliumV2Client) CiliumClusterwideEnvoyConfigs() CiliumClusterwideEnvoyConfigInterface {
	return newCiliumClusterwideEnvoyConfigs(c)
}

func (c *CiliumV2Client) CiliumClusterwideNetworkPolicies() CiliumClusterwideNetworkPolicyInterface {
	return newCiliumClusterwideNetworkPolicies(c)
}

func (c *CiliumV2Client) CiliumEgressGatewayPolicies() CiliumEgressGatewayPolicyInterface {
	return newCiliumEgressGatewayPolicies(c)
}

// CiliumEndpoints is namespaced.
func (c *CiliumV2Client) CiliumEndpoints(namespace string) CiliumEndpointInterface {
	return newCiliumEndpoints(c, namespace)
}

// CiliumEnvoyConfigs is namespaced.
func (c *CiliumV2Client) CiliumEnvoyConfigs(namespace string) CiliumEnvoyConfigInterface {
	return newCiliumEnvoyConfigs(c, namespace)
}

func (c *CiliumV2Client) CiliumIdentities() CiliumIdentityInterface {
	return newCiliumIdentities(c)
}

func (c *CiliumV2Client) CiliumLoadBalancerIPPools() CiliumLoadBalancerIPPoolInterface {
	return newCiliumLoadBalancerIPPools(c)
}

// CiliumLocalRedirectPolicies is namespaced.
func (c *CiliumV2Client) CiliumLocalRedirectPolicies(namespace string) CiliumLocalRedirectPolicyInterface {
	return newCiliumLocalRedirectPolicies(c, namespace)
}

// CiliumNetworkPolicies is namespaced.
func (c *CiliumV2Client) CiliumNetworkPolicies(namespace string) CiliumNetworkPolicyInterface {
	return newCiliumNetworkPolicies(c, namespace)
}

func (c *CiliumV2Client) CiliumNodes() CiliumNodeInterface {
	return newCiliumNodes(c)
}

// CiliumNodeConfigs is namespaced.
func (c *CiliumV2Client) CiliumNodeConfigs(namespace string) CiliumNodeConfigInterface {
	return newCiliumNodeConfigs(c, namespace)
}
// NewForConfig creates a new CiliumV2Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*CiliumV2Client, error) {
	// Work on a shallow copy so the caller's config is never mutated
	// by the defaulting below.
	config := *c
	setConfigDefaults(&config)
	httpClient, err := rest.HTTPClientFor(&config)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new CiliumV2Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CiliumV2Client, error) {
	// Copy the config before applying the cilium.io/v2 defaults so the
	// caller's instance is left untouched.
	config := *c
	setConfigDefaults(&config)
	client, err := rest.RESTClientForConfigAndClient(&config, h)
	if err != nil {
		return nil, err
	}
	return &CiliumV2Client{client}, nil
}
// NewForConfigOrDie creates a new CiliumV2Client for the given config and
// panics if there is an error in the config.
// It simply delegates to NewForConfig and turns any error into a panic.
func NewForConfigOrDie(c *rest.Config) *CiliumV2Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}
// New creates a new CiliumV2Client for the given RESTClient.
// Unlike NewForConfig, it wraps the provided rest.Interface directly and
// applies no configuration defaults.
func New(c rest.Interface) *CiliumV2Client {
	return &CiliumV2Client{c}
}
// setConfigDefaults fills in the group/version, API path, negotiated
// serializer and (if unset) user agent on config so the resulting REST
// client targets the cilium.io/v2 API group.
func setConfigDefaults(config *rest.Config) {
	gv := ciliumiov2.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	// Only default the user agent when the caller did not provide one.
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *CiliumV2Client) RESTClient() rest.Interface {
	// Tolerate a nil receiver so callers do not have to nil-check the client.
	if c == nil {
		return nil
	}
	return c.restClient
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPAdvertisementsGetter has a method to return a CiliumBGPAdvertisementInterface.
// A group's client should implement this interface.
type CiliumBGPAdvertisementsGetter interface {
	CiliumBGPAdvertisements() CiliumBGPAdvertisementInterface
}

// CiliumBGPAdvertisementInterface has methods to work with CiliumBGPAdvertisement resources.
type CiliumBGPAdvertisementInterface interface {
	Create(ctx context.Context, ciliumBGPAdvertisement *ciliumiov2.CiliumBGPAdvertisement, opts v1.CreateOptions) (*ciliumiov2.CiliumBGPAdvertisement, error)
	Update(ctx context.Context, ciliumBGPAdvertisement *ciliumiov2.CiliumBGPAdvertisement, opts v1.UpdateOptions) (*ciliumiov2.CiliumBGPAdvertisement, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumBGPAdvertisement, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumBGPAdvertisementList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumBGPAdvertisement, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumBGPAdvertisementExpansion
}

// ciliumBGPAdvertisements implements CiliumBGPAdvertisementInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumBGPAdvertisements struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumBGPAdvertisement, *ciliumiov2.CiliumBGPAdvertisementList]
}

// newCiliumBGPAdvertisements returns a CiliumBGPAdvertisements
func newCiliumBGPAdvertisements(c *CiliumV2Client) *ciliumBGPAdvertisements {
	return &ciliumBGPAdvertisements{
		gentype.NewClientWithList[*ciliumiov2.CiliumBGPAdvertisement, *ciliumiov2.CiliumBGPAdvertisementList](
			"ciliumbgpadvertisements", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumBGPAdvertisement { return &ciliumiov2.CiliumBGPAdvertisement{} },
			func() *ciliumiov2.CiliumBGPAdvertisementList { return &ciliumiov2.CiliumBGPAdvertisementList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPClusterConfigsGetter has a method to return a CiliumBGPClusterConfigInterface.
// A group's client should implement this interface.
type CiliumBGPClusterConfigsGetter interface {
	CiliumBGPClusterConfigs() CiliumBGPClusterConfigInterface
}

// CiliumBGPClusterConfigInterface has methods to work with CiliumBGPClusterConfig resources.
type CiliumBGPClusterConfigInterface interface {
	Create(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2.CiliumBGPClusterConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumBGPClusterConfig, error)
	Update(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumBGPClusterConfig, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumBGPClusterConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumBGPClusterConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumBGPClusterConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumBGPClusterConfig, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumBGPClusterConfigExpansion
}

// ciliumBGPClusterConfigs implements CiliumBGPClusterConfigInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumBGPClusterConfigs struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumBGPClusterConfig, *ciliumiov2.CiliumBGPClusterConfigList]
}

// newCiliumBGPClusterConfigs returns a CiliumBGPClusterConfigs
func newCiliumBGPClusterConfigs(c *CiliumV2Client) *ciliumBGPClusterConfigs {
	return &ciliumBGPClusterConfigs{
		gentype.NewClientWithList[*ciliumiov2.CiliumBGPClusterConfig, *ciliumiov2.CiliumBGPClusterConfigList](
			"ciliumbgpclusterconfigs", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumBGPClusterConfig { return &ciliumiov2.CiliumBGPClusterConfig{} },
			func() *ciliumiov2.CiliumBGPClusterConfigList { return &ciliumiov2.CiliumBGPClusterConfigList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPNodeConfigsGetter has a method to return a CiliumBGPNodeConfigInterface.
// A group's client should implement this interface.
type CiliumBGPNodeConfigsGetter interface {
	CiliumBGPNodeConfigs() CiliumBGPNodeConfigInterface
}

// CiliumBGPNodeConfigInterface has methods to work with CiliumBGPNodeConfig resources.
type CiliumBGPNodeConfigInterface interface {
	Create(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2.CiliumBGPNodeConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumBGPNodeConfig, error)
	Update(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumBGPNodeConfig, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumBGPNodeConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumBGPNodeConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumBGPNodeConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumBGPNodeConfig, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumBGPNodeConfigExpansion
}

// ciliumBGPNodeConfigs implements CiliumBGPNodeConfigInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumBGPNodeConfigs struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumBGPNodeConfig, *ciliumiov2.CiliumBGPNodeConfigList]
}

// newCiliumBGPNodeConfigs returns a CiliumBGPNodeConfigs
func newCiliumBGPNodeConfigs(c *CiliumV2Client) *ciliumBGPNodeConfigs {
	return &ciliumBGPNodeConfigs{
		gentype.NewClientWithList[*ciliumiov2.CiliumBGPNodeConfig, *ciliumiov2.CiliumBGPNodeConfigList](
			"ciliumbgpnodeconfigs", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumBGPNodeConfig { return &ciliumiov2.CiliumBGPNodeConfig{} },
			func() *ciliumiov2.CiliumBGPNodeConfigList { return &ciliumiov2.CiliumBGPNodeConfigList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPNodeConfigOverridesGetter has a method to return a CiliumBGPNodeConfigOverrideInterface.
// A group's client should implement this interface.
type CiliumBGPNodeConfigOverridesGetter interface {
	CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInterface
}

// CiliumBGPNodeConfigOverrideInterface has methods to work with CiliumBGPNodeConfigOverride resources.
type CiliumBGPNodeConfigOverrideInterface interface {
	Create(ctx context.Context, ciliumBGPNodeConfigOverride *ciliumiov2.CiliumBGPNodeConfigOverride, opts v1.CreateOptions) (*ciliumiov2.CiliumBGPNodeConfigOverride, error)
	Update(ctx context.Context, ciliumBGPNodeConfigOverride *ciliumiov2.CiliumBGPNodeConfigOverride, opts v1.UpdateOptions) (*ciliumiov2.CiliumBGPNodeConfigOverride, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumBGPNodeConfigOverride, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumBGPNodeConfigOverrideList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumBGPNodeConfigOverride, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumBGPNodeConfigOverrideExpansion
}

// ciliumBGPNodeConfigOverrides implements CiliumBGPNodeConfigOverrideInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumBGPNodeConfigOverrides struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumBGPNodeConfigOverride, *ciliumiov2.CiliumBGPNodeConfigOverrideList]
}

// newCiliumBGPNodeConfigOverrides returns a CiliumBGPNodeConfigOverrides
func newCiliumBGPNodeConfigOverrides(c *CiliumV2Client) *ciliumBGPNodeConfigOverrides {
	return &ciliumBGPNodeConfigOverrides{
		gentype.NewClientWithList[*ciliumiov2.CiliumBGPNodeConfigOverride, *ciliumiov2.CiliumBGPNodeConfigOverrideList](
			"ciliumbgpnodeconfigoverrides", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumBGPNodeConfigOverride { return &ciliumiov2.CiliumBGPNodeConfigOverride{} },
			func() *ciliumiov2.CiliumBGPNodeConfigOverrideList {
				return &ciliumiov2.CiliumBGPNodeConfigOverrideList{}
			},
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPPeerConfigsGetter has a method to return a CiliumBGPPeerConfigInterface.
// A group's client should implement this interface.
type CiliumBGPPeerConfigsGetter interface {
	CiliumBGPPeerConfigs() CiliumBGPPeerConfigInterface
}

// CiliumBGPPeerConfigInterface has methods to work with CiliumBGPPeerConfig resources.
type CiliumBGPPeerConfigInterface interface {
	Create(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2.CiliumBGPPeerConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumBGPPeerConfig, error)
	Update(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumBGPPeerConfig, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumBGPPeerConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumBGPPeerConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumBGPPeerConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumBGPPeerConfig, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumBGPPeerConfigExpansion
}

// ciliumBGPPeerConfigs implements CiliumBGPPeerConfigInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumBGPPeerConfigs struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumBGPPeerConfig, *ciliumiov2.CiliumBGPPeerConfigList]
}

// newCiliumBGPPeerConfigs returns a CiliumBGPPeerConfigs
func newCiliumBGPPeerConfigs(c *CiliumV2Client) *ciliumBGPPeerConfigs {
	return &ciliumBGPPeerConfigs{
		gentype.NewClientWithList[*ciliumiov2.CiliumBGPPeerConfig, *ciliumiov2.CiliumBGPPeerConfigList](
			"ciliumbgppeerconfigs", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumBGPPeerConfig { return &ciliumiov2.CiliumBGPPeerConfig{} },
			func() *ciliumiov2.CiliumBGPPeerConfigList { return &ciliumiov2.CiliumBGPPeerConfigList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumCIDRGroupsGetter has a method to return a CiliumCIDRGroupInterface.
// A group's client should implement this interface.
type CiliumCIDRGroupsGetter interface {
	CiliumCIDRGroups() CiliumCIDRGroupInterface
}

// CiliumCIDRGroupInterface has methods to work with CiliumCIDRGroup resources.
type CiliumCIDRGroupInterface interface {
	Create(ctx context.Context, ciliumCIDRGroup *ciliumiov2.CiliumCIDRGroup, opts v1.CreateOptions) (*ciliumiov2.CiliumCIDRGroup, error)
	Update(ctx context.Context, ciliumCIDRGroup *ciliumiov2.CiliumCIDRGroup, opts v1.UpdateOptions) (*ciliumiov2.CiliumCIDRGroup, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumCIDRGroup, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumCIDRGroupList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumCIDRGroup, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumCIDRGroupExpansion
}

// ciliumCIDRGroups implements CiliumCIDRGroupInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumCIDRGroups struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumCIDRGroup, *ciliumiov2.CiliumCIDRGroupList]
}

// newCiliumCIDRGroups returns a CiliumCIDRGroups
func newCiliumCIDRGroups(c *CiliumV2Client) *ciliumCIDRGroups {
	return &ciliumCIDRGroups{
		gentype.NewClientWithList[*ciliumiov2.CiliumCIDRGroup, *ciliumiov2.CiliumCIDRGroupList](
			"ciliumcidrgroups", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumCIDRGroup { return &ciliumiov2.CiliumCIDRGroup{} },
			func() *ciliumiov2.CiliumCIDRGroupList { return &ciliumiov2.CiliumCIDRGroupList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumClusterwideEnvoyConfigsGetter has a method to return a CiliumClusterwideEnvoyConfigInterface.
// A group's client should implement this interface.
type CiliumClusterwideEnvoyConfigsGetter interface {
	CiliumClusterwideEnvoyConfigs() CiliumClusterwideEnvoyConfigInterface
}

// CiliumClusterwideEnvoyConfigInterface has methods to work with CiliumClusterwideEnvoyConfig resources.
type CiliumClusterwideEnvoyConfigInterface interface {
	Create(ctx context.Context, ciliumClusterwideEnvoyConfig *ciliumiov2.CiliumClusterwideEnvoyConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumClusterwideEnvoyConfig, error)
	Update(ctx context.Context, ciliumClusterwideEnvoyConfig *ciliumiov2.CiliumClusterwideEnvoyConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumClusterwideEnvoyConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumClusterwideEnvoyConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumClusterwideEnvoyConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumClusterwideEnvoyConfig, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumClusterwideEnvoyConfigExpansion
}

// ciliumClusterwideEnvoyConfigs implements CiliumClusterwideEnvoyConfigInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumClusterwideEnvoyConfigs struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumClusterwideEnvoyConfig, *ciliumiov2.CiliumClusterwideEnvoyConfigList]
}

// newCiliumClusterwideEnvoyConfigs returns a CiliumClusterwideEnvoyConfigs
func newCiliumClusterwideEnvoyConfigs(c *CiliumV2Client) *ciliumClusterwideEnvoyConfigs {
	return &ciliumClusterwideEnvoyConfigs{
		gentype.NewClientWithList[*ciliumiov2.CiliumClusterwideEnvoyConfig, *ciliumiov2.CiliumClusterwideEnvoyConfigList](
			"ciliumclusterwideenvoyconfigs", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumClusterwideEnvoyConfig { return &ciliumiov2.CiliumClusterwideEnvoyConfig{} },
			func() *ciliumiov2.CiliumClusterwideEnvoyConfigList {
				return &ciliumiov2.CiliumClusterwideEnvoyConfigList{}
			},
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumClusterwideNetworkPoliciesGetter has a method to return a CiliumClusterwideNetworkPolicyInterface.
// A group's client should implement this interface.
type CiliumClusterwideNetworkPoliciesGetter interface {
	CiliumClusterwideNetworkPolicies() CiliumClusterwideNetworkPolicyInterface
}

// CiliumClusterwideNetworkPolicyInterface has methods to work with CiliumClusterwideNetworkPolicy resources.
type CiliumClusterwideNetworkPolicyInterface interface {
	Create(ctx context.Context, ciliumClusterwideNetworkPolicy *ciliumiov2.CiliumClusterwideNetworkPolicy, opts v1.CreateOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error)
	Update(ctx context.Context, ciliumClusterwideNetworkPolicy *ciliumiov2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumClusterwideNetworkPolicy *ciliumiov2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicyList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumClusterwideNetworkPolicy, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumClusterwideNetworkPolicyExpansion
}

// ciliumClusterwideNetworkPolicies implements CiliumClusterwideNetworkPolicyInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumClusterwideNetworkPolicies struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumClusterwideNetworkPolicy, *ciliumiov2.CiliumClusterwideNetworkPolicyList]
}

// newCiliumClusterwideNetworkPolicies returns a CiliumClusterwideNetworkPolicies
func newCiliumClusterwideNetworkPolicies(c *CiliumV2Client) *ciliumClusterwideNetworkPolicies {
	return &ciliumClusterwideNetworkPolicies{
		gentype.NewClientWithList[*ciliumiov2.CiliumClusterwideNetworkPolicy, *ciliumiov2.CiliumClusterwideNetworkPolicyList](
			"ciliumclusterwidenetworkpolicies", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumClusterwideNetworkPolicy { return &ciliumiov2.CiliumClusterwideNetworkPolicy{} },
			func() *ciliumiov2.CiliumClusterwideNetworkPolicyList {
				return &ciliumiov2.CiliumClusterwideNetworkPolicyList{}
			},
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumEgressGatewayPoliciesGetter has a method to return a CiliumEgressGatewayPolicyInterface.
// A group's client should implement this interface.
type CiliumEgressGatewayPoliciesGetter interface {
	CiliumEgressGatewayPolicies() CiliumEgressGatewayPolicyInterface
}

// CiliumEgressGatewayPolicyInterface has methods to work with CiliumEgressGatewayPolicy resources.
type CiliumEgressGatewayPolicyInterface interface {
	Create(ctx context.Context, ciliumEgressGatewayPolicy *ciliumiov2.CiliumEgressGatewayPolicy, opts v1.CreateOptions) (*ciliumiov2.CiliumEgressGatewayPolicy, error)
	Update(ctx context.Context, ciliumEgressGatewayPolicy *ciliumiov2.CiliumEgressGatewayPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumEgressGatewayPolicy, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumEgressGatewayPolicy, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumEgressGatewayPolicyList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumEgressGatewayPolicy, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumEgressGatewayPolicyExpansion
}

// ciliumEgressGatewayPolicies implements CiliumEgressGatewayPolicyInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumEgressGatewayPolicies struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumEgressGatewayPolicy, *ciliumiov2.CiliumEgressGatewayPolicyList]
}

// newCiliumEgressGatewayPolicies returns a CiliumEgressGatewayPolicies
func newCiliumEgressGatewayPolicies(c *CiliumV2Client) *ciliumEgressGatewayPolicies {
	return &ciliumEgressGatewayPolicies{
		gentype.NewClientWithList[*ciliumiov2.CiliumEgressGatewayPolicy, *ciliumiov2.CiliumEgressGatewayPolicyList](
			"ciliumegressgatewaypolicies", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // no namespace — cluster-scoped (namespaced clients pass one through here)
			func() *ciliumiov2.CiliumEgressGatewayPolicy { return &ciliumiov2.CiliumEgressGatewayPolicy{} },
			func() *ciliumiov2.CiliumEgressGatewayPolicyList { return &ciliumiov2.CiliumEgressGatewayPolicyList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumEndpointsGetter has a method to return a CiliumEndpointInterface.
// A group's client should implement this interface.
type CiliumEndpointsGetter interface {
	CiliumEndpoints(namespace string) CiliumEndpointInterface
}

// CiliumEndpointInterface has methods to work with CiliumEndpoint resources.
type CiliumEndpointInterface interface {
	Create(ctx context.Context, ciliumEndpoint *ciliumiov2.CiliumEndpoint, opts v1.CreateOptions) (*ciliumiov2.CiliumEndpoint, error)
	Update(ctx context.Context, ciliumEndpoint *ciliumiov2.CiliumEndpoint, opts v1.UpdateOptions) (*ciliumiov2.CiliumEndpoint, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumEndpoint *ciliumiov2.CiliumEndpoint, opts v1.UpdateOptions) (*ciliumiov2.CiliumEndpoint, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumEndpoint, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumEndpointList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumEndpoint, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumEndpointExpansion
}

// ciliumEndpoints implements CiliumEndpointInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumEndpoints struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumEndpoint, *ciliumiov2.CiliumEndpointList]
}

// newCiliumEndpoints returns a CiliumEndpoints
func newCiliumEndpoints(c *CiliumV2Client, namespace string) *ciliumEndpoints {
	return &ciliumEndpoints{
		gentype.NewClientWithList[*ciliumiov2.CiliumEndpoint, *ciliumiov2.CiliumEndpointList](
			"ciliumendpoints", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace, // restricts all requests from this client to the given namespace
			func() *ciliumiov2.CiliumEndpoint { return &ciliumiov2.CiliumEndpoint{} },
			func() *ciliumiov2.CiliumEndpointList { return &ciliumiov2.CiliumEndpointList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumEnvoyConfigsGetter has a method to return a CiliumEnvoyConfigInterface.
// A group's client should implement this interface.
type CiliumEnvoyConfigsGetter interface {
	CiliumEnvoyConfigs(namespace string) CiliumEnvoyConfigInterface
}

// CiliumEnvoyConfigInterface has methods to work with CiliumEnvoyConfig resources.
type CiliumEnvoyConfigInterface interface {
	Create(ctx context.Context, ciliumEnvoyConfig *ciliumiov2.CiliumEnvoyConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumEnvoyConfig, error)
	Update(ctx context.Context, ciliumEnvoyConfig *ciliumiov2.CiliumEnvoyConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumEnvoyConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumEnvoyConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumEnvoyConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumEnvoyConfig, err error)
	// Embedded expansion interface (declared in a separate file) for any
	// hand-written extension methods.
	CiliumEnvoyConfigExpansion
}

// ciliumEnvoyConfigs implements CiliumEnvoyConfigInterface.
// All CRUD methods come from the embedded generic gentype client.
type ciliumEnvoyConfigs struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumEnvoyConfig, *ciliumiov2.CiliumEnvoyConfigList]
}

// newCiliumEnvoyConfigs returns a CiliumEnvoyConfigs
func newCiliumEnvoyConfigs(c *CiliumV2Client, namespace string) *ciliumEnvoyConfigs {
	return &ciliumEnvoyConfigs{
		gentype.NewClientWithList[*ciliumiov2.CiliumEnvoyConfig, *ciliumiov2.CiliumEnvoyConfigList](
			"ciliumenvoyconfigs", // plural resource name used in request paths
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace, // restricts all requests from this client to the given namespace
			func() *ciliumiov2.CiliumEnvoyConfig { return &ciliumiov2.CiliumEnvoyConfig{} },
			func() *ciliumiov2.CiliumEnvoyConfigList { return &ciliumiov2.CiliumEnvoyConfigList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumIdentitiesGetter has a method to return a CiliumIdentityInterface.
// A group's client should implement this interface.
type CiliumIdentitiesGetter interface {
CiliumIdentities() CiliumIdentityInterface
}
// CiliumIdentityInterface has methods to work with CiliumIdentity resources.
//
// It exposes the standard Kubernetes client verbs: Create, Update, Delete,
// DeleteCollection, Get, List, Watch and Patch.
type CiliumIdentityInterface interface {
	Create(ctx context.Context, ciliumIdentity *ciliumiov2.CiliumIdentity, opts v1.CreateOptions) (*ciliumiov2.CiliumIdentity, error)
	Update(ctx context.Context, ciliumIdentity *ciliumiov2.CiliumIdentity, opts v1.UpdateOptions) (*ciliumiov2.CiliumIdentity, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumIdentity, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumIdentityList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumIdentity, err error)
	CiliumIdentityExpansion
}
// ciliumIdentities implements CiliumIdentityInterface by embedding the
// generic gentype.ClientWithList, which provides the CRUD/list/watch/patch
// method set for the CiliumIdentity and CiliumIdentityList types.
type ciliumIdentities struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumIdentity, *ciliumiov2.CiliumIdentityList]
}
// newCiliumIdentities returns a ciliumIdentities client bound to the given
// group client.
func newCiliumIdentities(c *CiliumV2Client) *ciliumIdentities {
	// Empty namespace: the generic client is built without a namespace scope.
	inner := gentype.NewClientWithList[*ciliumiov2.CiliumIdentity, *ciliumiov2.CiliumIdentityList](
		"ciliumidentities",
		c.RESTClient(),
		scheme.ParameterCodec,
		"",
		func() *ciliumiov2.CiliumIdentity { return &ciliumiov2.CiliumIdentity{} },
		func() *ciliumiov2.CiliumIdentityList { return &ciliumiov2.CiliumIdentityList{} },
	)
	return &ciliumIdentities{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumLoadBalancerIPPoolsGetter has a method to return a CiliumLoadBalancerIPPoolInterface.
// A group's client should implement this interface.
type CiliumLoadBalancerIPPoolsGetter interface {
	// CiliumLoadBalancerIPPools returns a client for CiliumLoadBalancerIPPool resources.
	CiliumLoadBalancerIPPools() CiliumLoadBalancerIPPoolInterface
}
// CiliumLoadBalancerIPPoolInterface has methods to work with CiliumLoadBalancerIPPool resources.
//
// It exposes the standard Kubernetes client verbs plus UpdateStatus for the
// status subresource.
type CiliumLoadBalancerIPPoolInterface interface {
	Create(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (*ciliumiov2.CiliumLoadBalancerIPPool, error)
	Update(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*ciliumiov2.CiliumLoadBalancerIPPool, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*ciliumiov2.CiliumLoadBalancerIPPool, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumLoadBalancerIPPool, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumLoadBalancerIPPoolList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumLoadBalancerIPPool, err error)
	CiliumLoadBalancerIPPoolExpansion
}
// ciliumLoadBalancerIPPools implements CiliumLoadBalancerIPPoolInterface by
// embedding the generic gentype.ClientWithList, which provides the
// CRUD/list/watch/patch method set.
type ciliumLoadBalancerIPPools struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumLoadBalancerIPPool, *ciliumiov2.CiliumLoadBalancerIPPoolList]
}
// newCiliumLoadBalancerIPPools returns a ciliumLoadBalancerIPPools client
// bound to the given group client.
func newCiliumLoadBalancerIPPools(c *CiliumV2Client) *ciliumLoadBalancerIPPools {
	// Empty namespace: the generic client is built without a namespace scope.
	inner := gentype.NewClientWithList[*ciliumiov2.CiliumLoadBalancerIPPool, *ciliumiov2.CiliumLoadBalancerIPPoolList](
		"ciliumloadbalancerippools",
		c.RESTClient(),
		scheme.ParameterCodec,
		"",
		func() *ciliumiov2.CiliumLoadBalancerIPPool { return &ciliumiov2.CiliumLoadBalancerIPPool{} },
		func() *ciliumiov2.CiliumLoadBalancerIPPoolList { return &ciliumiov2.CiliumLoadBalancerIPPoolList{} },
	)
	return &ciliumLoadBalancerIPPools{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumLocalRedirectPoliciesGetter has a method to return a CiliumLocalRedirectPolicyInterface.
// A group's client should implement this interface.
type CiliumLocalRedirectPoliciesGetter interface {
	// CiliumLocalRedirectPolicies returns a client for CiliumLocalRedirectPolicy
	// resources in the given namespace.
	CiliumLocalRedirectPolicies(namespace string) CiliumLocalRedirectPolicyInterface
}
// CiliumLocalRedirectPolicyInterface has methods to work with CiliumLocalRedirectPolicy resources.
//
// It exposes the standard Kubernetes client verbs plus UpdateStatus for the
// status subresource.
type CiliumLocalRedirectPolicyInterface interface {
	Create(ctx context.Context, ciliumLocalRedirectPolicy *ciliumiov2.CiliumLocalRedirectPolicy, opts v1.CreateOptions) (*ciliumiov2.CiliumLocalRedirectPolicy, error)
	Update(ctx context.Context, ciliumLocalRedirectPolicy *ciliumiov2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumLocalRedirectPolicy, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumLocalRedirectPolicy *ciliumiov2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumLocalRedirectPolicy, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumLocalRedirectPolicy, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumLocalRedirectPolicyList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumLocalRedirectPolicy, err error)
	CiliumLocalRedirectPolicyExpansion
}
// ciliumLocalRedirectPolicies implements CiliumLocalRedirectPolicyInterface by
// embedding the generic gentype.ClientWithList, which provides the
// CRUD/list/watch/patch method set.
type ciliumLocalRedirectPolicies struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumLocalRedirectPolicy, *ciliumiov2.CiliumLocalRedirectPolicyList]
}
// newCiliumLocalRedirectPolicies returns a ciliumLocalRedirectPolicies client
// bound to the given group client and namespace.
func newCiliumLocalRedirectPolicies(c *CiliumV2Client, namespace string) *ciliumLocalRedirectPolicies {
	// Build the generic namespaced client for the "ciliumlocalredirectpolicies" resource.
	inner := gentype.NewClientWithList[*ciliumiov2.CiliumLocalRedirectPolicy, *ciliumiov2.CiliumLocalRedirectPolicyList](
		"ciliumlocalredirectpolicies",
		c.RESTClient(),
		scheme.ParameterCodec,
		namespace,
		func() *ciliumiov2.CiliumLocalRedirectPolicy { return &ciliumiov2.CiliumLocalRedirectPolicy{} },
		func() *ciliumiov2.CiliumLocalRedirectPolicyList { return &ciliumiov2.CiliumLocalRedirectPolicyList{} },
	)
	return &ciliumLocalRedirectPolicies{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumNetworkPoliciesGetter has a method to return a CiliumNetworkPolicyInterface.
// A group's client should implement this interface.
type CiliumNetworkPoliciesGetter interface {
	// CiliumNetworkPolicies returns a client for CiliumNetworkPolicy resources
	// in the given namespace.
	CiliumNetworkPolicies(namespace string) CiliumNetworkPolicyInterface
}
// CiliumNetworkPolicyInterface has methods to work with CiliumNetworkPolicy resources.
//
// It exposes the standard Kubernetes client verbs plus UpdateStatus for the
// status subresource.
type CiliumNetworkPolicyInterface interface {
	Create(ctx context.Context, ciliumNetworkPolicy *ciliumiov2.CiliumNetworkPolicy, opts v1.CreateOptions) (*ciliumiov2.CiliumNetworkPolicy, error)
	Update(ctx context.Context, ciliumNetworkPolicy *ciliumiov2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumNetworkPolicy, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumNetworkPolicy *ciliumiov2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumNetworkPolicy, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumNetworkPolicy, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumNetworkPolicyList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumNetworkPolicy, err error)
	CiliumNetworkPolicyExpansion
}
// ciliumNetworkPolicies implements CiliumNetworkPolicyInterface by embedding
// the generic gentype.ClientWithList, which provides the CRUD/list/watch/patch
// method set.
type ciliumNetworkPolicies struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumNetworkPolicy, *ciliumiov2.CiliumNetworkPolicyList]
}
// newCiliumNetworkPolicies returns a ciliumNetworkPolicies client bound to
// the given group client and namespace.
func newCiliumNetworkPolicies(c *CiliumV2Client, namespace string) *ciliumNetworkPolicies {
	// Build the generic namespaced client for the "ciliumnetworkpolicies" resource.
	inner := gentype.NewClientWithList[*ciliumiov2.CiliumNetworkPolicy, *ciliumiov2.CiliumNetworkPolicyList](
		"ciliumnetworkpolicies",
		c.RESTClient(),
		scheme.ParameterCodec,
		namespace,
		func() *ciliumiov2.CiliumNetworkPolicy { return &ciliumiov2.CiliumNetworkPolicy{} },
		func() *ciliumiov2.CiliumNetworkPolicyList { return &ciliumiov2.CiliumNetworkPolicyList{} },
	)
	return &ciliumNetworkPolicies{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumNodesGetter has a method to return a CiliumNodeInterface.
// A group's client should implement this interface.
type CiliumNodesGetter interface {
	// CiliumNodes returns a client for CiliumNode resources.
	CiliumNodes() CiliumNodeInterface
}
// CiliumNodeInterface has methods to work with CiliumNode resources.
//
// It exposes the standard Kubernetes client verbs plus UpdateStatus for the
// status subresource.
type CiliumNodeInterface interface {
	Create(ctx context.Context, ciliumNode *ciliumiov2.CiliumNode, opts v1.CreateOptions) (*ciliumiov2.CiliumNode, error)
	Update(ctx context.Context, ciliumNode *ciliumiov2.CiliumNode, opts v1.UpdateOptions) (*ciliumiov2.CiliumNode, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumNode *ciliumiov2.CiliumNode, opts v1.UpdateOptions) (*ciliumiov2.CiliumNode, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumNode, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumNodeList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumNode, err error)
	CiliumNodeExpansion
}
// ciliumNodes implements CiliumNodeInterface by embedding the generic
// gentype.ClientWithList, which provides the CRUD/list/watch/patch method set.
type ciliumNodes struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumNode, *ciliumiov2.CiliumNodeList]
}
// newCiliumNodes returns a ciliumNodes client bound to the given group client.
func newCiliumNodes(c *CiliumV2Client) *ciliumNodes {
	// Empty namespace: the generic client is built without a namespace scope.
	inner := gentype.NewClientWithList[*ciliumiov2.CiliumNode, *ciliumiov2.CiliumNodeList](
		"ciliumnodes",
		c.RESTClient(),
		scheme.ParameterCodec,
		"",
		func() *ciliumiov2.CiliumNode { return &ciliumiov2.CiliumNode{} },
		func() *ciliumiov2.CiliumNodeList { return &ciliumiov2.CiliumNodeList{} },
	)
	return &ciliumNodes{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2
import (
context "context"
ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumNodeConfigsGetter has a method to return a CiliumNodeConfigInterface.
// A group's client should implement this interface.
type CiliumNodeConfigsGetter interface {
	// CiliumNodeConfigs returns a client for CiliumNodeConfig resources in the
	// given namespace.
	CiliumNodeConfigs(namespace string) CiliumNodeConfigInterface
}
// CiliumNodeConfigInterface has methods to work with CiliumNodeConfig resources.
//
// It exposes the standard Kubernetes client verbs: Create, Update, Delete,
// DeleteCollection, Get, List, Watch and Patch.
type CiliumNodeConfigInterface interface {
	Create(ctx context.Context, ciliumNodeConfig *ciliumiov2.CiliumNodeConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumNodeConfig, error)
	Update(ctx context.Context, ciliumNodeConfig *ciliumiov2.CiliumNodeConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumNodeConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumNodeConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumNodeConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumNodeConfig, err error)
	CiliumNodeConfigExpansion
}
// ciliumNodeConfigs implements CiliumNodeConfigInterface by embedding the
// generic gentype.ClientWithList, which provides the CRUD/list/watch/patch
// method set.
type ciliumNodeConfigs struct {
	*gentype.ClientWithList[*ciliumiov2.CiliumNodeConfig, *ciliumiov2.CiliumNodeConfigList]
}
// newCiliumNodeConfigs returns a ciliumNodeConfigs client bound to the given
// group client and namespace.
func newCiliumNodeConfigs(c *CiliumV2Client, namespace string) *ciliumNodeConfigs {
	// Build the generic namespaced client for the "ciliumnodeconfigs" resource.
	inner := gentype.NewClientWithList[*ciliumiov2.CiliumNodeConfig, *ciliumiov2.CiliumNodeConfigList](
		"ciliumnodeconfigs",
		c.RESTClient(),
		scheme.ParameterCodec,
		namespace,
		func() *ciliumiov2.CiliumNodeConfig { return &ciliumiov2.CiliumNodeConfig{} },
		func() *ciliumiov2.CiliumNodeConfigList { return &ciliumiov2.CiliumNodeConfigList{} },
	)
	return &ciliumNodeConfigs{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
http "net/http"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// CiliumV2alpha1Interface is the full client surface of the cilium.io/v2alpha1
// API group: a raw RESTClient accessor plus one typed-client getter per
// resource in the group.
type CiliumV2alpha1Interface interface {
	RESTClient() rest.Interface
	CiliumBGPAdvertisementsGetter
	CiliumBGPClusterConfigsGetter
	CiliumBGPNodeConfigsGetter
	CiliumBGPNodeConfigOverridesGetter
	CiliumBGPPeerConfigsGetter
	CiliumBGPPeeringPoliciesGetter
	CiliumCIDRGroupsGetter
	CiliumEndpointSlicesGetter
	CiliumGatewayClassConfigsGetter
	CiliumL2AnnouncementPoliciesGetter
	CiliumLoadBalancerIPPoolsGetter
	CiliumNodeConfigsGetter
	CiliumPodIPPoolsGetter
}
// CiliumV2alpha1Client is used to interact with features provided by the cilium.io group.
// It wraps a single rest.Interface that all typed clients for this group share.
type CiliumV2alpha1Client struct {
	restClient rest.Interface
}
// CiliumBGPAdvertisements returns a client for CiliumBGPAdvertisement resources.
func (c *CiliumV2alpha1Client) CiliumBGPAdvertisements() CiliumBGPAdvertisementInterface {
	return newCiliumBGPAdvertisements(c)
}
// CiliumBGPClusterConfigs returns a client for CiliumBGPClusterConfig resources.
func (c *CiliumV2alpha1Client) CiliumBGPClusterConfigs() CiliumBGPClusterConfigInterface {
	return newCiliumBGPClusterConfigs(c)
}
// CiliumBGPNodeConfigs returns a client for CiliumBGPNodeConfig resources.
func (c *CiliumV2alpha1Client) CiliumBGPNodeConfigs() CiliumBGPNodeConfigInterface {
	return newCiliumBGPNodeConfigs(c)
}
// CiliumBGPNodeConfigOverrides returns a client for CiliumBGPNodeConfigOverride resources.
func (c *CiliumV2alpha1Client) CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInterface {
	return newCiliumBGPNodeConfigOverrides(c)
}
// CiliumBGPPeerConfigs returns a client for CiliumBGPPeerConfig resources.
func (c *CiliumV2alpha1Client) CiliumBGPPeerConfigs() CiliumBGPPeerConfigInterface {
	return newCiliumBGPPeerConfigs(c)
}
// CiliumBGPPeeringPolicies returns a client for CiliumBGPPeeringPolicy resources.
func (c *CiliumV2alpha1Client) CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInterface {
	return newCiliumBGPPeeringPolicies(c)
}
// CiliumCIDRGroups returns a client for CiliumCIDRGroup resources.
func (c *CiliumV2alpha1Client) CiliumCIDRGroups() CiliumCIDRGroupInterface {
	return newCiliumCIDRGroups(c)
}
// CiliumEndpointSlices returns a client for CiliumEndpointSlice resources.
func (c *CiliumV2alpha1Client) CiliumEndpointSlices() CiliumEndpointSliceInterface {
	return newCiliumEndpointSlices(c)
}
// CiliumGatewayClassConfigs returns a client for CiliumGatewayClassConfig resources.
func (c *CiliumV2alpha1Client) CiliumGatewayClassConfigs() CiliumGatewayClassConfigInterface {
	return newCiliumGatewayClassConfigs(c)
}
// CiliumL2AnnouncementPolicies returns a client for CiliumL2AnnouncementPolicy resources.
func (c *CiliumV2alpha1Client) CiliumL2AnnouncementPolicies() CiliumL2AnnouncementPolicyInterface {
	return newCiliumL2AnnouncementPolicies(c)
}
// CiliumLoadBalancerIPPools returns a client for CiliumLoadBalancerIPPool resources.
func (c *CiliumV2alpha1Client) CiliumLoadBalancerIPPools() CiliumLoadBalancerIPPoolInterface {
	return newCiliumLoadBalancerIPPools(c)
}
// CiliumNodeConfigs returns a client for CiliumNodeConfig resources in the given namespace.
func (c *CiliumV2alpha1Client) CiliumNodeConfigs(namespace string) CiliumNodeConfigInterface {
	return newCiliumNodeConfigs(c, namespace)
}
// CiliumPodIPPools returns a client for CiliumPodIPPool resources.
func (c *CiliumV2alpha1Client) CiliumPodIPPools() CiliumPodIPPoolInterface {
	return newCiliumPodIPPools(c)
}
// NewForConfig creates a new CiliumV2alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*CiliumV2alpha1Client, error) {
	cfg := *c // copy so the caller's config is never mutated
	setConfigDefaults(&cfg)
	hc, err := rest.HTTPClientFor(&cfg)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&cfg, hc)
}
// NewForConfigAndClient creates a new CiliumV2alpha1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CiliumV2alpha1Client, error) {
	cfg := *c // copy so the caller's config is never mutated
	setConfigDefaults(&cfg)
	rc, err := rest.RESTClientForConfigAndClient(&cfg, h)
	if err != nil {
		return nil, err
	}
	return &CiliumV2alpha1Client{restClient: rc}, nil
}
// NewForConfigOrDie creates a new CiliumV2alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *CiliumV2alpha1Client {
	cl, err := NewForConfig(c)
	if err != nil {
		// Intentionally fatal: this constructor is for callers that treat a
		// bad config as a programmer error.
		panic(err)
	}
	return cl
}
// New creates a new CiliumV2alpha1Client for the given RESTClient.
func New(c rest.Interface) *CiliumV2alpha1Client {
	return &CiliumV2alpha1Client{restClient: c}
}
// setConfigDefaults fills in the group/version, API path, serializer and
// (if unset) user agent required to talk to the cilium.io/v2alpha1 API.
func setConfigDefaults(config *rest.Config) {
	// Copy the package-level group version so the config never aliases it.
	groupVersion := ciliumiov2alpha1.SchemeGroupVersion
	config.GroupVersion = &groupVersion
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	if len(config.UserAgent) == 0 {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// A nil receiver yields a nil rest.Interface rather than panicking.
func (c *CiliumV2alpha1Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPAdvertisementsGetter has a method to return a CiliumBGPAdvertisementInterface.
// A group's client should implement this interface.
type CiliumBGPAdvertisementsGetter interface {
	// CiliumBGPAdvertisements returns a client for CiliumBGPAdvertisement resources.
	CiliumBGPAdvertisements() CiliumBGPAdvertisementInterface
}
// CiliumBGPAdvertisementInterface has methods to work with CiliumBGPAdvertisement resources.
//
// It exposes the standard Kubernetes client verbs: Create, Update, Delete,
// DeleteCollection, Get, List, Watch and Patch.
type CiliumBGPAdvertisementInterface interface {
	Create(ctx context.Context, ciliumBGPAdvertisement *ciliumiov2alpha1.CiliumBGPAdvertisement, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPAdvertisement, error)
	Update(ctx context.Context, ciliumBGPAdvertisement *ciliumiov2alpha1.CiliumBGPAdvertisement, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPAdvertisement, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPAdvertisement, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPAdvertisementList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPAdvertisement, err error)
	CiliumBGPAdvertisementExpansion
}
// ciliumBGPAdvertisements implements CiliumBGPAdvertisementInterface by
// embedding the generic gentype.ClientWithList, which provides the
// CRUD/list/watch/patch method set.
type ciliumBGPAdvertisements struct {
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPAdvertisement, *ciliumiov2alpha1.CiliumBGPAdvertisementList]
}
// newCiliumBGPAdvertisements returns a ciliumBGPAdvertisements client bound
// to the given group client.
func newCiliumBGPAdvertisements(c *CiliumV2alpha1Client) *ciliumBGPAdvertisements {
	// Empty namespace: the generic client is built without a namespace scope.
	inner := gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPAdvertisement, *ciliumiov2alpha1.CiliumBGPAdvertisementList](
		"ciliumbgpadvertisements",
		c.RESTClient(),
		scheme.ParameterCodec,
		"",
		func() *ciliumiov2alpha1.CiliumBGPAdvertisement { return &ciliumiov2alpha1.CiliumBGPAdvertisement{} },
		func() *ciliumiov2alpha1.CiliumBGPAdvertisementList {
			return &ciliumiov2alpha1.CiliumBGPAdvertisementList{}
		},
	)
	return &ciliumBGPAdvertisements{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPClusterConfigsGetter has a method to return a CiliumBGPClusterConfigInterface.
// A group's client should implement this interface.
type CiliumBGPClusterConfigsGetter interface {
	// CiliumBGPClusterConfigs returns a client for CiliumBGPClusterConfig resources.
	CiliumBGPClusterConfigs() CiliumBGPClusterConfigInterface
}
// CiliumBGPClusterConfigInterface has methods to work with CiliumBGPClusterConfig resources.
//
// It exposes the standard Kubernetes client verbs plus UpdateStatus for the
// status subresource.
type CiliumBGPClusterConfigInterface interface {
	Create(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2alpha1.CiliumBGPClusterConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error)
	Update(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPClusterConfig, err error)
	CiliumBGPClusterConfigExpansion
}
// ciliumBGPClusterConfigs implements CiliumBGPClusterConfigInterface by
// embedding the generic gentype.ClientWithList, which provides the
// CRUD/list/watch/patch method set.
type ciliumBGPClusterConfigs struct {
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPClusterConfig, *ciliumiov2alpha1.CiliumBGPClusterConfigList]
}
// newCiliumBGPClusterConfigs returns a ciliumBGPClusterConfigs client bound
// to the given group client.
func newCiliumBGPClusterConfigs(c *CiliumV2alpha1Client) *ciliumBGPClusterConfigs {
	// Empty namespace: the generic client is built without a namespace scope.
	inner := gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPClusterConfig, *ciliumiov2alpha1.CiliumBGPClusterConfigList](
		"ciliumbgpclusterconfigs",
		c.RESTClient(),
		scheme.ParameterCodec,
		"",
		func() *ciliumiov2alpha1.CiliumBGPClusterConfig { return &ciliumiov2alpha1.CiliumBGPClusterConfig{} },
		func() *ciliumiov2alpha1.CiliumBGPClusterConfigList {
			return &ciliumiov2alpha1.CiliumBGPClusterConfigList{}
		},
	)
	return &ciliumBGPClusterConfigs{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPNodeConfigsGetter has a method to return a CiliumBGPNodeConfigInterface.
// A group's client should implement this interface.
type CiliumBGPNodeConfigsGetter interface {
	// CiliumBGPNodeConfigs returns a client for CiliumBGPNodeConfig resources.
	CiliumBGPNodeConfigs() CiliumBGPNodeConfigInterface
}
// CiliumBGPNodeConfigInterface has methods to work with CiliumBGPNodeConfig resources.
//
// It exposes the standard Kubernetes client verbs plus UpdateStatus for the
// status subresource.
type CiliumBGPNodeConfigInterface interface {
	Create(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2alpha1.CiliumBGPNodeConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error)
	Update(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPNodeConfig, err error)
	CiliumBGPNodeConfigExpansion
}
// ciliumBGPNodeConfigs implements CiliumBGPNodeConfigInterface by embedding
// the generic gentype.ClientWithList, which provides the CRUD/list/watch/patch
// method set.
type ciliumBGPNodeConfigs struct {
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPNodeConfig, *ciliumiov2alpha1.CiliumBGPNodeConfigList]
}
// newCiliumBGPNodeConfigs returns a ciliumBGPNodeConfigs client bound to the
// given group client.
func newCiliumBGPNodeConfigs(c *CiliumV2alpha1Client) *ciliumBGPNodeConfigs {
	// Empty namespace: the generic client is built without a namespace scope.
	inner := gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPNodeConfig, *ciliumiov2alpha1.CiliumBGPNodeConfigList](
		"ciliumbgpnodeconfigs",
		c.RESTClient(),
		scheme.ParameterCodec,
		"",
		func() *ciliumiov2alpha1.CiliumBGPNodeConfig { return &ciliumiov2alpha1.CiliumBGPNodeConfig{} },
		func() *ciliumiov2alpha1.CiliumBGPNodeConfigList { return &ciliumiov2alpha1.CiliumBGPNodeConfigList{} },
	)
	return &ciliumBGPNodeConfigs{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPNodeConfigOverridesGetter has a method to return a CiliumBGPNodeConfigOverrideInterface.
// A group's client should implement this interface.
type CiliumBGPNodeConfigOverridesGetter interface {
	// CiliumBGPNodeConfigOverrides returns a client for CiliumBGPNodeConfigOverride resources.
	CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInterface
}
// CiliumBGPNodeConfigOverrideInterface has methods to work with CiliumBGPNodeConfigOverride resources.
//
// It exposes the standard Kubernetes client verbs: Create, Update, Delete,
// DeleteCollection, Get, List, Watch and Patch.
type CiliumBGPNodeConfigOverrideInterface interface {
	Create(ctx context.Context, ciliumBGPNodeConfigOverride *ciliumiov2alpha1.CiliumBGPNodeConfigOverride, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, error)
	Update(ctx context.Context, ciliumBGPNodeConfigOverride *ciliumiov2alpha1.CiliumBGPNodeConfigOverride, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPNodeConfigOverride, err error)
	CiliumBGPNodeConfigOverrideExpansion
}
// ciliumBGPNodeConfigOverrides implements CiliumBGPNodeConfigOverrideInterface
// by embedding the generic gentype.ClientWithList, which provides the
// CRUD/list/watch/patch method set.
type ciliumBGPNodeConfigOverrides struct {
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, *ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList]
}
// newCiliumBGPNodeConfigOverrides returns a ciliumBGPNodeConfigOverrides
// client bound to the given group client.
func newCiliumBGPNodeConfigOverrides(c *CiliumV2alpha1Client) *ciliumBGPNodeConfigOverrides {
	// Empty namespace: the generic client is built without a namespace scope.
	inner := gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, *ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList](
		"ciliumbgpnodeconfigoverrides",
		c.RESTClient(),
		scheme.ParameterCodec,
		"",
		func() *ciliumiov2alpha1.CiliumBGPNodeConfigOverride {
			return &ciliumiov2alpha1.CiliumBGPNodeConfigOverride{}
		},
		func() *ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList {
			return &ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList{}
		},
	)
	return &ciliumBGPNodeConfigOverrides{inner}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPPeerConfigsGetter has a method to return a CiliumBGPPeerConfigInterface.
// A group's client should implement this interface.
type CiliumBGPPeerConfigsGetter interface {
	// CiliumBGPPeerConfigs returns a client for CiliumBGPPeerConfig resources.
	CiliumBGPPeerConfigs() CiliumBGPPeerConfigInterface
}
// CiliumBGPPeerConfigInterface has methods to work with CiliumBGPPeerConfig resources.
//
// It exposes the standard Kubernetes client verbs plus UpdateStatus for the
// status subresource.
type CiliumBGPPeerConfigInterface interface {
	Create(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2alpha1.CiliumBGPPeerConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error)
	Update(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPPeerConfig, err error)
	CiliumBGPPeerConfigExpansion
}
// ciliumBGPPeerConfigs implements CiliumBGPPeerConfigInterface
type ciliumBGPPeerConfigs struct {
*gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPPeerConfig, *ciliumiov2alpha1.CiliumBGPPeerConfigList]
}
// newCiliumBGPPeerConfigs returns a CiliumBGPPeerConfigs
func newCiliumBGPPeerConfigs(c *CiliumV2alpha1Client) *ciliumBGPPeerConfigs {
return &ciliumBGPPeerConfigs{
gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPPeerConfig, *ciliumiov2alpha1.CiliumBGPPeerConfigList](
"ciliumbgppeerconfigs",
c.RESTClient(),
scheme.ParameterCodec,
"",
func() *ciliumiov2alpha1.CiliumBGPPeerConfig { return &ciliumiov2alpha1.CiliumBGPPeerConfig{} },
func() *ciliumiov2alpha1.CiliumBGPPeerConfigList { return &ciliumiov2alpha1.CiliumBGPPeerConfigList{} },
),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumBGPPeeringPoliciesGetter has a method to return a CiliumBGPPeeringPolicyInterface.
// A group's client should implement this interface.
type CiliumBGPPeeringPoliciesGetter interface {
	CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInterface
}

// CiliumBGPPeeringPolicyInterface has methods to work with CiliumBGPPeeringPolicy resources.
type CiliumBGPPeeringPolicyInterface interface {
	Create(ctx context.Context, ciliumBGPPeeringPolicy *ciliumiov2alpha1.CiliumBGPPeeringPolicy, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPPeeringPolicy, error)
	Update(ctx context.Context, ciliumBGPPeeringPolicy *ciliumiov2alpha1.CiliumBGPPeeringPolicy, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPPeeringPolicy, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPPeeringPolicy, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPPeeringPolicyList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPPeeringPolicy, err error)
	CiliumBGPPeeringPolicyExpansion
}

// ciliumBGPPeeringPolicies implements CiliumBGPPeeringPolicyInterface
type ciliumBGPPeeringPolicies struct {
	// Embedded generic client supplying the CRUD methods declared above.
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPPeeringPolicy, *ciliumiov2alpha1.CiliumBGPPeeringPolicyList]
}

// newCiliumBGPPeeringPolicies returns a CiliumBGPPeeringPolicies
func newCiliumBGPPeeringPolicies(c *CiliumV2alpha1Client) *ciliumBGPPeeringPolicies {
	return &ciliumBGPPeeringPolicies{
		gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPPeeringPolicy, *ciliumiov2alpha1.CiliumBGPPeeringPolicyList](
			"ciliumbgppeeringpolicies",
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // empty namespace: this resource is cluster-scoped
			func() *ciliumiov2alpha1.CiliumBGPPeeringPolicy { return &ciliumiov2alpha1.CiliumBGPPeeringPolicy{} },
			func() *ciliumiov2alpha1.CiliumBGPPeeringPolicyList {
				return &ciliumiov2alpha1.CiliumBGPPeeringPolicyList{}
			},
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumCIDRGroupsGetter has a method to return a CiliumCIDRGroupInterface.
// A group's client should implement this interface.
type CiliumCIDRGroupsGetter interface {
	CiliumCIDRGroups() CiliumCIDRGroupInterface
}

// CiliumCIDRGroupInterface has methods to work with CiliumCIDRGroup resources.
type CiliumCIDRGroupInterface interface {
	Create(ctx context.Context, ciliumCIDRGroup *ciliumiov2alpha1.CiliumCIDRGroup, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumCIDRGroup, error)
	Update(ctx context.Context, ciliumCIDRGroup *ciliumiov2alpha1.CiliumCIDRGroup, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumCIDRGroup, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumCIDRGroup, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumCIDRGroupList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumCIDRGroup, err error)
	CiliumCIDRGroupExpansion
}

// ciliumCIDRGroups implements CiliumCIDRGroupInterface
type ciliumCIDRGroups struct {
	// Embedded generic client supplying the CRUD methods declared above.
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumCIDRGroup, *ciliumiov2alpha1.CiliumCIDRGroupList]
}

// newCiliumCIDRGroups returns a CiliumCIDRGroups
func newCiliumCIDRGroups(c *CiliumV2alpha1Client) *ciliumCIDRGroups {
	return &ciliumCIDRGroups{
		gentype.NewClientWithList[*ciliumiov2alpha1.CiliumCIDRGroup, *ciliumiov2alpha1.CiliumCIDRGroupList](
			"ciliumcidrgroups",
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // empty namespace: this resource is cluster-scoped
			func() *ciliumiov2alpha1.CiliumCIDRGroup { return &ciliumiov2alpha1.CiliumCIDRGroup{} },
			func() *ciliumiov2alpha1.CiliumCIDRGroupList { return &ciliumiov2alpha1.CiliumCIDRGroupList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumEndpointSlicesGetter has a method to return a CiliumEndpointSliceInterface.
// A group's client should implement this interface.
type CiliumEndpointSlicesGetter interface {
	CiliumEndpointSlices() CiliumEndpointSliceInterface
}

// CiliumEndpointSliceInterface has methods to work with CiliumEndpointSlice resources.
type CiliumEndpointSliceInterface interface {
	Create(ctx context.Context, ciliumEndpointSlice *ciliumiov2alpha1.CiliumEndpointSlice, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumEndpointSlice, error)
	Update(ctx context.Context, ciliumEndpointSlice *ciliumiov2alpha1.CiliumEndpointSlice, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumEndpointSlice, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumEndpointSlice, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumEndpointSliceList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumEndpointSlice, err error)
	CiliumEndpointSliceExpansion
}

// ciliumEndpointSlices implements CiliumEndpointSliceInterface
type ciliumEndpointSlices struct {
	// Embedded generic client supplying the CRUD methods declared above.
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumEndpointSlice, *ciliumiov2alpha1.CiliumEndpointSliceList]
}

// newCiliumEndpointSlices returns a CiliumEndpointSlices
func newCiliumEndpointSlices(c *CiliumV2alpha1Client) *ciliumEndpointSlices {
	return &ciliumEndpointSlices{
		gentype.NewClientWithList[*ciliumiov2alpha1.CiliumEndpointSlice, *ciliumiov2alpha1.CiliumEndpointSliceList](
			"ciliumendpointslices",
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // empty namespace: this resource is cluster-scoped
			func() *ciliumiov2alpha1.CiliumEndpointSlice { return &ciliumiov2alpha1.CiliumEndpointSlice{} },
			func() *ciliumiov2alpha1.CiliumEndpointSliceList { return &ciliumiov2alpha1.CiliumEndpointSliceList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumGatewayClassConfigsGetter has a method to return a CiliumGatewayClassConfigInterface.
// A group's client should implement this interface.
type CiliumGatewayClassConfigsGetter interface {
	CiliumGatewayClassConfigs() CiliumGatewayClassConfigInterface
}

// CiliumGatewayClassConfigInterface has methods to work with CiliumGatewayClassConfig resources.
type CiliumGatewayClassConfigInterface interface {
	Create(ctx context.Context, ciliumGatewayClassConfig *ciliumiov2alpha1.CiliumGatewayClassConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumGatewayClassConfig, error)
	Update(ctx context.Context, ciliumGatewayClassConfig *ciliumiov2alpha1.CiliumGatewayClassConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumGatewayClassConfig, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumGatewayClassConfig *ciliumiov2alpha1.CiliumGatewayClassConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumGatewayClassConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumGatewayClassConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumGatewayClassConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumGatewayClassConfig, err error)
	CiliumGatewayClassConfigExpansion
}

// ciliumGatewayClassConfigs implements CiliumGatewayClassConfigInterface
type ciliumGatewayClassConfigs struct {
	// Embedded generic client supplying the CRUD methods declared above.
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumGatewayClassConfig, *ciliumiov2alpha1.CiliumGatewayClassConfigList]
}

// newCiliumGatewayClassConfigs returns a CiliumGatewayClassConfigs
func newCiliumGatewayClassConfigs(c *CiliumV2alpha1Client) *ciliumGatewayClassConfigs {
	return &ciliumGatewayClassConfigs{
		gentype.NewClientWithList[*ciliumiov2alpha1.CiliumGatewayClassConfig, *ciliumiov2alpha1.CiliumGatewayClassConfigList](
			"ciliumgatewayclassconfigs",
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // empty namespace: this resource is cluster-scoped
			func() *ciliumiov2alpha1.CiliumGatewayClassConfig { return &ciliumiov2alpha1.CiliumGatewayClassConfig{} },
			func() *ciliumiov2alpha1.CiliumGatewayClassConfigList {
				return &ciliumiov2alpha1.CiliumGatewayClassConfigList{}
			},
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumL2AnnouncementPoliciesGetter has a method to return a CiliumL2AnnouncementPolicyInterface.
// A group's client should implement this interface.
type CiliumL2AnnouncementPoliciesGetter interface {
	CiliumL2AnnouncementPolicies() CiliumL2AnnouncementPolicyInterface
}

// CiliumL2AnnouncementPolicyInterface has methods to work with CiliumL2AnnouncementPolicy resources.
type CiliumL2AnnouncementPolicyInterface interface {
	Create(ctx context.Context, ciliumL2AnnouncementPolicy *ciliumiov2alpha1.CiliumL2AnnouncementPolicy, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error)
	Update(ctx context.Context, ciliumL2AnnouncementPolicy *ciliumiov2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumL2AnnouncementPolicy *ciliumiov2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicyList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumL2AnnouncementPolicy, err error)
	CiliumL2AnnouncementPolicyExpansion
}

// ciliumL2AnnouncementPolicies implements CiliumL2AnnouncementPolicyInterface
type ciliumL2AnnouncementPolicies struct {
	// Embedded generic client supplying the CRUD methods declared above.
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, *ciliumiov2alpha1.CiliumL2AnnouncementPolicyList]
}

// newCiliumL2AnnouncementPolicies returns a CiliumL2AnnouncementPolicies
func newCiliumL2AnnouncementPolicies(c *CiliumV2alpha1Client) *ciliumL2AnnouncementPolicies {
	return &ciliumL2AnnouncementPolicies{
		gentype.NewClientWithList[*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, *ciliumiov2alpha1.CiliumL2AnnouncementPolicyList](
			"ciliuml2announcementpolicies",
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // empty namespace: this resource is cluster-scoped
			func() *ciliumiov2alpha1.CiliumL2AnnouncementPolicy {
				return &ciliumiov2alpha1.CiliumL2AnnouncementPolicy{}
			},
			func() *ciliumiov2alpha1.CiliumL2AnnouncementPolicyList {
				return &ciliumiov2alpha1.CiliumL2AnnouncementPolicyList{}
			},
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumLoadBalancerIPPoolsGetter has a method to return a CiliumLoadBalancerIPPoolInterface.
// A group's client should implement this interface.
type CiliumLoadBalancerIPPoolsGetter interface {
	CiliumLoadBalancerIPPools() CiliumLoadBalancerIPPoolInterface
}

// CiliumLoadBalancerIPPoolInterface has methods to work with CiliumLoadBalancerIPPool resources.
type CiliumLoadBalancerIPPoolInterface interface {
	Create(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2alpha1.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error)
	Update(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPoolList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumLoadBalancerIPPool, err error)
	CiliumLoadBalancerIPPoolExpansion
}

// ciliumLoadBalancerIPPools implements CiliumLoadBalancerIPPoolInterface
type ciliumLoadBalancerIPPools struct {
	// Embedded generic client supplying the CRUD methods declared above.
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumLoadBalancerIPPool, *ciliumiov2alpha1.CiliumLoadBalancerIPPoolList]
}

// newCiliumLoadBalancerIPPools returns a CiliumLoadBalancerIPPools
func newCiliumLoadBalancerIPPools(c *CiliumV2alpha1Client) *ciliumLoadBalancerIPPools {
	return &ciliumLoadBalancerIPPools{
		gentype.NewClientWithList[*ciliumiov2alpha1.CiliumLoadBalancerIPPool, *ciliumiov2alpha1.CiliumLoadBalancerIPPoolList](
			"ciliumloadbalancerippools",
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // empty namespace: this resource is cluster-scoped
			func() *ciliumiov2alpha1.CiliumLoadBalancerIPPool { return &ciliumiov2alpha1.CiliumLoadBalancerIPPool{} },
			func() *ciliumiov2alpha1.CiliumLoadBalancerIPPoolList {
				return &ciliumiov2alpha1.CiliumLoadBalancerIPPoolList{}
			},
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumNodeConfigsGetter has a method to return a CiliumNodeConfigInterface.
// A group's client should implement this interface.
type CiliumNodeConfigsGetter interface {
	CiliumNodeConfigs(namespace string) CiliumNodeConfigInterface
}

// CiliumNodeConfigInterface has methods to work with CiliumNodeConfig resources.
type CiliumNodeConfigInterface interface {
	Create(ctx context.Context, ciliumNodeConfig *ciliumiov2alpha1.CiliumNodeConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumNodeConfig, error)
	Update(ctx context.Context, ciliumNodeConfig *ciliumiov2alpha1.CiliumNodeConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumNodeConfig, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumNodeConfig, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumNodeConfigList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumNodeConfig, err error)
	CiliumNodeConfigExpansion
}

// ciliumNodeConfigs implements CiliumNodeConfigInterface
type ciliumNodeConfigs struct {
	// Embedded generic client supplying the CRUD methods declared above.
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumNodeConfig, *ciliumiov2alpha1.CiliumNodeConfigList]
}

// newCiliumNodeConfigs returns a CiliumNodeConfigs
func newCiliumNodeConfigs(c *CiliumV2alpha1Client, namespace string) *ciliumNodeConfigs {
	return &ciliumNodeConfigs{
		gentype.NewClientWithList[*ciliumiov2alpha1.CiliumNodeConfig, *ciliumiov2alpha1.CiliumNodeConfigList](
			"ciliumnodeconfigs",
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace, // namespaced resource: client is scoped to this namespace
			func() *ciliumiov2alpha1.CiliumNodeConfig { return &ciliumiov2alpha1.CiliumNodeConfig{} },
			func() *ciliumiov2alpha1.CiliumNodeConfigList { return &ciliumiov2alpha1.CiliumNodeConfigList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v2alpha1
import (
context "context"
ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CiliumPodIPPoolsGetter has a method to return a CiliumPodIPPoolInterface.
// A group's client should implement this interface.
type CiliumPodIPPoolsGetter interface {
	CiliumPodIPPools() CiliumPodIPPoolInterface
}

// CiliumPodIPPoolInterface has methods to work with CiliumPodIPPool resources.
type CiliumPodIPPoolInterface interface {
	Create(ctx context.Context, ciliumPodIPPool *ciliumiov2alpha1.CiliumPodIPPool, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumPodIPPool, error)
	Update(ctx context.Context, ciliumPodIPPool *ciliumiov2alpha1.CiliumPodIPPool, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumPodIPPool, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumPodIPPool, error)
	List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumPodIPPoolList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumPodIPPool, err error)
	CiliumPodIPPoolExpansion
}

// ciliumPodIPPools implements CiliumPodIPPoolInterface
type ciliumPodIPPools struct {
	// Embedded generic client supplying the CRUD methods declared above.
	*gentype.ClientWithList[*ciliumiov2alpha1.CiliumPodIPPool, *ciliumiov2alpha1.CiliumPodIPPoolList]
}

// newCiliumPodIPPools returns a CiliumPodIPPools
func newCiliumPodIPPools(c *CiliumV2alpha1Client) *ciliumPodIPPools {
	return &ciliumPodIPPools{
		gentype.NewClientWithList[*ciliumiov2alpha1.CiliumPodIPPool, *ciliumiov2alpha1.CiliumPodIPPoolList](
			"ciliumpodippools",
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // empty namespace: this resource is cluster-scoped
			func() *ciliumiov2alpha1.CiliumPodIPPool { return &ciliumiov2alpha1.CiliumPodIPPool{} },
			func() *ciliumiov2alpha1.CiliumPodIPPoolList { return &ciliumiov2alpha1.CiliumPodIPPoolList{} },
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"fmt"
"os"
"time"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/option"
)
// OptUserAgent is the name of the option carrying the User-Agent string
// used when talking to the Kubernetes API server.
const OptUserAgent = "user-agent"

// Config is the aggregate Kubernetes client configuration, combining the
// client-only parameters with the configuration shared with other users.
type Config struct {
	ClientParams
	SharedConfig
}
// SharedConfig holds the Kubernetes client configuration shared across
// its consumers (as opposed to the client-only ClientParams).
type SharedConfig struct {
	// EnableK8s is a flag that, when set to false, forcibly disables the clientset, to let cilium
	// operate with CNI-compatible orchestrators other than Kubernetes. Defaults to true.
	EnableK8s bool

	// K8sAPIServerURLs is the list of API server instances
	K8sAPIServerURLs []string

	// K8sAPIServer is the kubernetes api address server (for https use --k8s-kubeconfig-path instead)
	// Deprecated in favor of K8sAPIServerURLs (see Flags below).
	K8sAPIServer string

	// K8sKubeConfigPath is the absolute path of the kubernetes kubeconfig file
	K8sKubeConfigPath string

	// K8sClientConnectionTimeout configures the timeout for K8s client connections.
	K8sClientConnectionTimeout time.Duration

	// K8sClientConnectionKeepAlive configures the keep alive duration for K8s client connections.
	K8sClientConnectionKeepAlive time.Duration

	// K8sHeartbeatTimeout configures the timeout for apiserver heartbeat
	K8sHeartbeatTimeout time.Duration

	// EnableK8sAPIDiscovery enables Kubernetes API discovery
	EnableK8sAPIDiscovery bool
}
// ClientParams holds the rate-limiting parameters specific to the K8s client.
type ClientParams struct {
	// K8sClientQPS is the queries per second limit for the K8s client. Defaults to k8s client defaults.
	K8sClientQPS float32

	// K8sClientBurst is the burst value allowed for the K8s client. Defaults to k8s client defaults.
	K8sClientBurst int
}
// defaultClientParams are the default QPS/burst limits for the K8s client.
var defaultClientParams = ClientParams{
	K8sClientQPS:   defaults.K8sClientQPSLimit,
	K8sClientBurst: defaults.K8sClientBurst,
}

// Flags registers the client rate-limiting command-line flags.
func (def ClientParams) Flags(flags *pflag.FlagSet) {
	flags.Float32(option.K8sClientQPSLimit, def.K8sClientQPS, "Queries per second limit for the K8s client")
	flags.Int(option.K8sClientBurst, def.K8sClientBurst, "Burst value allowed for the K8s client")
}
// defaultSharedConfig are the default values for the shared K8s client
// configuration; the 30s timeouts apply to connection, keep-alive and
// apiserver heartbeat alike.
var defaultSharedConfig = SharedConfig{
	EnableK8s:                    true,
	K8sAPIServer:                 "",
	K8sAPIServerURLs:             []string{},
	K8sKubeConfigPath:            "",
	K8sClientConnectionTimeout:   30 * time.Second,
	K8sClientConnectionKeepAlive: 30 * time.Second,
	K8sHeartbeatTimeout:          30 * time.Second,
	EnableK8sAPIDiscovery:        defaults.K8sEnableAPIDiscovery,
}
// Flags registers the shared Kubernetes client command-line flags.
// --k8s-api-server is kept for backwards compatibility but marked
// deprecated in favor of --k8s-api-server-urls.
func (def SharedConfig) Flags(flags *pflag.FlagSet) {
	flags.Bool(option.EnableK8s, def.EnableK8s, "Enable the k8s clientset")
	flags.String(option.K8sAPIServer, def.K8sAPIServer, "Kubernetes API server URL")
	// MarkDeprecated only errors if the flag does not exist; it was registered
	// on the line above, so the error can be safely ignored.
	flags.MarkDeprecated(option.K8sAPIServer, fmt.Sprintf("use --%s", option.K8sAPIServerURLs))
	flags.StringSlice(option.K8sAPIServerURLs, def.K8sAPIServerURLs, "Kubernetes API server URLs")
	flags.String(option.K8sKubeConfigPath, def.K8sKubeConfigPath, "Absolute path of the kubernetes kubeconfig file")
	flags.Duration(option.K8sClientConnectionTimeout, def.K8sClientConnectionTimeout, "Configures the timeout of K8s client connections. K8s client is disabled if the value is set to 0")
	// Fixed typo: "K8 client" -> "K8s client", matching the sibling flag above.
	flags.Duration(option.K8sClientConnectionKeepAlive, def.K8sClientConnectionKeepAlive, "Configures the keep alive duration of K8s client connections. K8s client is disabled if the value is set to 0")
	flags.Duration(option.K8sHeartbeatTimeout, def.K8sHeartbeatTimeout, "Configures the timeout for api-server heartbeat, set to 0 to disable")
	flags.Bool(option.K8sEnableAPIDiscovery, def.EnableK8sAPIDiscovery, "Enable discovery of Kubernetes API groups and resources with the discovery API")
}
// NewClientConfig assembles a Config from its shared and client-only parts.
func NewClientConfig(cfg SharedConfig, params ClientParams) Config {
	var combined Config
	combined.ClientParams = params
	combined.SharedConfig = cfg
	return combined
}
// isEnabled reports whether the Kubernetes clientset should be used:
// K8s support must not be disabled, and at least one way of reaching the
// API server must be configured (explicit server URL(s), a kubeconfig
// path, in-cluster service environment variables, or a node name).
func (cfg Config) isEnabled() bool {
	if !cfg.EnableK8s {
		return false
	}
	switch {
	case cfg.K8sAPIServer != "":
		return true
	case len(cfg.K8sAPIServerURLs) > 0:
		return true
	case cfg.K8sKubeConfigPath != "":
		return true
	case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
		return true
	case os.Getenv("K8S_NODE_NAME") != "":
		return true
	}
	return false
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Getters is a set of methods for retrieving common objects.
type Getters interface {
	// GetSecrets returns the data of the named secret in the given namespace.
	GetSecrets(ctx context.Context, namespace, name string) (map[string][]byte, error)
}

// ClientsetGetters implements the Getters interface in terms of the clientset.
type ClientsetGetters struct {
	Clientset
}
// GetSecrets returns the secrets found in the given namespace and name.
// It fails when the Kubernetes clientset is disabled.
func (cs *ClientsetGetters) GetSecrets(ctx context.Context, ns, name string) (map[string][]byte, error) {
	if !cs.IsEnabled() {
		return nil, fmt.Errorf("GetSecrets: No k8s, cannot access k8s secrets")
	}
	secretsClient := cs.CoreV1().Secrets(ns)
	secret, err := secretsClient.Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return secret.Data, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package client
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"math/rand/v2"
"net/http"
"net/url"
"os"
"path/filepath"
"slices"
"strings"
"github.com/cilium/hive/cell"
"github.com/cilium/hive/job"
"github.com/cilium/statedb"
"github.com/cloudflare/cfssl/log"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/time"
"github.com/cilium/cilium/pkg/version"
)
var (
	// K8sAPIServerFilePath is the file path for storing kube-apiserver service and
	// endpoints for high availability failover. It lives under the agent state
	// directory so the mappings survive agent restarts (see restoreFromDisk).
	K8sAPIServerFilePath = filepath.Join(option.Config.StateDir, "k8sapi_server_state.json")
)
// K8sServiceEndpointMapping is the persisted association between the
// kube-apiserver service address and its endpoint addresses.
type K8sServiceEndpointMapping struct {
	Service   string   `json:"service"`
	Endpoints []string `json:"endpoints"`
}

// Equal reports whether both mappings have the same service address and the
// same endpoint list (element order matters).
func (m K8sServiceEndpointMapping) Equal(other K8sServiceEndpointMapping) bool {
	if m.Service != other.Service {
		return false
	}
	return slices.Equal(m.Endpoints, other.Endpoints)
}
// restConfigManager manages the rest configuration for connecting to the API server, including the logic to fail over
// to an active kube-apiserver in order to support high availability.
//
// Below are the sequence of events to support kube-apiserver failover.
//
// Bootstrap: It parses the user provided configuration which may include multiple API server URLs. In case of multiple
// API servers, it wraps the rest configuration with an HTTP RoundTripper that enables updating the remote host while
// making API requests to the kube-apiserver. It also asynchronously monitors kube-apiserver service and endpoints related updates.
// Initially an active kube-apiserver URL is picked at random, and servers are manually rotated on connectivity failures.
//
// Runtime: After the agent's initial sync with the kube-apiserver, when the manager receives updates for the kube-apiserver
// service, it switches over to the service address as the remote host set in the rest configuration. Thereafter, manual
// rotation of API servers is not needed as Cilium datapath will load-balance API traffic to the kube-apiserver endpoints.
//
// Restore: The manager restores the persisted kube-apiserver state after restart after ensuring connectivity using
// the service address. If that fails, it'll fall back to user provided kube-apiserver URLs. Note that these could be
// different from the ones configured during initial bootstrap as those kube-apiservers may all have been rotated while
// the agent was down.
type restConfigManager struct {
	restConfig           *rest.Config // managed configuration; copied out via getConfig
	apiServerURLs        []*url.URL   // user-provided API server URLs
	isConnectedToService bool         // true once switched over to the service address
	lock.RWMutex                      // guards the fields above
	log                  *slog.Logger
	rt                   *rotatingHttpRoundTripper // rotates hosts on connectivity failures
}
// getConfig returns a copy of the managed rest configuration, taken under
// the read lock. A copy is handed out so callers cannot mutate shared state.
func (r *restConfigManager) getConfig() *rest.Config {
	r.RLock()
	defer r.RUnlock()
	return rest.CopyConfig(r.restConfig)
}
// canRotateAPIServerURL reports whether the manager should manually rotate
// between the configured API server URLs.
//
// API server URLs are initially manually rotated when multiple servers are
// configured by the user. Once the connections are switched over to the
// kube-apiserver service address, manual rotation isn't needed as Cilium
// datapath will load balance connections to active kube-apiservers.
func (r *restConfigManager) canRotateAPIServerURL() bool {
	r.RLock()
	defer r.RUnlock()
	if r.isConnectedToService {
		return false
	}
	return len(r.apiServerURLs) > 1
}
// restConfigManagerInit builds a restConfigManager from the given config.
// It parses the configured API server URLs, derives the User-Agent from the
// running binary's name, Cilium's version and the optional caller name, and
// creates the initial rest.Config. When several API servers are configured
// (and no service switch-over has happened yet), it picks a starting server
// at random and restores any persisted service/endpoint mappings from disk.
func restConfigManagerInit(cfg Config, name string, log *slog.Logger) (*restConfigManager, error) {
	var err error
	manager := restConfigManager{
		log: log,
		rt: &rotatingHttpRoundTripper{
			log: log,
		},
	}
	// Populates manager.apiServerURLs from cfg before createConfig uses them.
	manager.parseConfig(cfg)

	// User-Agent: "<binary>/<version>" plus the optional caller-supplied name.
	cmdName := "cilium"
	if len(os.Args[0]) != 0 {
		cmdName = filepath.Base(os.Args[0])
	}
	userAgent := fmt.Sprintf("%s/%s", cmdName, version.Version)
	if name != "" {
		userAgent = fmt.Sprintf("%s %s", userAgent, name)
	}

	if manager.restConfig, err = manager.createConfig(cfg, userAgent); err != nil {
		return nil, err
	}

	if manager.canRotateAPIServerURL() {
		// Pick an API server at random.
		manager.rotateAPIServerURL()
		// Restore the mappings from disk.
		manager.restoreFromDisk()
	}

	// err is nil here; any failure already returned above.
	return &manager, err
}
// createConfig creates a rest.Config for connecting to k8s api-server.
//
// The precedence of the configuration selection is the following:
// 1. kubeCfgPath
// 2. apiServerURL(s) (https if specified)
// 3. rest.InClusterConfig().
func (r *restConfigManager) createConfig(cfg Config, userAgent string) (*rest.Config, error) {
	var (
		config       *rest.Config
		err          error
		apiServerURL string
	)
	// A single explicitly configured server wins over the multi-server list;
	// otherwise start from the first parsed URL (rotation may change it later).
	if cfg.K8sAPIServer != "" {
		apiServerURL = cfg.K8sAPIServer
	} else if len(r.apiServerURLs) > 0 {
		apiServerURL = r.apiServerURLs[0].String()
	}
	kubeCfgPath := cfg.K8sKubeConfigPath
	qps := cfg.K8sClientQPS
	burst := cfg.K8sClientBurst
	switch {
	// If the apiServerURL and the kubeCfgPath are empty then we can try getting
	// the rest.Config from the InClusterConfig
	case apiServerURL == "" && kubeCfgPath == "":
		if config, err = rest.InClusterConfig(); err != nil {
			return nil, err
		}
	case kubeCfgPath != "":
		if config, err = clientcmd.BuildConfigFromFlags("", kubeCfgPath); err != nil {
			return nil, err
		}
	case strings.HasPrefix(apiServerURL, "https://"):
		// An https URL still needs the in-cluster credentials and CA; only
		// the host is overridden.
		if config, err = rest.InClusterConfig(); err != nil {
			return nil, err
		}
		config.Host = apiServerURL
	default:
		//exhaustruct:ignore
		config = &rest.Config{Host: apiServerURL, UserAgent: userAgent}
	}
	// The HTTP round tripper rotates API server URLs in case of connectivity failures.
	if len(r.apiServerURLs) > 1 {
		config.Wrap(r.WrapRoundTripper)
	}
	setConfig(config, userAgent, qps, burst)
	return config, nil
}
// parseConfig populates r.apiServerURLs from the configuration. A single
// K8sAPIServer entry takes precedence over the K8sAPIServerURLs list.
// Entries without a scheme default to http:// for the single-server option
// (legacy behavior) and https:// for the multi-server list.
func (r *restConfigManager) parseConfig(cfg Config) {
	if cfg.K8sAPIServer != "" {
		s := cfg.K8sAPIServer
		if !strings.HasPrefix(s, "http") {
			s = fmt.Sprintf("http://%s", s) // default to HTTP
		}
		serverURL, err := url.Parse(s)
		if err != nil {
			// Log the raw input: serverURL is nil when parsing fails.
			r.log.Error("Failed to parse APIServerURL, skipping",
				logfields.Error, err,
				logfields.URL, s,
			)
			return
		}
		r.apiServerURLs = append(r.apiServerURLs, serverURL)
		return
	}
	for _, apiServerURL := range cfg.K8sAPIServerURLs {
		if apiServerURL == "" {
			continue
		}
		// "https://..." also has prefix "http", so one check covers both
		// schemes.
		if !strings.HasPrefix(apiServerURL, "http") {
			apiServerURL = fmt.Sprintf("https://%s", apiServerURL)
		}
		serverURL, err := url.Parse(apiServerURL)
		if err != nil {
			r.log.Error("Failed to parse APIServerURL, skipping",
				logfields.Error, err,
				logfields.URL, apiServerURL,
			)
			continue
		}
		r.apiServerURLs = append(r.apiServerURLs, serverURL)
	}
}
// setConfig copies the caller-supplied overrides into config, leaving any
// field whose corresponding argument is the zero value untouched.
func setConfig(config *rest.Config, userAgent string, qps float32, burst int) {
	if len(userAgent) > 0 {
		config.UserAgent = userAgent
	}
	if qps != 0 {
		config.QPS = qps
	}
	if burst != 0 {
		config.Burst = burst
	}
}
// rotateAPIServerURL switches the round tripper and the rest configuration
// to a randomly chosen API server URL different from the current one.
// It is a no-op unless more than one URL is configured.
func (r *restConfigManager) rotateAPIServerURL() {
	if len(r.apiServerURLs) <= 1 {
		return
	}
	// Lock order: round-tripper lock first, then the manager lock below
	// (same order as updateMappings).
	r.rt.Lock()
	defer r.rt.Unlock()
	// Draw until a URL other than the currently selected one comes up;
	// terminates because at least two distinct entries exist.
	for {
		idx := rand.IntN(len(r.apiServerURLs))
		if r.rt.apiServerURL != r.apiServerURLs[idx] {
			r.rt.apiServerURL = r.apiServerURLs[idx]
			break
		}
	}
	r.Lock()
	r.restConfig.Host = r.rt.apiServerURL.String()
	r.Unlock()
	r.log.Info("Rotated api server",
		logfields.URL, r.rt.apiServerURL,
	)
}
// rotatingHttpRoundTripper sets the remote host in the rest configuration used to make API requests to the API server.
type rotatingHttpRoundTripper struct {
	// delegate is the wrapped transport that performs the actual request.
	delegate http.RoundTripper
	log      *slog.Logger
	// apiServerURL is the currently selected API server; its Host replaces
	// the request host in RoundTrip.
	apiServerURL *url.URL
	lock.RWMutex // Synchronizes access to apiServerURL
}
// RoundTrip rewrites the request host to the currently selected API server
// before delegating to the wrapped transport.
func (rt *rotatingHttpRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	rt.RLock()
	defer rt.RUnlock()
	target := rt.apiServerURL
	rt.log.Debug("Kubernetes api server host",
		logfields.URL, target,
	)
	req.URL.Host = target.Host
	return rt.delegate.RoundTrip(req)
}
// WrapRoundTripper installs rt as the delegate of the manager's rotating
// round tripper and returns the rotating round tripper in its place.
// It is passed to rest.Config.Wrap in createConfig.
func (r *restConfigManager) WrapRoundTripper(rt http.RoundTripper) http.RoundTripper {
	r.rt.delegate = rt
	return r.rt
}
// restoreFromDisk loads the persisted kube-apiserver service/endpoints
// mapping from K8sAPIServerFilePath and applies it via updateMappings.
// An unreadable file is logged (fail-over information is then unavailable);
// an empty file is silently ignored.
func (r *restConfigManager) restoreFromDisk() {
	f, err := os.Open(K8sAPIServerFilePath)
	if err != nil {
		r.log.Error("unable "+
			"to open file, agent may not be able to fail over to an active kube-apiserver",
			logfields.Path, K8sAPIServerFilePath,
			logfields.Error, err,
		)
		return
	}
	defer f.Close()
	// Stat the open handle rather than the path so we inspect exactly the
	// file we are about to decode (avoids a second, racy path lookup).
	if finfo, err := f.Stat(); err != nil || finfo.Size() == 0 {
		return
	}
	var mapping K8sServiceEndpointMapping
	if err = json.NewDecoder(f).Decode(&mapping); err != nil {
		r.log.Error("failed to "+
			"decode file entry, agent may not be able to fail over to an active kube-apiserver",
			logfields.Error, err,
			logfields.Path, K8sAPIServerFilePath,
			logfields.Entry, mapping,
		)
		return
	}
	r.updateMappings(mapping)
}
// saveMapping persists the service/endpoints mapping to disk so that it can
// be restored after an agent restart (see restoreFromDisk).
func (r *restConfigManager) saveMapping(mapping K8sServiceEndpointMapping) {
	// Create the file if needed and truncate any previous, potentially
	// longer, entry so stale trailing bytes cannot corrupt the JSON that
	// restoreFromDisk later decodes.
	f, err := os.OpenFile(K8sAPIServerFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		r.log.Error("failed to open kubernetes service entry file, "+
			"agent may not be able to fail over to an active k8sapi-server",
			logfields.Error, err,
			logfields.Path, K8sAPIServerFilePath,
		)
		return
	}
	defer f.Close()
	if err = json.NewEncoder(f).Encode(mapping); err != nil {
		// Use the manager's logger for consistency with the rest of the type.
		r.log.Error("failed to write kubernetes service entry, "+
			"agent may not be able to fail over to an active k8sapi-server",
			logfields.Error, err,
			logfields.Entry, mapping,
		)
	}
}
// updateMappings verifies connectivity to the kube-apiserver via the service
// address in mapping, persists the mapping to disk, and switches the round
// tripper, rest configuration and candidate endpoint list over to it. On
// success the manager is marked as connected to the service, which disables
// manual URL rotation (see canRotateAPIServerURL).
func (r *restConfigManager) updateMappings(mapping K8sServiceEndpointMapping) {
	if err := r.checkConnToService(mapping.Service); err != nil {
		return
	}
	r.saveMapping(mapping)
	r.log.Info("Updated kubeapi server url host",
		logfields.URL, mapping.Service,
	)
	// Set in tests
	mapping.Service = strings.TrimPrefix(mapping.Service, "http://")
	// Lock order: round-tripper lock first, then the manager lock (same
	// order as rotateAPIServerURL).
	r.rt.Lock()
	defer r.rt.Unlock()
	r.rt.apiServerURL.Host = mapping.Service
	r.Lock()
	defer r.Unlock()
	r.isConnectedToService = true
	r.restConfig.Host = mapping.Service
	// Replace the candidate URL list with the service's backend endpoints;
	// the old list is kept if none of the new entries parse.
	updatedServerURLs := make([]*url.URL, 0)
	for _, endpoint := range mapping.Endpoints {
		endpoint = fmt.Sprintf("https://%s", endpoint)
		serverURL, err := url.Parse(endpoint)
		if err != nil {
			r.log.Info("Failed to parse endpoint, skipping",
				logfields.Endpoint, endpoint,
				logfields.Error, err,
			)
			continue
		}
		updatedServerURLs = append(updatedServerURLs, serverURL)
	}
	if len(updatedServerURLs) != 0 {
		r.apiServerURLs = updatedServerURLs
	}
}
// checkConnToService ensures connectivity to the API server via the passed service address.
// It probes every connRetryInterval until a probe succeeds or connTimeout
// elapses, and returns the last probe error (nil on success).
func (r *restConfigManager) checkConnToService(host string) error {
	stop := make(chan struct{})
	timeout := time.NewTimer(connTimeout)
	defer timeout.Stop()
	var (
		config *rest.Config
		err    error
	)
	if strings.HasPrefix(host, "http") {
		// Set in tests
		//exhaustruct:ignore
		config = &rest.Config{Host: host, Timeout: connTimeout}
	} else {
		// Production path: reuse the in-cluster credentials and CA, but
		// point them at the service address over https.
		hostURL := fmt.Sprintf("https://%s", host)
		config, err = rest.InClusterConfig()
		if err != nil {
			r.log.Error("unable to read cluster config",
				logfields.Error, err,
			)
			return err
		}
		config.Host = hostURL
	}
	// wait.Until re-runs the probe every connRetryInterval until the stop
	// channel is closed. Closing stop is the only way out of the loop.
	wait.Until(func() {
		r.log.Info("Checking connection to kubeapi service",
			logfields.Address, config.Host,
		)
		httpClient, _ := rest.HTTPClientFor(config)
		cs, _ := kubernetes.NewForConfigAndClient(config, httpClient)
		if err = isConnReady(cs); err == nil {
			// Probe succeeded: stop retrying.
			close(stop)
			return
		}
		select {
		case <-timeout.C:
			// Deadline reached: give up below with the last error.
		default:
			// Not timed out yet: leave stop open so wait.Until retries.
			return
		}
		r.log.Error("kubeapi service not ready yet",
			logfields.Address, config.Host,
			logfields.Error, err,
		)
		close(stop)
	}, connRetryInterval, stop)
	if err == nil {
		r.log.Info("Connected to kubeapi service",
			logfields.Address, config.Host,
		)
	}
	return err
}
// mappingUpdaterParams bundles the cell dependencies of
// registerMappingsUpdater.
type mappingUpdaterParams struct {
	cell.In

	JobGroup job.Group
	Log      *slog.Logger
	Manager  *restConfigManager
	// DB and Frontends are optional so the cell remains usable without the
	// load-balancing control-plane.
	DB        *statedb.DB                           `optional:"true"`
	Frontends statedb.Table[*loadbalancer.Frontend] `optional:"true"`
}
// registerMappingsUpdater watches the default/kubernetes frontend for
// changes and updates the mapping file.
// This is currently used for supporting high availability for kubeapi-server.
func registerMappingsUpdater(p mappingUpdaterParams) {
	if p.DB == nil || p.Frontends == nil {
		// These are optional to make the [Cell] usable without
		// load-balancing control-plane.
		return
	}
	if p.Manager == nil || !p.Manager.canRotateAPIServerURL() {
		return
	}
	p.JobGroup.Add(
		job.OneShot(
			"update-k8s-api-service-mappings",
			func(ctx context.Context, health cell.Health) error {
				// Watch for changes to the default/kubernetes service frontend
				// and update the mappings if it changes.
				var previous K8sServiceEndpointMapping
				for {
					fe, _, watch, found := p.Frontends.GetWatch(
						p.DB.ReadTxn(),
						loadbalancer.FrontendByServiceName(loadbalancer.NewServiceName(
							"default", "kubernetes")))
					if found {
						mapping := frontendToMapping(fe)
						if !mapping.Equal(previous) {
							previous = mapping
							// Use the injected logger (previously the
							// package-level logger was used and p.Log was
							// dead) for consistent scoping.
							p.Log.Info("updating kubernetes service mapping",
								logfields.Entry, mapping,
							)
							p.Manager.updateMappings(mapping)
						}
					}
					select {
					case <-ctx.Done():
						return nil
					case <-watch:
					}
				}
			}))
}
// frontendToMapping converts the given frontend into a
// K8sServiceEndpointMapping holding the frontend's address as the service
// and the backend addresses as the endpoints.
func frontendToMapping(fe *loadbalancer.Frontend) K8sServiceEndpointMapping {
	mapping := K8sServiceEndpointMapping{
		Service: fe.Address.AddrString(),
	}
	for be := range fe.Backends {
		mapping.Endpoints = append(mapping.Endpoints, be.Address.AddrString())
	}
	return mapping
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package identitybackend
import (
"context"
"fmt"
"log/slog"
"reflect"
"sort"
"strconv"
"sync/atomic"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"github.com/cilium/cilium/pkg/allocator"
cacheKey "github.com/cilium/cilium/pkg/identity/key"
"github.com/cilium/cilium/pkg/idpool"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/cilium/cilium/pkg/k8s/informer"
k8sUtils "github.com/cilium/cilium/pkg/k8s/utils"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/rate"
)
const (
	// HeartBeatAnnotation is an annotation applied by the operator to indicate
	// that a CiliumIdentity has been marked for deletion.
	HeartBeatAnnotation = "io.cilium.heartbeat"

	// K8sPodNamespaceLabelKey is the source-qualified ("k8s:"-prefixed) form
	// of the pod namespace label key.
	K8sPodNamespaceLabelKey = labels.LabelSourceK8s + ":" + k8sConst.PodNamespaceLabel

	// byKeyIndex is the name of the index of the identities by key.
	byKeyIndex = "by-key-index"
)
// NewCRDBackend constructs an allocator backend that stores identities as
// CiliumIdentity CRDs.
func NewCRDBackend(logger *slog.Logger, c CRDBackendConfiguration) (allocator.Backend, error) {
	backend := &crdBackend{
		logger:                  logger,
		CRDBackendConfiguration: c,
	}
	return backend, nil
}
// CRDBackendConfiguration carries the dependencies of the CRD-backed
// identity allocator backend.
type CRDBackendConfiguration struct {
	// Store is the local cache of CiliumIdentity objects, indexed by key;
	// it is created and populated by ListAndWatch.
	Store cache.Indexer
	// StoreSet is set to true once Store has completed its initial sync.
	StoreSet *atomic.Bool
	// Client performs CRUD operations on CiliumIdentity objects.
	Client clientset.Interface
	// KeyFunc converts security-label maps into allocator keys.
	KeyFunc func(map[string]string) allocator.AllocatorKey
}
// crdBackend implements allocator.Backend on top of CiliumIdentity CRDs.
type crdBackend struct {
	logger *slog.Logger
	CRDBackendConfiguration
}
// DeleteAllKeys is intentionally a no-op for the CRD backend.
func (c *crdBackend) DeleteAllKeys(ctx context.Context) {
}
// SelectK8sLabels picks the subset of old that should be attached to CRD
// CiliumIdentity objects as metadata.Labels. These labels have no effect on
// the Security Identity itself. Any label added here must have a value no
// longer than 63 characters.
func SelectK8sLabels(old map[string]string) (selected map[string]string) {
	selected = make(map[string]string, 1)
	// Namespace names are limited to 63 characters, so this value is safe.
	if ns, found := old[K8sPodNamespaceLabelKey]; found {
		selected[k8sConst.PodNamespaceLabel] = ns
	}
	return selected
}
// DeleteID deletes the CiliumIdentity object whose name is the given ID.
func (c *crdBackend) DeleteID(ctx context.Context, id idpool.ID) error {
	return c.Client.CiliumV2().CiliumIdentities().Delete(ctx, id.String(), metav1.DeleteOptions{})
}
// AllocateID will create an identity CRD, thus creating the identity for this
// key-> ID mapping.
// Note: the lock field is not supported with the k8s CRD allocator.
// Returns an allocator key with the cilium identity stored in it.
func (c *crdBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (allocator.AllocatorKey, error) {
	labels := key.GetAsMap()
	identity := &v2.CiliumIdentity{
		ObjectMeta: metav1.ObjectMeta{
			Name:   id.String(),
			Labels: SelectK8sLabels(labels),
		},
		SecurityLabels: labels,
	}
	created, err := c.Client.CiliumV2().CiliumIdentities().Create(ctx, identity, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	// Stash the created object in the key so callers can reach the backing CRD.
	return key.PutValue(cacheKey.MetadataKeyBackendKey, created), nil
}
// AllocateIDIfLocked behaves exactly like AllocateID; the lock argument is
// ignored because locking is not supported with the k8s CRD allocator.
func (c *crdBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) (allocator.AllocatorKey, error) {
	return c.AllocateID(ctx, id, key)
}
// AcquireReference acquires a reference to the identity.
func (c *crdBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
	// For CiliumIdentity-based allocation, the reference counting is
	// handled via CiliumEndpoint. Any CiliumEndpoint referring to a
	// CiliumIdentity will keep the CiliumIdentity alive. However,
	// there is a brief window where a CiliumEndpoint may not exist
	// for a given CiliumIdentity (according to the operator), in
	// which case the operator marks the CiliumIdentity for deletion.
	// This checks to see if the CiliumIdentity has been marked for
	// deletion and removes the mark so that the CiliumIdentity can
	// be safely used.
	//
	// NOTE: A race against using a CiliumIdentity that might otherwise
	// be (immediately) deleted is prevented by the operator logic that
	// validates the ResourceVersion of the CiliumIdentity before deleting
	// it. If a CiliumIdentity does (eventually) get deleted by the
	// operator, the agent will then have a chance to recreate it.
	var (
		ts string
		ok bool
	)
	// check to see if the cached copy of the identity
	// has the annotation
	ci, exists, err := c.getById(ctx, id)
	if err != nil {
		return err
	}
	if !exists {
		// fall back to the key stored in the allocator key. If it's not present
		// then return the error.
		ci, ok = key.Value(cacheKey.MetadataKeyBackendKey).(*v2.CiliumIdentity)
		if !ok {
			return fmt.Errorf("identity (id:%q,key:%q) does not exist", id, key)
		}
	}
	ts, ok = ci.Annotations[HeartBeatAnnotation]
	if ok {
		c.logger.Info(
			"Identity marked for deletion; attempting to unmark it",
			logfields.Timeout, ts,
			logfields.Identity, ci,
		)
		// Work on a copy: the cached object must not be mutated in place.
		ci = ci.DeepCopy()
		delete(ci.Annotations, HeartBeatAnnotation)
		_, err = c.Client.CiliumV2().CiliumIdentities().Update(ctx, ci, metav1.UpdateOptions{})
		if err != nil {
			return err
		}
	}
	return nil
}
// RunLocksGC is a no-op: distributed locks are not used by the CRD backend.
func (c *crdBackend) RunLocksGC(_ context.Context, _ map[string]kvstore.Value) (map[string]kvstore.Value, error) {
	return nil, nil
}
// RunGC is a no-op for the CRD backend; it exists to satisfy the
// allocator.Backend interface.
func (c *crdBackend) RunGC(context.Context, *rate.Limiter, map[string]uint64, idpool.ID, idpool.ID) (map[string]uint64, *allocator.GCStats, error) {
	return nil, nil, nil
}
// UpdateKey refreshes the reference that this node is using this key->ID
// mapping. It assumes that the identity already exists but will recreate it if
// reliablyMissing is true.
// Note: the lock field is not supported with the k8s CRD allocator.
func (c *crdBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error {
	err := c.AcquireReference(ctx, id, key, nil)
	if err == nil {
		c.logger.Debug(
			"Acquired reference for identity",
			logfields.Identity, id,
			logfields.Labels, key,
		)
		return nil
	}
	// The CRD (aka the master key) is missing. Try to recover by recreating it
	// if reliablyMissing is set.
	c.logger.Warn(
		"Unable to update CRD identity information with a reference for this node",
		logfields.Error, err,
		logfields.Identity, id,
		logfields.Labels, key,
	)
	if reliablyMissing {
		// Recreate a missing master key
		if _, err = c.AllocateID(ctx, id, key); err != nil {
			// Lowercase per Go error-string convention; also fixes the
			// previous "Unable recreate" wording.
			return fmt.Errorf("unable to recreate missing CRD identity %q->%q: %w", key, id, err)
		}
		return nil
	}
	return err
}
// UpdateKeyIfLocked behaves exactly like UpdateKey; the lock argument is
// ignored because locking is not supported with the k8s CRD allocator.
func (c *crdBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error {
	return c.UpdateKey(ctx, id, key, reliablyMissing)
}
// Lock does not return a lock object. Locking is not supported with the k8s
// CRD allocator. It is here to meet interface requirements.
func (c *crdBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
	return &crdLock{}, nil
}
// crdLock is a stub KVLocker; the CRD allocator does not support locking.
type crdLock struct{}

// Unlock does not unlock a lock object. Locking is not supported with the k8s
// CRD allocator. It is here to meet interface requirements.
func (c *crdLock) Unlock(ctx context.Context) error {
	return nil
}

// Comparator does nothing. Locking is not supported with the k8s
// CRD allocator. It is here to meet interface requirements.
func (c *crdLock) Comparator() any {
	return nil
}
// get returns the identity found for the given set of labels.
// In the case of duplicate entries, return an identity entry
// from a sorted list.
func (c *crdBackend) get(ctx context.Context, key allocator.AllocatorKey) *v2.CiliumIdentity {
	if !c.StoreSet.Load() {
		return nil
	}
	objs, err := c.Store.ByIndex(byKeyIndex, key.GetKey())
	if err != nil || len(objs) == 0 {
		return nil
	}
	// Order duplicates oldest-first so that selection is deterministic.
	sort.Slice(objs, func(i, j int) bool {
		older, okOlder := objs[i].(*v2.CiliumIdentity)
		newer, okNewer := objs[j].(*v2.CiliumIdentity)
		if !okOlder || !okNewer {
			return false
		}
		return older.CreationTimestamp.Before(&newer.CreationTimestamp)
	})
	for _, obj := range objs {
		candidate, ok := obj.(*v2.CiliumIdentity)
		if !ok {
			return nil
		}
		if reflect.DeepEqual(candidate.SecurityLabels, key.GetAsMap()) {
			return candidate
		}
	}
	return nil
}
// Get returns the first ID which is allocated to a key in the identity CRDs in
// kubernetes.
// Note: the lock field is not supported with the k8s CRD allocator.
func (c *crdBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) {
	ci := c.get(ctx, key)
	if ci == nil {
		return idpool.NoID, nil
	}
	// The CRD name is the numeric identity ID.
	parsed, err := strconv.ParseUint(ci.Name, 10, 64)
	if err != nil {
		return idpool.NoID, fmt.Errorf("unable to parse value '%s': %w", ci.Name, err)
	}
	return idpool.ID(parsed), nil
}
// GetIfLocked behaves exactly like Get; the lock argument is ignored because
// locking is not supported with the k8s CRD allocator.
func (c *crdBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
	return c.Get(ctx, key)
}
// getById fetches the identities from the local store. Returns a nil `err` and
// false `exists` if an Identity is not found for the given `id`.
func (c *crdBackend) getById(ctx context.Context, id idpool.ID) (idty *v2.CiliumIdentity, exists bool, err error) {
	if !c.StoreSet.Load() {
		return nil, false, fmt.Errorf("store is not available yet")
	}
	// The store keys identities by name, so a template object carrying only
	// the name is enough for the lookup.
	template := &v2.CiliumIdentity{
		ObjectMeta: metav1.ObjectMeta{
			Name: id.String(),
		},
	}
	obj, exists, err := c.Store.Get(template)
	switch {
	case err != nil:
		return nil, exists, err
	case !exists:
		return nil, false, nil
	}
	ci, ok := obj.(*v2.CiliumIdentity)
	if !ok {
		return nil, false, fmt.Errorf("invalid object %T", obj)
	}
	return ci, true, nil
}
// GetByID returns the key associated with an ID. Returns nil if no key is
// associated with the ID.
// Note: the lock field is not supported with the k8s CRD allocator.
func (c *crdBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) {
	ci, exists, err := c.getById(ctx, id)
	switch {
	case err != nil:
		return nil, err
	case !exists:
		return nil, nil
	default:
		return c.KeyFunc(ci.SecurityLabels), nil
	}
}
// Release dissociates this node from using the identity bound to the given ID.
// When an identity has no references it may be garbage collected.
// It always returns nil for this backend.
func (c *crdBackend) Release(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (err error) {
	// For CiliumIdentity-based allocation, the reference counting is
	// handled via CiliumEndpoint. Any CiliumEndpoint referring to a
	// CiliumIdentity will keep the CiliumIdentity alive. No action is
	// needed to release the reference here.
	return nil
}
// getIdentitiesByKeyFunc builds the cache.IndexFunc backing byKeyIndex: it
// maps a stored CiliumIdentity to the allocator key derived from its
// security labels.
func getIdentitiesByKeyFunc(keyFunc func(map[string]string) allocator.AllocatorKey) func(obj any) ([]string, error) {
	return func(obj any) ([]string, error) {
		identity, ok := obj.(*v2.CiliumIdentity)
		if !ok {
			return []string{}, fmt.Errorf("object other than CiliumIdentity was pushed to the store")
		}
		return []string{keyFunc(identity.SecurityLabels).GetKey()}, nil
	}
}
// ListIDs returns the IDs of all identities currently in the local store.
// Identities whose name is not a parsable numeric ID are skipped with a
// warning.
func (c *crdBackend) ListIDs(ctx context.Context) ([]idpool.ID, error) {
	if !c.StoreSet.Load() {
		return nil, fmt.Errorf("store is not available yet")
	}
	var ids []idpool.ID
	for _, obj := range c.Store.List() {
		// Assert once instead of twice per iteration as before.
		identity := obj.(*v2.CiliumIdentity)
		parsed, err := strconv.ParseUint(identity.Name, 10, 64)
		if err != nil {
			c.logger.Warn(
				"Cannot parse identity ID",
				logfields.Identity, identity.Name,
			)
			continue
		}
		ids = append(ids, idpool.ID(parsed))
	}
	// Return an explicit nil: the previous code returned the outer err, which
	// was always nil because the loop shadowed it — misleading to readers.
	return ids, nil
}
// ListAndWatch creates the byKeyIndex-ed store, runs an informer over
// CiliumIdentity objects and forwards add/update/delete events whose CRD
// name parses as a numeric ID to handler. It blocks until ctx is done.
// StoreSet is flipped to true (and OnListDone fired) once the initial cache
// sync completes.
func (c *crdBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations) {
	c.Store = cache.NewIndexer(
		cache.DeletionHandlingMetaNamespaceKeyFunc,
		cache.Indexers{byKeyIndex: getIdentitiesByKeyFunc(c.KeyFunc)})
	identityInformer := informer.NewInformerWithStore(
		k8sUtils.ListerWatcherFromTyped[*v2.CiliumIdentityList](c.Client.CiliumV2().CiliumIdentities()),
		&v2.CiliumIdentity{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj any) {
				if identity, ok := obj.(*v2.CiliumIdentity); ok {
					// Events with unparsable names are silently dropped.
					if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil {
						handler.OnUpsert(idpool.ID(id), c.KeyFunc(identity.SecurityLabels))
					}
				}
			},
			UpdateFunc: func(oldObj, newObj any) {
				if oldIdentity, ok := oldObj.(*v2.CiliumIdentity); ok {
					if newIdentity, ok := newObj.(*v2.CiliumIdentity); ok {
						// Skip no-op resyncs where nothing changed.
						if oldIdentity.DeepEqual(newIdentity) {
							return
						}
						if id, err := strconv.ParseUint(newIdentity.Name, 10, 64); err == nil {
							handler.OnUpsert(idpool.ID(id), c.KeyFunc(newIdentity.SecurityLabels))
						}
					}
				}
			},
			DeleteFunc: func(obj any) {
				// The delete event is sometimes for items with unknown state that are
				// deleted anyway.
				if deleteObj, isDeleteObj := obj.(cache.DeletedFinalStateUnknown); isDeleteObj {
					obj = deleteObj.Obj
				}
				if identity, ok := obj.(*v2.CiliumIdentity); ok {
					if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil {
						handler.OnDelete(idpool.ID(id), c.KeyFunc(identity.SecurityLabels))
					}
				} else {
					c.logger.Debug(
						"Ignoring unknown delete event",
						logfields.Object, obj,
					)
				}
			},
		},
		nil,
		c.Store,
	)
	// Signal readiness once the informer's initial list has been synced.
	go func() {
		if ok := cache.WaitForCacheSync(ctx.Done(), identityInformer.HasSynced); ok {
			c.StoreSet.Store(true)
			handler.OnListDone()
		}
	}()
	identityInformer.Run(ctx.Done())
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package informer
import (
"fmt"
"log/slog"
"k8s.io/client-go/tools/cache"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// CastInformerEvent tries to cast obj to type typ, directly
// or by DeletedFinalStateUnknown type. It returns nil and logs
// an error if obj doesn't contain type typ.
func CastInformerEvent[typ any](logger *slog.Logger, obj any) *typ {
	if direct, ok := obj.(*typ); ok {
		return direct
	}
	if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		// Delete was not observed by the watcher but is
		// removed from kube-apiserver. This is the last
		// known state and the object no longer exists.
		if last, ok := tombstone.Obj.(*typ); ok {
			return last
		}
	}
	logger.Warn(
		fmt.Sprintf("Ignoring invalid type, expected: %T", new(typ)),
		logfields.Object, obj,
	)
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package informer
import (
"context"
"errors"
"fmt"
"net/http"
k8sRuntime "k8s.io/apimachinery/pkg/runtime"
utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"github.com/cilium/cilium/pkg/k8s/watchers/resources"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/time"
)
// init registers an extra panic handler with the Kubernetes runtime so that
// panics escaping client-go machinery terminate the process via
// logging.Fatal instead of being swallowed; the http.ErrAbortHandler
// sentinel is exempted as it is k8s' documented way to abort a handler.
func init() {
	utilRuntime.PanicHandlers = append(
		utilRuntime.PanicHandlers,
		func(_ context.Context, r any) {
			// from k8s library
			if err, ok := r.(error); ok && errors.Is(err, http.ErrAbortHandler) {
				// honor the http.ErrAbortHandler sentinel panic value:
				// ErrAbortHandler is a sentinel panic value to abort a handler.
				// While any panic from ServeHTTP aborts the response to the client,
				// panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log.
				return
			}
			// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
			logging.Fatal(logging.DefaultSlogLogger, "Panic in Kubernetes runtime handler")
		},
	)
}
// privateRunner couples a cache.Controller with its mutation detector so
// that both run against the same stop channel.
type privateRunner struct {
	cache.Controller
	cacheMutationDetector cache.MutationDetector
}

// Run starts the mutation detector in the background and then runs the
// embedded controller until stopCh is closed.
func (p *privateRunner) Run(stopCh <-chan struct{}) {
	go p.cacheMutationDetector.Run(stopCh)
	p.Controller.Run(stopCh)
}
// NewInformer is a copy of k8s.io/client-go/tools/cache/NewInformer includes the default cache MutationDetector.
func NewInformer(
	lw cache.ListerWatcher,
	objType k8sRuntime.Object,
	resyncPeriod time.Duration,
	h cache.ResourceEventHandler,
	transformer cache.TransformFunc,
) (cache.Store, cache.Controller) {
	// The store holds the client's view of the watched objects.
	store := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
	controller := NewInformerWithStore(lw, objType, resyncPeriod, h, transformer, store)
	return store, controller
}
// NewInformerWithStore uses the same arguments as NewInformer for which a caller can also set a
// cache.Store and includes the default cache MutationDetector.
func NewInformerWithStore(
	lw cache.ListerWatcher,
	objType k8sRuntime.Object,
	resyncPeriod time.Duration,
	h cache.ResourceEventHandler,
	transformer cache.TransformFunc,
	clientState cache.Store,
) cache.Controller {
	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	opts := cache.DeltaFIFOOptions{KeyFunction: cache.MetaNamespaceKeyFunc, KnownObjects: clientState, EmitDeltaTypeReplaced: true}
	fifo := cache.NewDeltaFIFOWithOptions(opts)
	cacheMutationDetector := cache.NewCacheMutationDetector(fmt.Sprintf("%T", objType))
	cfg := &cache.Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       objType,
		FullResyncPeriod: resyncPeriod,
		// Process applies each batch of deltas to clientState and dispatches
		// the matching handler callback for every delta.
		Process: func(obj any, isInInitialList bool) error {
			// from oldest to newest
			for _, d := range obj.(cache.Deltas) {
				// Inner obj intentionally shadows the Deltas argument: from
				// here on we work with the (possibly transformed) object.
				var obj any
				if transformer != nil {
					var err error
					if obj, err = transformer(d.Object); err != nil {
						return err
					}
				} else {
					obj = d.Object
				}
				// Deduplicate the strings in the object metadata to reduce memory consumption.
				resources.DedupMetadata(obj)
				// In CI we detect if the objects were modified and panic
				// this is a no-op in production environments.
				cacheMutationDetector.AddObject(obj)
				switch d.Type {
				case cache.Sync, cache.Added, cache.Updated, cache.Replaced:
					if old, exists, err := clientState.Get(obj); err == nil && exists {
						if err := clientState.Update(obj); err != nil {
							return err
						}
						h.OnUpdate(old, obj)
					} else {
						if err := clientState.Add(obj); err != nil {
							return err
						}
						h.OnAdd(obj, isInInitialList)
					}
				case cache.Deleted:
					if err := clientState.Delete(obj); err != nil {
						return err
					}
					h.OnDelete(obj)
				}
			}
			return nil
		},
	}
	return &privateRunner{
		Controller:            cache.New(cfg),
		cacheMutationDetector: cacheMutationDetector,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/time"
)
var (
	// LastInteraction is the time at which the last apiserver interaction
	// occurred
	LastInteraction eventTimestamper
	// LastSuccessInteraction is the time at which we have received a successful
	// k8s apiserver reply (i.e. a response code 2xx or 4xx).
	LastSuccessInteraction eventTimestamper
)
// eventTimestamper records a single timestamp that may be read and reset
// concurrently; access is guarded by the embedded RWMutex.
type eventTimestamper struct {
	timestamp time.Time
	lock      lock.RWMutex
}
// Reset sets the timestamp to the current time
func (e *eventTimestamper) Reset() {
	e.lock.Lock()
	defer e.lock.Unlock()
	e.timestamp = time.Now()
}
// Time returns the timestamp as set per Reset()
func (e *eventTimestamper) Time() time.Time {
	e.lock.RLock()
	defer e.lock.RUnlock()
	return e.timestamp
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto
package v1
import (
fmt "fmt"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
io "io"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
// (Generated code; keeps the imports above alive for the compiler.)
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Generated protobuf plumbing for ClientIPConfig: Reset/ProtoMessage/
// Descriptor implement proto.Message; the XXX_* methods delegate wire
// (un)marshalling to the message's Unmarshal/MarshalToSizedBuffer/Size
// implementations defined elsewhere in this generated file.
func (m *ClientIPConfig) Reset()      { *m = ClientIPConfig{} }
func (*ClientIPConfig) ProtoMessage() {}
func (*ClientIPConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_871504499faea14d, []int{0}
}
func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ClientIPConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClientIPConfig.Merge(m, src)
}
func (m *ClientIPConfig) XXX_Size() int {
	return m.Size()
}
func (m *ClientIPConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ClientIPConfig.DiscardUnknown(m)
}

// xxx_messageInfo_ClientIPConfig caches message info for ClientIPConfig.
var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo
// Generated protobuf plumbing for Container (see ClientIPConfig above for
// the shared pattern).
func (m *Container) Reset()      { *m = Container{} }
func (*Container) ProtoMessage() {}
func (*Container) Descriptor() ([]byte, []int) {
	return fileDescriptor_871504499faea14d, []int{1}
}
func (m *Container) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *Container) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Container.Merge(m, src)
}
func (m *Container) XXX_Size() int {
	return m.Size()
}
func (m *Container) XXX_DiscardUnknown() {
	xxx_messageInfo_Container.DiscardUnknown(m)
}

// xxx_messageInfo_Container caches message info for Container.
var xxx_messageInfo_Container proto.InternalMessageInfo
// Generated protobuf plumbing for ContainerPort (same pattern as above).
func (m *ContainerPort) Reset()      { *m = ContainerPort{} }
func (*ContainerPort) ProtoMessage() {}
func (*ContainerPort) Descriptor() ([]byte, []int) {
	return fileDescriptor_871504499faea14d, []int{2}
}
func (m *ContainerPort) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ContainerPort) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ContainerPort.Merge(m, src)
}
func (m *ContainerPort) XXX_Size() int {
	return m.Size()
}
func (m *ContainerPort) XXX_DiscardUnknown() {
	xxx_messageInfo_ContainerPort.DiscardUnknown(m)
}

// xxx_messageInfo_ContainerPort caches message info for ContainerPort.
var xxx_messageInfo_ContainerPort proto.InternalMessageInfo
// Generated protobuf plumbing for ContainerState (same pattern as above).
func (m *ContainerState) Reset()      { *m = ContainerState{} }
func (*ContainerState) ProtoMessage() {}
func (*ContainerState) Descriptor() ([]byte, []int) {
	return fileDescriptor_871504499faea14d, []int{3}
}
func (m *ContainerState) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ContainerState) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ContainerState.Merge(m, src)
}
func (m *ContainerState) XXX_Size() int {
	return m.Size()
}
func (m *ContainerState) XXX_DiscardUnknown() {
	xxx_messageInfo_ContainerState.DiscardUnknown(m)
}

// xxx_messageInfo_ContainerState caches message info for ContainerState.
var xxx_messageInfo_ContainerState proto.InternalMessageInfo
// Generated protobuf plumbing for ContainerStateRunning (same pattern as above).
func (m *ContainerStateRunning) Reset()      { *m = ContainerStateRunning{} }
func (*ContainerStateRunning) ProtoMessage() {}
func (*ContainerStateRunning) Descriptor() ([]byte, []int) {
	return fileDescriptor_871504499faea14d, []int{4}
}
func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ContainerStateRunning) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ContainerStateRunning.Merge(m, src)
}
func (m *ContainerStateRunning) XXX_Size() int {
	return m.Size()
}
func (m *ContainerStateRunning) XXX_DiscardUnknown() {
	xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m)
}

// xxx_messageInfo_ContainerStateRunning caches message info for ContainerStateRunning.
var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo
func (m *ContainerStatus) Reset() { *m = ContainerStatus{} }
func (*ContainerStatus) ProtoMessage() {}
func (*ContainerStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{5}
}
func (m *ContainerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ContainerStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_ContainerStatus.Merge(m, src)
}
func (m *ContainerStatus) XXX_Size() int {
return m.Size()
}
func (m *ContainerStatus) XXX_DiscardUnknown() {
xxx_messageInfo_ContainerStatus.DiscardUnknown(m)
}
var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo
func (m *EndpointAddress) Reset() { *m = EndpointAddress{} }
func (*EndpointAddress) ProtoMessage() {}
func (*EndpointAddress) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{6}
}
func (m *EndpointAddress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *EndpointAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *EndpointAddress) XXX_Merge(src proto.Message) {
xxx_messageInfo_EndpointAddress.Merge(m, src)
}
func (m *EndpointAddress) XXX_Size() int {
return m.Size()
}
func (m *EndpointAddress) XXX_DiscardUnknown() {
xxx_messageInfo_EndpointAddress.DiscardUnknown(m)
}
var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo
func (m *EndpointPort) Reset() { *m = EndpointPort{} }
func (*EndpointPort) ProtoMessage() {}
func (*EndpointPort) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{7}
}
func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *EndpointPort) XXX_Merge(src proto.Message) {
xxx_messageInfo_EndpointPort.Merge(m, src)
}
func (m *EndpointPort) XXX_Size() int {
return m.Size()
}
func (m *EndpointPort) XXX_DiscardUnknown() {
xxx_messageInfo_EndpointPort.DiscardUnknown(m)
}
var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
func (m *EndpointSubset) Reset() { *m = EndpointSubset{} }
func (*EndpointSubset) ProtoMessage() {}
func (*EndpointSubset) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{8}
}
func (m *EndpointSubset) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *EndpointSubset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *EndpointSubset) XXX_Merge(src proto.Message) {
xxx_messageInfo_EndpointSubset.Merge(m, src)
}
func (m *EndpointSubset) XXX_Size() int {
return m.Size()
}
func (m *EndpointSubset) XXX_DiscardUnknown() {
xxx_messageInfo_EndpointSubset.DiscardUnknown(m)
}
var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo
func (m *Endpoints) Reset() { *m = Endpoints{} }
func (*Endpoints) ProtoMessage() {}
func (*Endpoints) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{9}
}
func (m *Endpoints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Endpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Endpoints) XXX_Merge(src proto.Message) {
xxx_messageInfo_Endpoints.Merge(m, src)
}
func (m *Endpoints) XXX_Size() int {
return m.Size()
}
func (m *Endpoints) XXX_DiscardUnknown() {
xxx_messageInfo_Endpoints.DiscardUnknown(m)
}
var xxx_messageInfo_Endpoints proto.InternalMessageInfo
func (m *EndpointsList) Reset() { *m = EndpointsList{} }
func (*EndpointsList) ProtoMessage() {}
func (*EndpointsList) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{10}
}
func (m *EndpointsList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *EndpointsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *EndpointsList) XXX_Merge(src proto.Message) {
xxx_messageInfo_EndpointsList.Merge(m, src)
}
func (m *EndpointsList) XXX_Size() int {
return m.Size()
}
func (m *EndpointsList) XXX_DiscardUnknown() {
xxx_messageInfo_EndpointsList.DiscardUnknown(m)
}
var xxx_messageInfo_EndpointsList proto.InternalMessageInfo
func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
func (*LoadBalancerIngress) ProtoMessage() {}
func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{11}
}
func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *LoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *LoadBalancerIngress) XXX_Merge(src proto.Message) {
xxx_messageInfo_LoadBalancerIngress.Merge(m, src)
}
func (m *LoadBalancerIngress) XXX_Size() int {
return m.Size()
}
func (m *LoadBalancerIngress) XXX_DiscardUnknown() {
xxx_messageInfo_LoadBalancerIngress.DiscardUnknown(m)
}
var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
func (*LoadBalancerStatus) ProtoMessage() {}
func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{12}
}
func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *LoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *LoadBalancerStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_LoadBalancerStatus.Merge(m, src)
}
func (m *LoadBalancerStatus) XXX_Size() int {
return m.Size()
}
func (m *LoadBalancerStatus) XXX_DiscardUnknown() {
xxx_messageInfo_LoadBalancerStatus.DiscardUnknown(m)
}
var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
func (m *Namespace) Reset() { *m = Namespace{} }
func (*Namespace) ProtoMessage() {}
func (*Namespace) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{13}
}
func (m *Namespace) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Namespace) XXX_Merge(src proto.Message) {
xxx_messageInfo_Namespace.Merge(m, src)
}
func (m *Namespace) XXX_Size() int {
return m.Size()
}
func (m *Namespace) XXX_DiscardUnknown() {
xxx_messageInfo_Namespace.DiscardUnknown(m)
}
var xxx_messageInfo_Namespace proto.InternalMessageInfo
func (m *NamespaceList) Reset() { *m = NamespaceList{} }
func (*NamespaceList) ProtoMessage() {}
func (*NamespaceList) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{14}
}
func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *NamespaceList) XXX_Merge(src proto.Message) {
xxx_messageInfo_NamespaceList.Merge(m, src)
}
func (m *NamespaceList) XXX_Size() int {
return m.Size()
}
func (m *NamespaceList) XXX_DiscardUnknown() {
xxx_messageInfo_NamespaceList.DiscardUnknown(m)
}
var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
func (m *Node) Reset() { *m = Node{} }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{15}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Node) XXX_Merge(src proto.Message) {
xxx_messageInfo_Node.Merge(m, src)
}
func (m *Node) XXX_Size() int {
return m.Size()
}
func (m *Node) XXX_DiscardUnknown() {
xxx_messageInfo_Node.DiscardUnknown(m)
}
var xxx_messageInfo_Node proto.InternalMessageInfo
func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{16}
}
func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *NodeAddress) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeAddress.Merge(m, src)
}
func (m *NodeAddress) XXX_Size() int {
return m.Size()
}
func (m *NodeAddress) XXX_DiscardUnknown() {
xxx_messageInfo_NodeAddress.DiscardUnknown(m)
}
var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
func (m *NodeCondition) Reset() { *m = NodeCondition{} }
func (*NodeCondition) ProtoMessage() {}
func (*NodeCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{17}
}
func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NodeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *NodeCondition) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeCondition.Merge(m, src)
}
func (m *NodeCondition) XXX_Size() int {
return m.Size()
}
func (m *NodeCondition) XXX_DiscardUnknown() {
xxx_messageInfo_NodeCondition.DiscardUnknown(m)
}
var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
func (m *NodeList) Reset() { *m = NodeList{} }
func (*NodeList) ProtoMessage() {}
func (*NodeList) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{18}
}
func (m *NodeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *NodeList) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeList.Merge(m, src)
}
func (m *NodeList) XXX_Size() int {
return m.Size()
}
func (m *NodeList) XXX_DiscardUnknown() {
xxx_messageInfo_NodeList.DiscardUnknown(m)
}
var xxx_messageInfo_NodeList proto.InternalMessageInfo
func (m *NodeSpec) Reset() { *m = NodeSpec{} }
func (*NodeSpec) ProtoMessage() {}
func (*NodeSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{19}
}
func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *NodeSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeSpec.Merge(m, src)
}
func (m *NodeSpec) XXX_Size() int {
return m.Size()
}
func (m *NodeSpec) XXX_DiscardUnknown() {
xxx_messageInfo_NodeSpec.DiscardUnknown(m)
}
var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
func (m *NodeStatus) Reset() { *m = NodeStatus{} }
func (*NodeStatus) ProtoMessage() {}
func (*NodeStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{20}
}
func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *NodeStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeStatus.Merge(m, src)
}
func (m *NodeStatus) XXX_Size() int {
return m.Size()
}
func (m *NodeStatus) XXX_DiscardUnknown() {
xxx_messageInfo_NodeStatus.DiscardUnknown(m)
}
var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
func (m *Pod) Reset() { *m = Pod{} }
func (*Pod) ProtoMessage() {}
func (*Pod) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{21}
}
func (m *Pod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Pod) XXX_Merge(src proto.Message) {
xxx_messageInfo_Pod.Merge(m, src)
}
func (m *Pod) XXX_Size() int {
return m.Size()
}
func (m *Pod) XXX_DiscardUnknown() {
xxx_messageInfo_Pod.DiscardUnknown(m)
}
var xxx_messageInfo_Pod proto.InternalMessageInfo
func (m *PodCondition) Reset() { *m = PodCondition{} }
func (*PodCondition) ProtoMessage() {}
func (*PodCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{22}
}
func (m *PodCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PodCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PodCondition) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodCondition.Merge(m, src)
}
func (m *PodCondition) XXX_Size() int {
return m.Size()
}
func (m *PodCondition) XXX_DiscardUnknown() {
xxx_messageInfo_PodCondition.DiscardUnknown(m)
}
var xxx_messageInfo_PodCondition proto.InternalMessageInfo
func (m *PodIP) Reset() { *m = PodIP{} }
func (*PodIP) ProtoMessage() {}
func (*PodIP) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{23}
}
func (m *PodIP) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PodIP) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodIP.Merge(m, src)
}
func (m *PodIP) XXX_Size() int {
return m.Size()
}
func (m *PodIP) XXX_DiscardUnknown() {
xxx_messageInfo_PodIP.DiscardUnknown(m)
}
var xxx_messageInfo_PodIP proto.InternalMessageInfo
func (m *PodList) Reset() { *m = PodList{} }
func (*PodList) ProtoMessage() {}
func (*PodList) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{24}
}
func (m *PodList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PodList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PodList) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodList.Merge(m, src)
}
func (m *PodList) XXX_Size() int {
return m.Size()
}
func (m *PodList) XXX_DiscardUnknown() {
xxx_messageInfo_PodList.DiscardUnknown(m)
}
var xxx_messageInfo_PodList proto.InternalMessageInfo
func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
func (*PodReadinessGate) ProtoMessage() {}
func (*PodReadinessGate) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{25}
}
func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PodReadinessGate) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodReadinessGate.Merge(m, src)
}
func (m *PodReadinessGate) XXX_Size() int {
return m.Size()
}
func (m *PodReadinessGate) XXX_DiscardUnknown() {
xxx_messageInfo_PodReadinessGate.DiscardUnknown(m)
}
var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
func (m *PodSpec) Reset() { *m = PodSpec{} }
func (*PodSpec) ProtoMessage() {}
func (*PodSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{26}
}
func (m *PodSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PodSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PodSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodSpec.Merge(m, src)
}
func (m *PodSpec) XXX_Size() int {
return m.Size()
}
func (m *PodSpec) XXX_DiscardUnknown() {
xxx_messageInfo_PodSpec.DiscardUnknown(m)
}
var xxx_messageInfo_PodSpec proto.InternalMessageInfo
func (m *PodStatus) Reset() { *m = PodStatus{} }
func (*PodStatus) ProtoMessage() {}
func (*PodStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{27}
}
func (m *PodStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PodStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PodStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_PodStatus.Merge(m, src)
}
func (m *PodStatus) XXX_Size() int {
return m.Size()
}
func (m *PodStatus) XXX_DiscardUnknown() {
xxx_messageInfo_PodStatus.DiscardUnknown(m)
}
var xxx_messageInfo_PodStatus proto.InternalMessageInfo
func (m *PortStatus) Reset() { *m = PortStatus{} }
func (*PortStatus) ProtoMessage() {}
func (*PortStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{28}
}
func (m *PortStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PortStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_PortStatus.Merge(m, src)
}
func (m *PortStatus) XXX_Size() int {
return m.Size()
}
func (m *PortStatus) XXX_DiscardUnknown() {
xxx_messageInfo_PortStatus.DiscardUnknown(m)
}
var xxx_messageInfo_PortStatus proto.InternalMessageInfo
func (m *Secret) Reset() { *m = Secret{} }
func (*Secret) ProtoMessage() {}
func (*Secret) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{29}
}
func (m *Secret) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Secret) XXX_Merge(src proto.Message) {
xxx_messageInfo_Secret.Merge(m, src)
}
func (m *Secret) XXX_Size() int {
return m.Size()
}
func (m *Secret) XXX_DiscardUnknown() {
xxx_messageInfo_Secret.DiscardUnknown(m)
}
var xxx_messageInfo_Secret proto.InternalMessageInfo
func (m *SecretList) Reset() { *m = SecretList{} }
func (*SecretList) ProtoMessage() {}
func (*SecretList) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{30}
}
func (m *SecretList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SecretList) XXX_Merge(src proto.Message) {
xxx_messageInfo_SecretList.Merge(m, src)
}
func (m *SecretList) XXX_Size() int {
return m.Size()
}
func (m *SecretList) XXX_DiscardUnknown() {
xxx_messageInfo_SecretList.DiscardUnknown(m)
}
var xxx_messageInfo_SecretList proto.InternalMessageInfo
func (m *Service) Reset() { *m = Service{} }
func (*Service) ProtoMessage() {}
func (*Service) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{31}
}
func (m *Service) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Service) XXX_Merge(src proto.Message) {
xxx_messageInfo_Service.Merge(m, src)
}
func (m *Service) XXX_Size() int {
return m.Size()
}
func (m *Service) XXX_DiscardUnknown() {
xxx_messageInfo_Service.DiscardUnknown(m)
}
var xxx_messageInfo_Service proto.InternalMessageInfo
func (m *ServiceList) Reset() { *m = ServiceList{} }
func (*ServiceList) ProtoMessage() {}
func (*ServiceList) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{32}
}
func (m *ServiceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ServiceList) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServiceList.Merge(m, src)
}
func (m *ServiceList) XXX_Size() int {
return m.Size()
}
func (m *ServiceList) XXX_DiscardUnknown() {
xxx_messageInfo_ServiceList.DiscardUnknown(m)
}
var xxx_messageInfo_ServiceList proto.InternalMessageInfo
func (m *ServicePort) Reset() { *m = ServicePort{} }
func (*ServicePort) ProtoMessage() {}
func (*ServicePort) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{33}
}
func (m *ServicePort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ServicePort) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServicePort.Merge(m, src)
}
func (m *ServicePort) XXX_Size() int {
return m.Size()
}
func (m *ServicePort) XXX_DiscardUnknown() {
xxx_messageInfo_ServicePort.DiscardUnknown(m)
}
var xxx_messageInfo_ServicePort proto.InternalMessageInfo
func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
func (*ServiceSpec) ProtoMessage() {}
func (*ServiceSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{34}
}
func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ServiceSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServiceSpec.Merge(m, src)
}
func (m *ServiceSpec) XXX_Size() int {
return m.Size()
}
func (m *ServiceSpec) XXX_DiscardUnknown() {
xxx_messageInfo_ServiceSpec.DiscardUnknown(m)
}
var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
func (*ServiceStatus) ProtoMessage() {}
func (*ServiceStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{35}
}
func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ServiceStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServiceStatus.Merge(m, src)
}
func (m *ServiceStatus) XXX_Size() int {
return m.Size()
}
func (m *ServiceStatus) XXX_DiscardUnknown() {
xxx_messageInfo_ServiceStatus.DiscardUnknown(m)
}
var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
func (*SessionAffinityConfig) ProtoMessage() {}
func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{36}
}
func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionAffinityConfig.Merge(m, src)
}
func (m *SessionAffinityConfig) XXX_Size() int {
return m.Size()
}
func (m *SessionAffinityConfig) XXX_DiscardUnknown() {
xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m)
}
var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
func (m *Taint) Reset() { *m = Taint{} }
func (*Taint) ProtoMessage() {}
func (*Taint) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{37}
}
func (m *Taint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Taint) XXX_Merge(src proto.Message) {
xxx_messageInfo_Taint.Merge(m, src)
}
func (m *Taint) XXX_Size() int {
return m.Size()
}
func (m *Taint) XXX_DiscardUnknown() {
xxx_messageInfo_Taint.DiscardUnknown(m)
}
var xxx_messageInfo_Taint proto.InternalMessageInfo
func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
func (*TypedLocalObjectReference) ProtoMessage() {}
func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{38}
}
func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TypedLocalObjectReference) XXX_Merge(src proto.Message) {
xxx_messageInfo_TypedLocalObjectReference.Merge(m, src)
}
func (m *TypedLocalObjectReference) XXX_Size() int {
return m.Size()
}
func (m *TypedLocalObjectReference) XXX_DiscardUnknown() {
xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m)
}
var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
func (m *VolumeMount) Reset() { *m = VolumeMount{} }
func (*VolumeMount) ProtoMessage() {}
func (*VolumeMount) Descriptor() ([]byte, []int) {
return fileDescriptor_871504499faea14d, []int{39}
}
func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *VolumeMount) XXX_Merge(src proto.Message) {
xxx_messageInfo_VolumeMount.Merge(m, src)
}
func (m *VolumeMount) XXX_Size() int {
return m.Size()
}
func (m *VolumeMount) XXX_DiscardUnknown() {
xxx_messageInfo_VolumeMount.DiscardUnknown(m)
}
var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
func init() {
proto.RegisterType((*ClientIPConfig)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ClientIPConfig")
proto.RegisterType((*Container)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Container")
proto.RegisterType((*ContainerPort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ContainerPort")
proto.RegisterType((*ContainerState)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ContainerState")
proto.RegisterType((*ContainerStateRunning)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ContainerStateRunning")
proto.RegisterType((*ContainerStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ContainerStatus")
proto.RegisterType((*EndpointAddress)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.EndpointAddress")
proto.RegisterType((*EndpointPort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.EndpointPort")
proto.RegisterType((*EndpointSubset)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.EndpointSubset")
proto.RegisterType((*Endpoints)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Endpoints")
proto.RegisterType((*EndpointsList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.EndpointsList")
proto.RegisterType((*LoadBalancerIngress)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.LoadBalancerIngress")
proto.RegisterType((*LoadBalancerStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.LoadBalancerStatus")
proto.RegisterType((*Namespace)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Namespace")
proto.RegisterType((*NamespaceList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.NamespaceList")
proto.RegisterType((*Node)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Node")
proto.RegisterType((*NodeAddress)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.NodeAddress")
proto.RegisterType((*NodeCondition)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.NodeCondition")
proto.RegisterType((*NodeList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.NodeList")
proto.RegisterType((*NodeSpec)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.NodeSpec")
proto.RegisterType((*NodeStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.NodeStatus")
proto.RegisterType((*Pod)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Pod")
proto.RegisterType((*PodCondition)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodCondition")
proto.RegisterType((*PodIP)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodIP")
proto.RegisterType((*PodList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodList")
proto.RegisterType((*PodReadinessGate)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodReadinessGate")
proto.RegisterType((*PodSpec)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodSpec")
proto.RegisterType((*PodStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodStatus")
proto.RegisterType((*PortStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PortStatus")
proto.RegisterType((*Secret)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Secret")
proto.RegisterMapType((map[string]Bytes)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Secret.DataEntry")
proto.RegisterMapType((map[string]string)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Secret.StringDataEntry")
proto.RegisterType((*SecretList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.SecretList")
proto.RegisterType((*Service)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Service")
proto.RegisterType((*ServiceList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ServiceList")
proto.RegisterType((*ServicePort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ServicePort")
proto.RegisterType((*ServiceSpec)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ServiceSpec")
proto.RegisterMapType((map[string]string)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ServiceSpec.SelectorEntry")
proto.RegisterType((*ServiceStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.ServiceStatus")
proto.RegisterType((*SessionAffinityConfig)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.SessionAffinityConfig")
proto.RegisterType((*Taint)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.Taint")
proto.RegisterType((*TypedLocalObjectReference)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.TypedLocalObjectReference")
proto.RegisterType((*VolumeMount)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.VolumeMount")
}
func init() {
proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto", fileDescriptor_871504499faea14d)
}
var fileDescriptor_871504499faea14d = []byte{
// 2800 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0xcf, 0x6f, 0x24, 0x47,
0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0x33, 0x6f, 0xfc, 0x23, 0x5b, 0x1b, 0x27, 0x1d, 0x7f, 0x13, 0xdb,
0xdf, 0x5e, 0x84, 0x36, 0xfc, 0x98, 0xd1, 0x2e, 0x2c, 0x6c, 0x76, 0x93, 0x25, 0x9e, 0xb1, 0xb3,
0x3b, 0x89, 0xbd, 0x3b, 0xd4, 0x58, 0x11, 0x82, 0x03, 0x69, 0x77, 0x97, 0xc7, 0x15, 0xcf, 0x74,
0x37, 0x5d, 0x35, 0x9b, 0x8c, 0x40, 0x90, 0x08, 0x81, 0xc2, 0x0f, 0x09, 0xfe, 0x01, 0x2e, 0xdc,
0x38, 0xc3, 0x8d, 0x03, 0x27, 0xa4, 0xe5, 0x16, 0x4e, 0x44, 0x08, 0x99, 0xac, 0x91, 0x90, 0x38,
0x70, 0x40, 0x9c, 0x30, 0x12, 0x42, 0x55, 0x5d, 0x5d, 0xdd, 0x3d, 0x9e, 0xd9, 0xb5, 0x67, 0x2c,
0x59, 0x9c, 0x66, 0xfa, 0xbd, 0x57, 0xef, 0x53, 0xaf, 0xea, 0xbd, 0x57, 0xaf, 0x5e, 0x37, 0x6c,
0xb4, 0x29, 0xdf, 0xeb, 0xed, 0x54, 0x1c, 0xbf, 0x5b, 0x75, 0x68, 0x87, 0xf6, 0xf4, 0x4f, 0xb0,
0xdf, 0xae, 0xee, 0xdf, 0x60, 0x55, 0xd6, 0xa1, 0x5d, 0xf9, 0xc7, 0x0e, 0x68, 0xd5, 0xf1, 0x43,
0x52, 0x7d, 0x70, 0xb5, 0xda, 0x26, 0x1e, 0x09, 0x6d, 0x4e, 0xdc, 0x4a, 0x10, 0xfa, 0xdc, 0x47,
0xd7, 0x13, 0x35, 0x95, 0x68, 0x7c, 0xfc, 0x13, 0xec, 0xb7, 0x2b, 0xfb, 0x37, 0x58, 0x45, 0xa8,
0x91, 0x7f, 0xec, 0x80, 0x56, 0x84, 0x9a, 0xca, 0x83, 0xab, 0x4b, 0xaf, 0x9d, 0x0a, 0x9d, 0x55,
0xbb, 0x84, 0xdb, 0x43, 0xe0, 0x97, 0x5e, 0x3f, 0xa5, 0x9e, 0x1e, 0xa7, 0x9d, 0x2a, 0xf5, 0x38,
0xe3, 0xe1, 0x31, 0x5d, 0x9f, 0x4d, 0xe9, 0x6a, 0xfb, 0x6d, 0xbf, 0x2a, 0xc9, 0x3b, 0xbd, 0x5d,
0xf9, 0x24, 0x1f, 0xe4, 0x3f, 0x25, 0x2e, 0x94, 0x56, 0xa8, 0x2f, 0xf4, 0x76, 0x6d, 0x67, 0x8f,
0x7a, 0x24, 0xec, 0x4b, 0xd4, 0xb0, 0xe7, 0x71, 0xda, 0x25, 0xc7, 0xf4, 0x7f, 0xe1, 0x49, 0x03,
0x98, 0xb3, 0x47, 0xba, 0xf6, 0xe0, 0x38, 0x6b, 0x13, 0xe6, 0xeb, 0x1d, 0x4a, 0x3c, 0xde, 0x68,
0xd6, 0x7d, 0x6f, 0x97, 0xb6, 0xd1, 0x4d, 0x98, 0x17, 0x03, 0xfc, 0x1e, 0x6f, 0x11, 0xc7, 0xf7,
0x5c, 0x66, 0x1a, 0xab, 0xc6, 0x95, 0x42, 0x0d, 0x1d, 0x1e, 0xac, 0xcc, 0x6f, 0x67, 0x38, 0x78,
0x40, 0xd2, 0xfa, 0x75, 0x0e, 0x4a, 0x75, 0xdf, 0xe3, 0xb6, 0xc0, 0x47, 0xab, 0x30, 0xe5, 0xd9,
0x5d, 0x22, 0xc7, 0x97, 0x6a, 0xb3, 0x0f, 0x0f, 0x56, 0x2e, 0x1c, 0x1e, 0xac, 0x4c, 0xdd, 0xb3,
0xbb, 0x04, 0x4b, 0x0e, 0xba, 0x0c, 0x05, 0xda, 0xb5, 0xdb, 0xc4, 0xcc, 0x49, 0x91, 0x39, 0x25,
0x52, 0x68, 0x08, 0x22, 0x8e, 0x78, 0x88, 0x42, 0x21, 0xf0, 0x43, 0xce, 0xcc, 0xe9, 0xd5, 0xfc,
0x95, 0xf2, 0xb5, 0xf5, 0xca, 0x58, 0x5e, 0x51, 0xd1, 0xf3, 0x6a, 0xfa, 0x21, 0x4f, 0xa0, 0xc4,
0x13, 0xc3, 0x11, 0x02, 0xfa, 0x16, 0xcc, 0x3e, 0xf0, 0x3b, 0xbd, 0x2e, 0xd9, 0xf2, 0x7b, 0x1e,
0x67, 0x66, 0x49, 0x22, 0xd6, 0xc6, 0x44, 0x7c, 0x33, 0x51, 0x55, 0x7b, 0x5a, 0xe1, 0xcd, 0xa6,
0x88, 0x0c, 0x67, 0xd0, 0xac, 0xff, 0x18, 0x30, 0x97, 0x99, 0xe5, 0x09, 0x56, 0xf0, 0x33, 0x50,
0xdc, 0xf3, 0x19, 0x17, 0xd2, 0x72, 0x11, 0x0b, 0xb5, 0xa7, 0x94, 0x54, 0xf1, 0xae, 0xa2, 0x63,
0x2d, 0x81, 0x6e, 0xc1, 0x9c, 0x93, 0x06, 0x30, 0xf3, 0x72, 0xc8, 0xa2, 0x1a, 0x92, 0x45, 0xc7,
0x59, 0x59, 0x74, 0x03, 0x8a, 0xd2, 0x67, 0x1c, 0xbf, 0x63, 0x4e, 0xc9, 0x09, 0x3d, 0x1f, 0x43,
0x35, 0x15, 0xfd, 0x28, 0xf5, 0x1f, 0x6b, 0x69, 0xf4, 0x49, 0x98, 0x16, 0x53, 0x68, 0x34, 0xcd,
0x82, 0x1c, 0x37, 0xaf, 0xc6, 0x4d, 0xdf, 0x95, 0x54, 0xac, 0xb8, 0xd6, 0xf7, 0x0c, 0x98, 0xd7,
0x53, 0x68, 0x71, 0x9b, 0x13, 0xc4, 0x60, 0x26, 0xec, 0x79, 0x1e, 0xf5, 0xda, 0xd2, 0xbc, 0xf2,
0xb5, 0xcd, 0x49, 0xb7, 0x5f, 0xea, 0xc5, 0x91, 0xce, 0x5a, 0xf9, 0xf0, 0x60, 0x65, 0x46, 0x3d,
0xe0, 0x18, 0xc9, 0xfa, 0xbe, 0x01, 0x8b, 0x43, 0xe5, 0x51, 0x17, 0x4a, 0x8c, 0xdb, 0x21, 0x27,
0xee, 0x1a, 0x97, 0xbb, 0x52, 0xbe, 0xf6, 0xf2, 0xe9, 0x26, 0xc4, 0x2a, 0x22, 0xdd, 0x88, 0x19,
0x89, 0x58, 0xaa, 0x5d, 0x54, 0x4b, 0x51, 0x6a, 0xc5, 0x6a, 0x71, 0x82, 0x60, 0xfd, 0xca, 0x80,
0x85, 0xcc, 0x44, 0x7a, 0x0c, 0xbd, 0x0d, 0x05, 0x26, 0xa6, 0xa4, 0xd6, 0x63, 0xe3, 0x4c, 0xd6,
0x23, 0x89, 0x87, 0xc8, 0xdc, 0x08, 0x02, 0x5d, 0x87, 0xb2, 0xf6, 0x81, 0xc6, 0xba, 0x59, 0x94,
0xbb, 0x77, 0x49, 0x89, 0x96, 0xeb, 0x09, 0x0b, 0xa7, 0xe5, 0xc4, 0x3e, 0x2e, 0x6c, 0x78, 0x6e,
0xe0, 0x53, 0x8f, 0xaf, 0xb9, 0x6e, 0x48, 0x18, 0x43, 0x4b, 0x90, 0xa3, 0x81, 0x72, 0x64, 0x50,
0x1a, 0x72, 0x8d, 0x26, 0xce, 0xd1, 0x20, 0x76, 0x62, 0xe9, 0xea, 0x79, 0x29, 0x91, 0x71, 0x62,
0x41, 0xc7, 0x5a, 0x02, 0x5d, 0x81, 0xa2, 0xe7, 0xbb, 0x44, 0x04, 0x81, 0xf2, 0xc3, 0x59, 0x21,
0x79, 0x4f, 0xd1, 0xb0, 0xe6, 0x5a, 0xbf, 0x31, 0x60, 0x36, 0x9e, 0xc7, 0x09, 0xe3, 0x69, 0x15,
0xa6, 0x82, 0x24, 0x96, 0xb4, 0x84, 0x8c, 0x07, 0xc9, 0xc9, 0x84, 0x41, 0xfe, 0x54, 0x61, 0x70,
0x15, 0xca, 0x76, 0x10, 0x34, 0xb3, 0x31, 0xb4, 0x20, 0x56, 0x72, 0x2d, 0x21, 0xe3, 0xb4, 0x8c,
0xf5, 0x4f, 0x03, 0xe6, 0x63, 0x0b, 0x5a, 0xbd, 0x1d, 0x46, 0x38, 0x7a, 0x07, 0x4a, 0x76, 0xb4,
0xa6, 0x44, 0xa4, 0x66, 0x91, 0xa0, 0x5e, 0x1b, 0xd3, 0x07, 0x06, 0xf6, 0x28, 0x71, 0xc6, 0xb5,
0x18, 0x00, 0x27, 0x58, 0x68, 0x2f, 0xce, 0xc3, 0x79, 0x09, 0x5a, 0x9f, 0x10, 0x74, 0x74, 0x1a,
0xb6, 0xfe, 0x61, 0x40, 0x29, 0x16, 0x63, 0x28, 0x84, 0xa2, 0x08, 0x19, 0xd7, 0xe6, 0xb6, 0x0a,
0xb9, 0xda, 0xb8, 0x21, 0x77, 0x7f, 0xe7, 0x6d, 0xe2, 0xf0, 0x2d, 0xc2, 0xed, 0x1a, 0x52, 0xc8,
0x90, 0xd0, 0xb0, 0xc6, 0x41, 0x01, 0xcc, 0x30, 0xb9, 0xdc, 0xcc, 0xcc, 0x49, 0x6b, 0x37, 0x26,
0xb4, 0x36, 0xda, 0xbc, 0xda, 0x82, 0x42, 0x9d, 0x89, 0x9e, 0x19, 0x8e, 0x61, 0xac, 0xbf, 0x1a,
0x30, 0xa7, 0x6d, 0xde, 0xa4, 0x8c, 0x23, 0xef, 0x98, 0xdd, 0xaf, 0x8e, 0x6b, 0xb7, 0xd0, 0x27,
0xad, 0xd6, 0x71, 0x15, 0x53, 0x52, 0x36, 0x13, 0x28, 0x50, 0x4e, 0xba, 0xb1, 0xc5, 0xaf, 0x4e,
0x68, 0x31, 0x4b, 0x1d, 0xe7, 0x42, 0x2d, 0x8e, 0xb4, 0x5b, 0xef, 0xe7, 0xe0, 0xd2, 0xa6, 0x6f,
0xbb, 0x35, 0xbb, 0x63, 0x7b, 0x0e, 0x09, 0x1b, 0x5e, 0xfb, 0x54, 0x09, 0x22, 0xf7, 0xc4, 0x04,
0x71, 0x03, 0xa6, 0x69, 0xb0, 0xe5, 0xbb, 0x71, 0x32, 0x59, 0x15, 0x47, 0x4d, 0xa3, 0x29, 0x28,
0x47, 0x07, 0x2b, 0x28, 0x03, 0x2e, 0xa9, 0x58, 0xc9, 0xa3, 0xdd, 0xd8, 0xc5, 0xa7, 0xe4, 0x12,
0xac, 0x8d, 0xb9, 0x04, 0xc2, 0x99, 0xa3, 0x6c, 0x3d, 0xc2, 0xc1, 0x7f, 0x64, 0x40, 0x66, 0x1a,
0x2a, 0xb5, 0xf7, 0x60, 0x86, 0x46, 0xab, 0xa1, 0x02, 0xfb, 0xf5, 0x31, 0x27, 0x30, 0x64, 0x7d,
0x13, 0xd7, 0x53, 0x04, 0x1c, 0x63, 0x59, 0xdf, 0x81, 0x92, 0xc8, 0x80, 0x2c, 0xb0, 0x1d, 0x72,
0x1e, 0xd1, 0x26, 0x7d, 0x5f, 0xcf, 0xe0, 0x7f, 0xd9, 0xf7, 0xb5, 0x11, 0x23, 0x7c, 0xff, 0x61,
0x0e, 0xa6, 0xc4, 0x39, 0x75, 0x2e, 0x39, 0xcd, 0x86, 0x29, 0x16, 0x10, 0x47, 0xd5, 0x0d, 0x5f,
0x1a, 0xd7, 0x44, 0xdf, 0x25, 0xad, 0x80, 0x38, 0xc9, 0xd9, 0x28, 0x9e, 0xb0, 0x54, 0x8d, 0x28,
0x4c, 0x33, 0xe9, 0xca, 0x32, 0xf2, 0xc6, 0x0f, 0x20, 0x09, 0x12, 0x05, 0x90, 0xae, 0x15, 0xa3,
0x67, 0xac, 0x00, 0xac, 0x2e, 0x94, 0x85, 0x54, 0x5c, 0x5e, 0x7c, 0x0e, 0xa6, 0x78, 0x3f, 0x88,
0x4f, 0xf6, 0x95, 0x78, 0x6e, 0xdb, 0xfd, 0x40, 0xc4, 0xfc, 0x42, 0x4a, 0x54, 0x90, 0xb0, 0x14,
0x46, 0x2f, 0xc2, 0x8c, 0x3a, 0xde, 0x54, 0x56, 0xd1, 0x31, 0xa2, 0x64, 0x71, 0xcc, 0xb7, 0x7e,
0x21, 0x5c, 0xd4, 0x77, 0x49, 0xdd, 0xf7, 0x5c, 0xca, 0xa9, 0xef, 0xa1, 0xeb, 0x19, 0xc4, 0xff,
0x1f, 0x40, 0xbc, 0x98, 0x11, 0x4e, 0x61, 0xbe, 0xa4, 0x97, 0x28, 0x97, 0x19, 0xa8, 0xec, 0x13,
0x93, 0xd5, 0xc3, 0xb2, 0x26, 0x8b, 0x32, 0x3a, 0x24, 0x36, 0xf3, 0xbd, 0xc1, 0x32, 0x1a, 0x4b,
0x2a, 0x56, 0x5c, 0xeb, 0x4f, 0x06, 0xc8, 0x6a, 0xe8, 0x5c, 0x22, 0xe9, 0xad, 0x6c, 0x24, 0xdd,
0x9a, 0xc0, 0x03, 0x46, 0x04, 0xd1, 0xbf, 0x94, 0x79, 0xc2, 0xef, 0xc4, 0x16, 0x06, 0xbe, 0x5b,
0x6f, 0xac, 0x63, 0xb5, 0x11, 0x7a, 0x0b, 0x9b, 0x11, 0x19, 0xc7, 0x7c, 0x51, 0x37, 0xaa, 0xbf,
0xcc, 0x9c, 0x59, 0xcd, 0xc7, 0x75, 0xa3, 0x92, 0x63, 0x58, 0x73, 0xd1, 0x35, 0x80, 0x20, 0xf4,
0x1f, 0x50, 0x57, 0x56, 0xbd, 0xd1, 0x21, 0xa2, 0x63, 0xab, 0xa9, 0x39, 0x38, 0x25, 0x85, 0x5c,
0x98, 0x16, 0xf5, 0x2f, 0x67, 0x66, 0x41, 0x1a, 0xfe, 0xf2, 0x98, 0x86, 0x6f, 0x0b, 0x25, 0xc9,
0xd6, 0xca, 0x47, 0x86, 0x95, 0x6e, 0xeb, 0xdf, 0x06, 0x40, 0x12, 0x1c, 0xe8, 0x5d, 0x00, 0x27,
0x76, 0x96, 0xf8, 0xd0, 0x5a, 0x9f, 0x60, 0xc5, 0xb5, 0xe7, 0x25, 0xe6, 0x6a, 0x12, 0xc3, 0x29,
0x2c, 0xc4, 0xd2, 0x55, 0x68, 0x61, 0xa2, 0x6b, 0x72, 0x2a, 0x36, 0x1f, 0x5f, 0x81, 0x5a, 0xbf,
0xcd, 0x41, 0xbe, 0xe9, 0xbb, 0xe7, 0x92, 0x3d, 0xdf, 0xca, 0x64, 0xcf, 0xdb, 0x63, 0x57, 0x06,
0xee, 0xc8, 0xe4, 0xb9, 0x37, 0x90, 0x3c, 0x5f, 0x9d, 0x00, 0xe3, 0xf1, 0xb9, 0xf3, 0xa3, 0x3c,
0xcc, 0x0a, 0xb7, 0xd7, 0xb9, 0xec, 0xf3, 0x99, 0x5c, 0xb6, 0x3a, 0x90, 0xcb, 0x9e, 0x4a, 0xcb,
0x9e, 0x4d, 0x2a, 0xeb, 0xc3, 0x5c, 0xc7, 0x66, 0xbc, 0x19, 0xfa, 0x3b, 0x44, 0xdc, 0x83, 0x95,
0xc9, 0x93, 0xdd, 0xa5, 0x75, 0x1b, 0x63, 0x33, 0xad, 0x1a, 0x67, 0x91, 0xd0, 0x07, 0x06, 0x20,
0x41, 0xd9, 0x0e, 0x6d, 0x8f, 0x45, 0x26, 0x51, 0x75, 0x93, 0x9c, 0x74, 0x02, 0x4b, 0x6a, 0x02,
0x68, 0xf3, 0x98, 0x7e, 0x3c, 0x04, 0xf3, 0xa4, 0x09, 0x5d, 0x24, 0xb9, 0x2e, 0x61, 0xcc, 0x6e,
0x13, 0x73, 0x3a, 0x9b, 0xe4, 0xb6, 0x22, 0x32, 0x8e, 0xf9, 0xd6, 0x65, 0x28, 0x34, 0x7d, 0xb7,
0xd1, 0x7c, 0x5c, 0x39, 0x6d, 0xfd, 0xd1, 0x00, 0x91, 0x1e, 0xcf, 0xe5, 0x7c, 0xf8, 0x7a, 0xf6,
0x7c, 0xb8, 0x39, 0xbe, 0x93, 0x8f, 0x38, 0x1e, 0x1c, 0x10, 0xfe, 0x8a, 0x89, 0xed, 0x52, 0x8f,
0x30, 0x76, 0xc7, 0xe6, 0x04, 0xdd, 0x97, 0x7d, 0xaf, 0xc4, 0x81, 0xd5, 0xba, 0xbc, 0x98, 0xea,
0x7b, 0x25, 0xcc, 0xa1, 0x1e, 0x9f, 0x1d, 0x6f, 0xfd, 0x32, 0x2f, 0x57, 0x50, 0x1e, 0x41, 0xef,
0x19, 0x30, 0x4f, 0x3d, 0xca, 0x75, 0x3b, 0x84, 0x99, 0x4f, 0x4f, 0x54, 0x45, 0x6a, 0x45, 0xb5,
0x67, 0xd4, 0x04, 0xe7, 0x1b, 0x19, 0xfd, 0x78, 0x00, 0x0f, 0x71, 0x79, 0x0e, 0xc4, 0xe8, 0xb9,
0x33, 0x42, 0x4f, 0x9f, 0x01, 0x31, 0x72, 0x0a, 0x07, 0xbd, 0x0e, 0x88, 0x91, 0xf0, 0x01, 0x75,
0xc8, 0x9a, 0xe3, 0xf8, 0x3d, 0x8f, 0xcb, 0x96, 0x4c, 0xd4, 0x24, 0xd2, 0xa1, 0xd0, 0x3a, 0x26,
0x81, 0x87, 0x8c, 0x12, 0x37, 0x3c, 0xdd, 0xd4, 0x81, 0xec, 0x0d, 0xef, 0x78, 0x63, 0x07, 0x5d,
0x87, 0xb2, 0xb8, 0xed, 0xdd, 0x23, 0xfc, 0x1d, 0x3f, 0xdc, 0x37, 0xcb, 0xab, 0xc6, 0x95, 0x62,
0xd2, 0x97, 0xba, 0x9b, 0xb0, 0x70, 0x5a, 0xce, 0xfa, 0x79, 0x01, 0x4a, 0x3a, 0x3b, 0xa2, 0x2a,
0x14, 0x82, 0x3d, 0x9b, 0xc5, 0xce, 0xf0, 0x9c, 0xbe, 0xa9, 0x09, 0xe2, 0x51, 0x54, 0x19, 0xc8,
0xff, 0x38, 0x92, 0x43, 0xef, 0x64, 0x4e, 0xdb, 0xdc, 0x44, 0x5d, 0x90, 0xb4, 0x83, 0x3d, 0xf1,
0xb0, 0x3d, 0x61, 0xff, 0x14, 0x5d, 0x16, 0xd7, 0x57, 0xb7, 0xd1, 0x54, 0x59, 0x22, 0x75, 0xf7,
0x74, 0x1b, 0x4d, 0x1c, 0xf1, 0x44, 0xa1, 0x22, 0xff, 0x30, 0x73, 0x76, 0xa2, 0x42, 0x45, 0x2a,
0x4d, 0xa6, 0x22, 0x1f, 0x19, 0x56, 0xba, 0x11, 0x55, 0x8d, 0x52, 0x99, 0x5b, 0x67, 0xce, 0x20,
0xb7, 0xce, 0xe9, 0x26, 0xa9, 0x4c, 0xa7, 0x89, 0x76, 0xf4, 0x13, 0x03, 0x2e, 0x3a, 0xd9, 0x26,
0x29, 0x61, 0x66, 0x71, 0xa2, 0xce, 0xd8, 0x40, 0xd3, 0x55, 0x3b, 0xc7, 0xc5, 0xfa, 0x20, 0x10,
0x3e, 0x8e, 0x8d, 0x6e, 0x41, 0xf1, 0x1b, 0x3e, 0xab, 0x77, 0x6c, 0xc6, 0xcc, 0x52, 0xe6, 0x42,
0x52, 0xfc, 0xf2, 0xfd, 0x96, 0xa4, 0x1f, 0x1d, 0xac, 0x94, 0x9b, 0xbe, 0x1b, 0x3f, 0x62, 0x3d,
0xc0, 0xfa, 0x81, 0x01, 0x90, 0x34, 0x10, 0x74, 0x43, 0xd2, 0x38, 0x51, 0x43, 0x32, 0x77, 0xaa,
0x86, 0xe4, 0x0a, 0x14, 0x48, 0x18, 0xfa, 0xa1, 0x2a, 0x71, 0x4b, 0xc2, 0x57, 0x36, 0x04, 0x01,
0x47, 0x74, 0xeb, 0x77, 0x53, 0x30, 0xdd, 0x22, 0x4e, 0x48, 0xf8, 0xb9, 0xd4, 0x5c, 0x9f, 0x86,
0x12, 0xed, 0x76, 0x7b, 0xdc, 0xde, 0xe9, 0x10, 0xe9, 0xfa, 0xc5, 0xc8, 0x0d, 0x1a, 0x31, 0x11,
0x27, 0x7c, 0x14, 0xc2, 0x94, 0x9c, 0x5c, 0x14, 0x97, 0x77, 0xc6, 0xdc, 0xf8, 0xc8, 0xda, 0xca,
0xba, 0xcd, 0xed, 0x0d, 0x8f, 0x87, 0x7d, 0x5d, 0x54, 0x4c, 0x09, 0xd2, 0x0f, 0xff, 0xbc, 0x52,
0xa8, 0xf5, 0x39, 0x61, 0x58, 0x62, 0xa1, 0xf7, 0x0d, 0x00, 0xc6, 0x43, 0xea, 0xb5, 0x05, 0x57,
0x15, 0xe0, 0x5b, 0x93, 0x41, 0xb7, 0xb4, 0xbe, 0x68, 0x02, 0x7a, 0x89, 0x12, 0x06, 0x4e, 0x81,
0xa2, 0x8a, 0xaa, 0xdd, 0xf2, 0x99, 0xbc, 0x1b, 0xd7, 0x6e, 0x10, 0x69, 0x4d, 0xaa, 0xb6, 0xa5,
0x2f, 0x42, 0x49, 0x2b, 0x47, 0x4f, 0x41, 0x7e, 0x9f, 0xf4, 0xa3, 0x0c, 0x88, 0xc5, 0x5f, 0xf4,
0x34, 0x14, 0x1e, 0xd8, 0x9d, 0x5e, 0xd4, 0x67, 0x9b, 0xc5, 0xd1, 0xc3, 0xcd, 0xdc, 0x0d, 0x63,
0xe9, 0x15, 0x58, 0x18, 0x98, 0xdb, 0x93, 0x86, 0x97, 0x52, 0xc3, 0xad, 0x8f, 0x0d, 0x50, 0x93,
0x39, 0x97, 0xba, 0x63, 0x27, 0x5b, 0x77, 0xbc, 0x32, 0xd1, 0x26, 0x8d, 0x28, 0x3d, 0x7e, 0x9f,
0x83, 0x19, 0x75, 0xde, 0x9d, 0x4b, 0xbc, 0xb8, 0x99, 0x3b, 0x4a, 0x6d, 0x6c, 0x13, 0xa5, 0x05,
0x23, 0xef, 0x29, 0x9d, 0x81, 0x7b, 0xca, 0xfa, 0x84, 0x38, 0x8f, 0xbf, 0xab, 0x1c, 0x1a, 0x50,
0x56, 0x92, 0xe7, 0xe2, 0x37, 0x4e, 0xd6, 0x6f, 0x6e, 0x4f, 0x66, 0xec, 0x08, 0xc7, 0xf9, 0x5b,
0x4e, 0x1b, 0x79, 0xc2, 0xf7, 0x54, 0xe3, 0x27, 0xfd, 0x81, 0xb7, 0x50, 0xd3, 0x4f, 0x7e, 0x0b,
0xa5, 0xcf, 0xa0, 0xfc, 0xc8, 0x33, 0xe8, 0x9b, 0x00, 0xdc, 0x0e, 0xdb, 0x24, 0x7a, 0x11, 0x1d,
0xdd, 0xa5, 0xee, 0x9c, 0x76, 0x5f, 0x7a, 0x9c, 0x76, 0x2a, 0xd1, 0xf7, 0x13, 0x95, 0x86, 0xc7,
0xef, 0x87, 0x51, 0xba, 0x49, 0x9c, 0x7e, 0x5b, 0x43, 0xe0, 0x14, 0x5c, 0x5c, 0x3b, 0x4a, 0xe8,
0x42, 0xf6, 0x1d, 0xf8, 0x3d, 0x45, 0xc7, 0x5a, 0xc2, 0xfa, 0x43, 0x59, 0xaf, 0xb5, 0x2c, 0xdf,
0xdb, 0x71, 0xcf, 0xdf, 0x98, 0xa8, 0x8b, 0x91, 0xda, 0xbe, 0x11, 0x1f, 0x17, 0x7c, 0x1b, 0x8a,
0x8c, 0x74, 0x88, 0xc3, 0xfd, 0x50, 0x39, 0x53, 0x73, 0xf2, 0x08, 0xad, 0xb4, 0x94, 0xca, 0xe8,
0xb0, 0xd0, 0x86, 0xc7, 0x64, 0xac, 0x31, 0x51, 0x15, 0x4a, 0x4e, 0xa7, 0xc7, 0x38, 0x09, 0x1b,
0x4d, 0x75, 0x5a, 0xe8, 0x76, 0x4b, 0x3d, 0x66, 0xe0, 0x44, 0x06, 0x55, 0x00, 0xf4, 0x03, 0x33,
0x91, 0x6c, 0x99, 0xcd, 0xcb, 0x32, 0x55, 0x53, 0x71, 0x4a, 0x02, 0x55, 0xd5, 0x49, 0x14, 0xbd,
0xd8, 0xfc, 0xbf, 0x81, 0x93, 0x28, 0x5e, 0xf4, 0x54, 0x03, 0xe1, 0x2a, 0x94, 0xc9, 0xbb, 0x9c,
0x84, 0x9e, 0xdd, 0x11, 0x08, 0x05, 0x89, 0x20, 0x5d, 0x71, 0x23, 0x21, 0xe3, 0xb4, 0x0c, 0xda,
0x86, 0x05, 0x46, 0x18, 0xa3, 0xbe, 0xb7, 0xb6, 0xbb, 0x2b, 0x6e, 0x41, 0x7d, 0x59, 0x5d, 0x96,
0x6a, 0x9f, 0x52, 0x70, 0x0b, 0xad, 0x2c, 0xfb, 0x48, 0x92, 0xa2, 0xfb, 0x86, 0x22, 0xe1, 0x41,
0x15, 0xe8, 0x36, 0xcc, 0x77, 0x32, 0x6f, 0x85, 0xd4, 0x2d, 0x46, 0xdf, 0xbf, 0xb2, 0xef, 0x8c,
0xf0, 0x80, 0x34, 0xfa, 0x0a, 0x98, 0x69, 0x4a, 0xcb, 0xef, 0x85, 0x0e, 0xc1, 0xb6, 0xd7, 0x26,
0xd1, 0x37, 0x24, 0xa5, 0xda, 0xf3, 0x87, 0x07, 0x2b, 0xe6, 0xe6, 0x08, 0x19, 0x3c, 0x72, 0x34,
0x62, 0xb0, 0x18, 0x9b, 0xbf, 0x1d, 0xda, 0xbb, 0xbb, 0xd4, 0x69, 0xfa, 0x1d, 0xea, 0xf4, 0xe5,
0x9d, 0xa7, 0x54, 0x7b, 0x45, 0x4d, 0x70, 0x71, 0x63, 0x98, 0xd0, 0xd1, 0xc1, 0xca, 0xf3, 0xca,
0xf6, 0xa1, 0x7c, 0x3c, 0x5c, 0x37, 0xda, 0x82, 0x4b, 0x7b, 0xc4, 0xee, 0xf0, 0xbd, 0xfa, 0x1e,
0x71, 0xf6, 0xe3, 0x18, 0x32, 0x67, 0x65, 0x6c, 0xc5, 0xfb, 0x7a, 0xe9, 0xee, 0x71, 0x11, 0x3c,
0x6c, 0x1c, 0xfa, 0x99, 0x01, 0x8b, 0x03, 0x2b, 0x1e, 0x7d, 0x6b, 0x64, 0xce, 0x4f, 0xf4, 0x49,
0x47, 0x6b, 0x98, 0xce, 0xda, 0x73, 0x62, 0x39, 0x86, 0xb2, 0xf0, 0xf0, 0x59, 0xa0, 0x9b, 0x00,
0x34, 0x78, 0xcd, 0xee, 0xd2, 0x0e, 0x25, 0xcc, 0xbc, 0x24, 0xf7, 0x6b, 0x49, 0xf8, 0x79, 0xa3,
0x19, 0x53, 0x45, 0x2e, 0x55, 0x4f, 0x7d, 0x9c, 0x92, 0x46, 0x9b, 0x30, 0xaf, 0x9e, 0xfa, 0x6a,
0x63, 0x2e, 0xca, 0x8d, 0xf9, 0x84, 0xbc, 0xb5, 0x37, 0xd3, 0x9c, 0xa3, 0x63, 0x14, 0x3c, 0x30,
0x16, 0xd5, 0xe1, 0x62, 0xda, 0x13, 0xa2, 0x1b, 0xc4, 0xa2, 0x54, 0xb8, 0x28, 0x6e, 0x1f, 0x9b,
0x83, 0x4c, 0x7c, 0x5c, 0x1e, 0xf9, 0xb0, 0x48, 0xbd, 0x61, 0x2e, 0xf3, 0x8c, 0x54, 0xf4, 0x92,
0x58, 0x9f, 0x86, 0xf7, 0x78, 0x77, 0x19, 0xca, 0xc7, 0xc3, 0xf5, 0xa2, 0x06, 0x5c, 0xe2, 0x11,
0x61, 0x9d, 0x8a, 0xc2, 0x74, 0xa7, 0x27, 0xae, 0xad, 0xe6, 0xb3, 0x12, 0xee, 0x59, 0xe1, 0x2a,
0xdb, 0xc7, 0xd9, 0x78, 0xd8, 0x98, 0xa5, 0x5b, 0x30, 0x97, 0x49, 0x68, 0xa7, 0xaa, 0x30, 0x7f,
0x9c, 0x13, 0xa3, 0x53, 0x45, 0x05, 0xfa, 0xae, 0x01, 0xb3, 0xe9, 0x05, 0x52, 0x15, 0x43, 0xe3,
0x0c, 0x5e, 0xab, 0xaa, 0xb2, 0x45, 0x7f, 0xd7, 0x95, 0xe6, 0xe1, 0x0c, 0x28, 0xea, 0x0d, 0xe9,
0x1b, 0xac, 0x8d, 0x5b, 0xb4, 0x9c, 0xb8, 0x6b, 0x60, 0x7d, 0x60, 0xc0, 0xf0, 0x38, 0x40, 0x3e,
0x14, 0x1d, 0xf5, 0xd1, 0x9f, 0x5a, 0x91, 0xb1, 0xbf, 0x22, 0xca, 0x7c, 0x3b, 0x18, 0xbd, 0x50,
0x89, 0x69, 0x58, 0x83, 0x58, 0x7f, 0x37, 0xa0, 0x20, 0xdf, 0x64, 0xa0, 0x17, 0x52, 0xfb, 0x59,
0x2b, 0x2b, 0x0b, 0xf2, 0x6f, 0x90, 0x7e, 0xb4, 0xb9, 0x97, 0x33, 0x9b, 0x9b, 0x1c, 0xa4, 0x6f,
0x0a, 0xa2, 0xda, 0x6b, 0x74, 0x1d, 0xa6, 0xc9, 0xee, 0x2e, 0x71, 0xb8, 0x3a, 0xc5, 0x5e, 0x88,
0x4b, 0xc7, 0x0d, 0x49, 0x15, 0x67, 0x8d, 0x04, 0x8b, 0x1e, 0xb1, 0x12, 0x46, 0x14, 0x4a, 0x9c,
0x76, 0xc9, 0x9a, 0xeb, 0x12, 0xf7, 0x4c, 0xda, 0xbd, 0xf2, 0x2e, 0xba, 0x1d, 0xab, 0xc4, 0x89,
0x76, 0x71, 0x87, 0x7f, 0x4e, 0x9c, 0x73, 0xee, 0xa6, 0xef, 0xd8, 0x9d, 0xa8, 0x56, 0xc7, 0x64,
0x97, 0x84, 0xc4, 0x73, 0xe4, 0x07, 0x4c, 0x76, 0x40, 0xef, 0x84, 0x7e, 0x2f, 0x6e, 0xd0, 0xca,
0x75, 0x5b, 0x6b, 0x36, 0x24, 0x0d, 0x6b, 0xae, 0x28, 0xbc, 0xf6, 0xa9, 0xe7, 0xaa, 0xd5, 0xd0,
0x85, 0xd7, 0x1b, 0xd4, 0x73, 0xb1, 0xe4, 0xe8, 0x4a, 0x31, 0x3f, 0xaa, 0x52, 0xb4, 0x6e, 0x43,
0x39, 0xf5, 0xcd, 0xa1, 0xa8, 0x02, 0xba, 0xe2, 0x4f, 0xd3, 0xe6, 0x7b, 0x83, 0x55, 0xc0, 0x56,
0xcc, 0xc0, 0x89, 0x4c, 0xed, 0x6b, 0x0f, 0x1f, 0x2d, 0x5f, 0xf8, 0xf0, 0xd1, 0xf2, 0x85, 0x8f,
0x1e, 0x2d, 0x5f, 0x78, 0xef, 0x70, 0xd9, 0x78, 0x78, 0xb8, 0x6c, 0x7c, 0x78, 0xb8, 0x6c, 0x7c,
0x74, 0xb8, 0x6c, 0x7c, 0x7c, 0xb8, 0x6c, 0xfc, 0xf4, 0x2f, 0xcb, 0x17, 0xbe, 0x7a, 0x7d, 0xac,
0x2f, 0x7e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x40, 0xb2, 0x39, 0xc1, 0x29, 0x2c, 0x00, 0x00,
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
// Optional field: emitted only when set.
if m.TimeoutSeconds != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
i--
dAtA[i] = 0x8 // field 1 (TimeoutSeconds), wire type 0 (varint)
}
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *Container) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *Container) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.VolumeMounts) > 0 {
// Repeated messages are written last-to-first so they decode in order.
for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a // field 9 (VolumeMounts), wire type 2 (length-delimited)
}
}
if len(m.Ports) > 0 {
for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32 // field 6 (Ports), wire type 2 (length-delimited)
}
}
i -= len(m.Image)
copy(dAtA[i:], m.Image)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
i--
dAtA[i] = 0x12 // field 2 (Image), wire type 2 (length-delimited)
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa // field 1 (Name), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *ContainerPort) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.HostIP)
copy(dAtA[i:], m.HostIP)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP)))
i--
dAtA[i] = 0x2a // field 5 (HostIP), wire type 2 (length-delimited)
i -= len(m.Protocol)
copy(dAtA[i:], m.Protocol)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
i--
dAtA[i] = 0x22 // field 4 (Protocol), wire type 2 (length-delimited)
i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort))
i--
dAtA[i] = 0x18 // field 3 (ContainerPort), wire type 0 (varint)
i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort))
i--
dAtA[i] = 0x10 // field 2 (HostPort), wire type 0 (varint)
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa // field 1 (Name), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *ContainerState) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *ContainerState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
// Optional sub-message: emitted only when set.
if m.Running != nil {
{
size, err := m.Running.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12 // field 2 (Running), wire type 2 (length-delimited)
}
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *ContainerStateRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa // field 1 (StartedAt), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *ContainerStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.ContainerID)
copy(dAtA[i:], m.ContainerID)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID)))
i--
dAtA[i] = 0x42 // field 8 (ContainerID), wire type 2 (length-delimited)
{
size, err := m.State.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12 // field 2 (State), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *EndpointAddress) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
// Optional field: emitted only when set.
if m.NodeName != nil {
i -= len(*m.NodeName)
copy(dAtA[i:], *m.NodeName)
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
i--
dAtA[i] = 0x22 // field 4 (NodeName), wire type 2 (length-delimited)
}
i -= len(m.Hostname)
copy(dAtA[i:], m.Hostname)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname)))
i--
dAtA[i] = 0x1a // field 3 (Hostname), wire type 2 (length-delimited)
i -= len(m.IP)
copy(dAtA[i:], m.IP)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
i--
dAtA[i] = 0xa // field 1 (IP), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *EndpointPort) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
// Optional field: emitted only when set.
if m.AppProtocol != nil {
i -= len(*m.AppProtocol)
copy(dAtA[i:], *m.AppProtocol)
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
i--
dAtA[i] = 0x22 // field 4 (AppProtocol), wire type 2 (length-delimited)
}
i -= len(m.Protocol)
copy(dAtA[i:], m.Protocol)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
i--
dAtA[i] = 0x1a // field 3 (Protocol), wire type 2 (length-delimited)
i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
i--
dAtA[i] = 0x10 // field 2 (Port), wire type 0 (varint)
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa // field 1 (Name), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *EndpointSubset) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *EndpointSubset) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Ports) > 0 {
// Repeated messages are written last-to-first so they decode in order.
for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a // field 3 (Ports), wire type 2 (length-delimited)
}
}
if len(m.Addresses) > 0 {
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa // field 1 (Addresses), wire type 2 (length-delimited)
}
}
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *Endpoints) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *Endpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Subsets) > 0 {
// Repeated messages are written last-to-first so they decode in order.
for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12 // field 2 (Subsets), wire type 2 (length-delimited)
}
}
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa // field 1 (ObjectMeta), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *EndpointsList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *EndpointsList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
// Repeated messages are written last-to-first so they decode in order.
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12 // field 2 (Items), wire type 2 (length-delimited)
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa // field 1 (ListMeta), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Ports) > 0 {
// Repeated messages are written last-to-first so they decode in order.
for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22 // field 4 (Ports), wire type 2 (length-delimited)
}
}
// Optional field: emitted only when set.
if m.IPMode != nil {
i -= len(*m.IPMode)
copy(dAtA[i:], *m.IPMode)
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPMode)))
i--
dAtA[i] = 0x1a // field 3 (IPMode), wire type 2 (length-delimited)
}
i -= len(m.Hostname)
copy(dAtA[i:], m.Hostname)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname)))
i--
dAtA[i] = 0x12 // field 2 (Hostname), wire type 2 (length-delimited)
i -= len(m.IP)
copy(dAtA[i:], m.IP)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
i--
dAtA[i] = 0xa // field 1 (IP), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *LoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Ingress) > 0 {
// Repeated messages are written last-to-first so they decode in order.
for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa // field 1 (Ingress), wire type 2 (length-delimited)
}
}
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *Namespace) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *Namespace) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa // field 1 (ObjectMeta), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *NamespaceList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *NamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
// Repeated messages are written last-to-first so they decode in order.
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12 // field 2 (Items), wire type 2 (length-delimited)
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa // field 1 (ListMeta), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *Node) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *Node) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a // field 3 (Status), wire type 2 (length-delimited)
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12 // field 2 (Spec), wire type 2 (length-delimited)
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa // field 1 (ObjectMeta), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *NodeAddress) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *NodeAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.Address)
copy(dAtA[i:], m.Address)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address)))
i--
dAtA[i] = 0x12 // field 2 (Address), wire type 2 (length-delimited)
i -= len(m.Type)
copy(dAtA[i:], m.Type)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
i--
dAtA[i] = 0xa // field 1 (Type), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format, allocating the result buffer.
func (m *NodeCondition) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA (which must hold at least m.Size() bytes)
// and returns the number of bytes written.
func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order from the end of the buffer toward the front,
// and returns the number of bytes written.
func (m *NodeCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
// i is the write cursor; it moves backwards from len(dAtA).
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.Reason)
copy(dAtA[i:], m.Reason)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
i--
dAtA[i] = 0x2a // field 5 (Reason), wire type 2 (length-delimited)
i -= len(m.Status)
copy(dAtA[i:], m.Status)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
i--
dAtA[i] = 0x12 // field 2 (Status), wire type 2 (length-delimited)
i -= len(m.Type)
copy(dAtA[i:], m.Type)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
i--
dAtA[i] = 0xa // field 1 (Type), wire type 2 (length-delimited)
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *NodeList) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *NodeList) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *NodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 2 (Items), repeated message; iterated in reverse so entries
	// land in forward order in the final buffer. Tag 0x12 per entry.
	if len(m.Items) > 0 {
		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// field 1 (ListMeta), embedded message, tag 0xa.
	{
		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *NodeSpec) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 7 (PodCIDRs), repeated string, tag 0x3a per entry.
	if len(m.PodCIDRs) > 0 {
		for iNdEx := len(m.PodCIDRs) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.PodCIDRs[iNdEx])
			copy(dAtA[i:], m.PodCIDRs[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDRs[iNdEx])))
			i--
			dAtA[i] = 0x3a
		}
	}
	// field 5 (Taints), repeated message, tag 0x2a per entry.
	if len(m.Taints) > 0 {
		for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x2a
		}
	}
	// field 3 (ProviderID), tag 0x1a.
	i -= len(m.ProviderID)
	copy(dAtA[i:], m.ProviderID)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID)))
	i--
	dAtA[i] = 0x1a
	// field 1 (PodCIDR), tag 0xa.
	i -= len(m.PodCIDR)
	copy(dAtA[i:], m.PodCIDR)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *NodeStatus) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 5 (Addresses), repeated message, tag 0x2a per entry.
	if len(m.Addresses) > 0 {
		for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x2a
		}
	}
	// field 4 (Conditions), repeated message, tag 0x22 per entry.
	if len(m.Conditions) > 0 {
		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x22
		}
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *Pod) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *Pod) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 3 (Status), embedded message, tag 0x1a.
	{
		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x1a
	// field 2 (Spec), embedded message, tag 0x12.
	{
		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x12
	// field 1 (ObjectMeta), embedded message, tag 0xa.
	{
		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *PodCondition) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 6 (Message), tag 0x32.
	i -= len(m.Message)
	copy(dAtA[i:], m.Message)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
	i--
	dAtA[i] = 0x32
	// field 5 (Reason), tag 0x2a.
	i -= len(m.Reason)
	copy(dAtA[i:], m.Reason)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
	i--
	dAtA[i] = 0x2a
	// field 4 (LastTransitionTime), embedded message, tag 0x22.
	{
		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x22
	// field 3 (LastProbeTime), embedded message, tag 0x1a.
	{
		size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x1a
	// field 2 (Status), tag 0x12.
	i -= len(m.Status)
	copy(dAtA[i:], m.Status)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
	i--
	dAtA[i] = 0x12
	// field 1 (Type), tag 0xa.
	i -= len(m.Type)
	copy(dAtA[i:], m.Type)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *PodIP) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *PodIP) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA and returns
// the byte count written. Only field 1 (IP, tag 0xa) exists.
func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	i -= len(m.IP)
	copy(dAtA[i:], m.IP)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *PodList) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *PodList) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 2 (Items), repeated message, tag 0x12 per entry.
	if len(m.Items) > 0 {
		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// field 1 (ListMeta), embedded message, tag 0xa.
	{
		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA and returns
// the byte count written. Only field 1 (ConditionType, tag 0xa) exists.
func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	i -= len(m.ConditionType)
	copy(dAtA[i:], m.ConditionType)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *PodSpec) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 20 (InitContainers), repeated message; field numbers >= 16 need
	// a two-byte tag varint (0xa2 0x01), written backwards: 0x1 then 0xa2.
	if len(m.InitContainers) > 0 {
		for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0xa2
		}
	}
	// field 11 (HostNetwork), bool encoded as 0/1, tag 0x58; written
	// unconditionally (proto2-style non-optional field).
	i--
	if m.HostNetwork {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x58
	// field 10 (NodeName), tag 0x52.
	i -= len(m.NodeName)
	copy(dAtA[i:], m.NodeName)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
	i--
	dAtA[i] = 0x52
	// field 8 (ServiceAccountName), tag 0x42.
	i -= len(m.ServiceAccountName)
	copy(dAtA[i:], m.ServiceAccountName)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
	i--
	dAtA[i] = 0x42
	// field 2 (Containers), repeated message, tag 0x12 per entry.
	if len(m.Containers) > 0 {
		for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *PodStatus) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 12 (PodIPs), repeated message, tag 0x62 per entry.
	if len(m.PodIPs) > 0 {
		for iNdEx := len(m.PodIPs) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.PodIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x62
		}
	}
	// field 9 (QOSClass), tag 0x4a.
	i -= len(m.QOSClass)
	copy(dAtA[i:], m.QOSClass)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass)))
	i--
	dAtA[i] = 0x4a
	// field 8 (ContainerStatuses), repeated message, tag 0x42 per entry.
	if len(m.ContainerStatuses) > 0 {
		for iNdEx := len(m.ContainerStatuses) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.ContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x42
		}
	}
	// field 7 (StartTime), optional message; omitted when nil. Tag 0x3a.
	if m.StartTime != nil {
		{
			size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x3a
	}
	// field 6 (PodIP), tag 0x32.
	i -= len(m.PodIP)
	copy(dAtA[i:], m.PodIP)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP)))
	i--
	dAtA[i] = 0x32
	// field 5 (HostIP), tag 0x2a.
	i -= len(m.HostIP)
	copy(dAtA[i:], m.HostIP)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP)))
	i--
	dAtA[i] = 0x2a
	// field 2 (Conditions), repeated message, tag 0x12 per entry.
	if len(m.Conditions) > 0 {
		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// field 1 (Phase), tag 0xa.
	i -= len(m.Phase)
	copy(dAtA[i:], m.Phase)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *PortStatus) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *PortStatus) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *PortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 3 (Error), optional string; omitted when nil. Tag 0x1a.
	if m.Error != nil {
		i -= len(*m.Error)
		copy(dAtA[i:], *m.Error)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error)))
		i--
		dAtA[i] = 0x1a
	}
	// field 2 (Protocol), tag 0x12.
	i -= len(m.Protocol)
	copy(dAtA[i:], m.Protocol)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
	i--
	dAtA[i] = 0x12
	// field 1 (Port), varint, tag 0x8.
	i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
	i--
	dAtA[i] = 0x8
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *Secret) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *Secret) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written. Map fields are
// emitted with sorted keys so the output is deterministic.
func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 5 (Immutable), optional bool; omitted when nil. Tag 0x28.
	if m.Immutable != nil {
		i--
		if *m.Immutable {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x28
	}
	// field 4 (StringData), map<string,string>. Each entry is a nested
	// message: value (tag 0x12), key (tag 0xa), entry length, entry tag 0x22.
	if len(m.StringData) > 0 {
		keysForStringData := make([]string, 0, len(m.StringData))
		for k := range m.StringData {
			keysForStringData = append(keysForStringData, string(k))
		}
		github_com_gogo_protobuf_sortkeys.Strings(keysForStringData)
		for iNdEx := len(keysForStringData) - 1; iNdEx >= 0; iNdEx-- {
			v := m.StringData[string(keysForStringData[iNdEx])]
			baseI := i
			i -= len(v)
			copy(dAtA[i:], v)
			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
			i--
			dAtA[i] = 0x12
			i -= len(keysForStringData[iNdEx])
			copy(dAtA[i:], keysForStringData[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStringData[iNdEx])))
			i--
			dAtA[i] = 0xa
			// baseI-i is the byte length of this map entry.
			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x22
		}
	}
	// field 3 (Type), tag 0x1a.
	i -= len(m.Type)
	copy(dAtA[i:], m.Type)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
	i--
	dAtA[i] = 0x1a
	// field 2 (Data), map<string,bytes>; a nil byte-slice value skips the
	// value sub-field entirely. Entry tag 0x12.
	if len(m.Data) > 0 {
		keysForData := make([]string, 0, len(m.Data))
		for k := range m.Data {
			keysForData = append(keysForData, string(k))
		}
		github_com_gogo_protobuf_sortkeys.Strings(keysForData)
		for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- {
			v := m.Data[string(keysForData[iNdEx])]
			baseI := i
			if v != nil {
				i -= len(v)
				copy(dAtA[i:], v)
				i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
				i--
				dAtA[i] = 0x12
			}
			i -= len(keysForData[iNdEx])
			copy(dAtA[i:], keysForData[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx])))
			i--
			dAtA[i] = 0xa
			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x12
		}
	}
	// field 1 (ObjectMeta), embedded message, tag 0xa.
	{
		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *SecretList) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *SecretList) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 2 (Items), repeated message, tag 0x12 per entry.
	if len(m.Items) > 0 {
		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// field 1 (ListMeta), embedded message, tag 0xa.
	{
		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *Service) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *Service) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 3 (Status), embedded message, tag 0x1a.
	{
		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x1a
	// field 2 (Spec), embedded message, tag 0x12.
	{
		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x12
	// field 1 (ObjectMeta), embedded message, tag 0xa.
	{
		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *ServiceList) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *ServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 2 (Items), repeated message, tag 0x12 per entry.
	if len(m.Items) > 0 {
		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// field 1 (ListMeta), embedded message, tag 0xa.
	{
		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *ServicePort) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 6 (AppProtocol), optional string; omitted when nil. Tag 0x32.
	if m.AppProtocol != nil {
		i -= len(*m.AppProtocol)
		copy(dAtA[i:], *m.AppProtocol)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
		i--
		dAtA[i] = 0x32
	}
	// field 5 (NodePort), varint, tag 0x28.
	i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort))
	i--
	dAtA[i] = 0x28
	// field 4 (TargetPort), embedded message, tag 0x22.
	{
		size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x22
	// field 3 (Port), varint, tag 0x18.
	i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
	i--
	dAtA[i] = 0x18
	// field 2 (Protocol), tag 0x12.
	i -= len(m.Protocol)
	copy(dAtA[i:], m.Protocol)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
	i--
	dAtA[i] = 0x12
	// field 1 (Name), tag 0xa.
	i -= len(m.Name)
	copy(dAtA[i:], m.Name)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *ServiceSpec) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written. Fields numbered
// 16 and above use a two-byte tag varint, emitted backwards (high byte
// first in source order, so the tag reads low-byte-first in the buffer).
func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 23 (TrafficDistribution), optional string; tag 0xba 0x01.
	if m.TrafficDistribution != nil {
		i -= len(*m.TrafficDistribution)
		copy(dAtA[i:], *m.TrafficDistribution)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TrafficDistribution)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0xba
	}
	// field 22 (InternalTrafficPolicy), optional string; tag 0xb2 0x01.
	if m.InternalTrafficPolicy != nil {
		i -= len(*m.InternalTrafficPolicy)
		copy(dAtA[i:], *m.InternalTrafficPolicy)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InternalTrafficPolicy)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0xb2
	}
	// field 21 (LoadBalancerClass), optional string; tag 0xaa 0x01.
	if m.LoadBalancerClass != nil {
		i -= len(*m.LoadBalancerClass)
		copy(dAtA[i:], *m.LoadBalancerClass)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LoadBalancerClass)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0xaa
	}
	// field 19 (IPFamilies), repeated string; tag 0x9a 0x01.
	if len(m.IPFamilies) > 0 {
		for iNdEx := len(m.IPFamilies) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.IPFamilies[iNdEx])
			copy(dAtA[i:], m.IPFamilies[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPFamilies[iNdEx])))
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0x9a
		}
	}
	// field 18 (ClusterIPs), repeated string; tag 0x92 0x01.
	if len(m.ClusterIPs) > 0 {
		for iNdEx := len(m.ClusterIPs) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.ClusterIPs[iNdEx])
			copy(dAtA[i:], m.ClusterIPs[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIPs[iNdEx])))
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0x92
		}
	}
	// field 17 (IPFamilyPolicy), optional string; tag 0x8a 0x01.
	if m.IPFamilyPolicy != nil {
		i -= len(*m.IPFamilyPolicy)
		copy(dAtA[i:], *m.IPFamilyPolicy)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamilyPolicy)))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x8a
	}
	// field 14 (SessionAffinityConfig), optional message; tag 0x72.
	if m.SessionAffinityConfig != nil {
		{
			size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x72
	}
	// field 12 (HealthCheckNodePort), varint, tag 0x60.
	i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort))
	i--
	dAtA[i] = 0x60
	// field 11 (ExternalTrafficPolicy), tag 0x5a.
	i -= len(m.ExternalTrafficPolicy)
	copy(dAtA[i:], m.ExternalTrafficPolicy)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy)))
	i--
	dAtA[i] = 0x5a
	// field 9 (LoadBalancerSourceRanges), repeated string, tag 0x4a.
	if len(m.LoadBalancerSourceRanges) > 0 {
		for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.LoadBalancerSourceRanges[iNdEx])
			copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx])))
			i--
			dAtA[i] = 0x4a
		}
	}
	// field 8 (LoadBalancerIP), tag 0x42.
	i -= len(m.LoadBalancerIP)
	copy(dAtA[i:], m.LoadBalancerIP)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP)))
	i--
	dAtA[i] = 0x42
	// field 7 (SessionAffinity), tag 0x3a.
	i -= len(m.SessionAffinity)
	copy(dAtA[i:], m.SessionAffinity)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity)))
	i--
	dAtA[i] = 0x3a
	// field 5 (ExternalIPs), repeated string, tag 0x2a.
	if len(m.ExternalIPs) > 0 {
		for iNdEx := len(m.ExternalIPs) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.ExternalIPs[iNdEx])
			copy(dAtA[i:], m.ExternalIPs[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalIPs[iNdEx])))
			i--
			dAtA[i] = 0x2a
		}
	}
	// field 4 (Type), tag 0x22.
	i -= len(m.Type)
	copy(dAtA[i:], m.Type)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
	i--
	dAtA[i] = 0x22
	// field 3 (ClusterIP), tag 0x1a.
	i -= len(m.ClusterIP)
	copy(dAtA[i:], m.ClusterIP)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP)))
	i--
	dAtA[i] = 0x1a
	// field 2 (Selector), map<string,string>; keys sorted for deterministic
	// output. Each entry: value (tag 0x12), key (tag 0xa), length, tag 0x12.
	if len(m.Selector) > 0 {
		keysForSelector := make([]string, 0, len(m.Selector))
		for k := range m.Selector {
			keysForSelector = append(keysForSelector, string(k))
		}
		github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
		for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
			v := m.Selector[string(keysForSelector[iNdEx])]
			baseI := i
			i -= len(v)
			copy(dAtA[i:], v)
			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
			i--
			dAtA[i] = 0x12
			i -= len(keysForSelector[iNdEx])
			copy(dAtA[i:], keysForSelector[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx])))
			i--
			dAtA[i] = 0xa
			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x12
		}
	}
	// field 1 (Ports), repeated message, tag 0xa per entry.
	if len(m.Ports) > 0 {
		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *ServiceStatus) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 2 (Conditions), repeated message, tag 0x12 per entry.
	if len(m.Conditions) > 0 {
		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// field 1 (LoadBalancer), embedded message, tag 0xa.
	{
		size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA and returns
// the byte count written. Only field 1 (ClientIP, optional message) exists.
func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.ClientIP != nil {
		{
			size, err := m.ClientIP.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *Taint) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *Taint) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *Taint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 4 (TimeAdded), optional message; omitted when nil. Tag 0x22.
	if m.TimeAdded != nil {
		{
			size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x22
	}
	// field 3 (Effect), tag 0x1a.
	i -= len(m.Effect)
	copy(dAtA[i:], m.Effect)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
	i--
	dAtA[i] = 0x1a
	// field 2 (Value), tag 0x12.
	i -= len(m.Value)
	copy(dAtA[i:], m.Value)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
	i--
	dAtA[i] = 0x12
	// field 1 (Key), tag 0xa.
	i -= len(m.Key)
	copy(dAtA[i:], m.Key)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA (highest
// field number first) and returns the byte count written.
func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// field 3 (Name), tag 0x1a.
	i -= len(m.Name)
	copy(dAtA[i:], m.Name)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
	i--
	dAtA[i] = 0x1a
	// field 2 (Kind), tag 0x12.
	i -= len(m.Kind)
	copy(dAtA[i:], m.Kind)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
	i--
	dAtA[i] = 0x12
	// field 1 (APIGroup), optional string; omitted when nil. Tag 0xa.
	if m.APIGroup != nil {
		i -= len(*m.APIGroup)
		copy(dAtA[i:], *m.APIGroup)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *VolumeMount) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards from the end of dAtA and returns
// the byte count written. Only field 3 (MountPath, tag 0x1a) is emitted.
func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	i -= len(m.MountPath)
	copy(dAtA[i:], m.MountPath)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath)))
	i--
	dAtA[i] = 0x1a
	return len(dAtA) - i, nil
}
// encodeVarintGenerated writes v as a protobuf base-128 varint ending just
// before offset and returns the index of the varint's first byte. The caller
// must have reserved sovGenerated(v) bytes of dAtA below offset.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	start := offset - sovGenerated(v)
	pos := start
	// Emit 7 bits per byte, least-significant group first; every byte except
	// the last carries the 0x80 continuation bit.
	for v >= 1<<7 {
		dAtA[pos] = uint8(v)&0x7f | 0x80
		v >>= 7
		pos++
	}
	dAtA[pos] = uint8(v)
	return start
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes.
func (m *ClientIPConfig) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	// field 1 (TimeoutSeconds), optional varint: 1 tag byte + varint.
	if m.TimeoutSeconds != nil {
		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
// Each length-delimited field costs 1 tag byte + length varint + payload.
func (m *Container) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Image)
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Ports) > 0 {
		for _, e := range m.Ports {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.VolumeMounts) > 0 {
		for _, e := range m.VolumeMounts {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *ContainerPort) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	n += 1 + l + sovGenerated(uint64(l))
	n += 1 + sovGenerated(uint64(m.HostPort))
	n += 1 + sovGenerated(uint64(m.ContainerPort))
	l = len(m.Protocol)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.HostIP)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *ContainerState) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	// field 1 (Running), optional message; contributes nothing when nil.
	if m.Running != nil {
		l = m.Running.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *ContainerStateRunning) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.StartedAt.Size()
	n += 1 + l + sovGenerated(uint64(l))
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *ContainerStatus) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.State.Size()
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.ContainerID)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes.
func (m *EndpointAddress) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.IP)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Hostname)
	n += 1 + l + sovGenerated(uint64(l))
	// NodeName is optional; nil contributes nothing.
	if m.NodeName != nil {
		l = len(*m.NodeName)
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *EndpointPort) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	n += 1 + l + sovGenerated(uint64(l))
	n += 1 + sovGenerated(uint64(m.Port))
	l = len(m.Protocol)
	n += 1 + l + sovGenerated(uint64(l))
	if m.AppProtocol != nil {
		l = len(*m.AppProtocol)
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *EndpointSubset) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Addresses) > 0 {
		for _, e := range m.Addresses {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.Ports) > 0 {
		for _, e := range m.Ports {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *Endpoints) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ObjectMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Subsets) > 0 {
		for _, e := range m.Subsets {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *EndpointsList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *LoadBalancerIngress) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.IP)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Hostname)
	n += 1 + l + sovGenerated(uint64(l))
	if m.IPMode != nil {
		l = len(*m.IPMode)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if len(m.Ports) > 0 {
		for _, e := range m.Ports {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *LoadBalancerStatus) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Ingress) > 0 {
		for _, e := range m.Ingress {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes.
func (m *Namespace) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ObjectMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *NamespaceList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *Node) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ObjectMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	l = m.Spec.Size()
	n += 1 + l + sovGenerated(uint64(l))
	l = m.Status.Size()
	n += 1 + l + sovGenerated(uint64(l))
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *NodeAddress) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Type)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Address)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *NodeCondition) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Type)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Status)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Reason)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *NodeList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *NodeSpec) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.PodCIDR)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.ProviderID)
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Taints) > 0 {
		for _, e := range m.Taints {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.PodCIDRs) > 0 {
		for _, s := range m.PodCIDRs {
			l = len(s)
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the number of bytes the protobuf encoding of m occupies.
func (m *NodeStatus) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Conditions) > 0 {
		for _, e := range m.Conditions {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.Addresses) > 0 {
		for _, e := range m.Addresses {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
func (m *Pod) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Status.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *PodCondition) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Type)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Status)
	n += 1 + l + sovGenerated(uint64(l))
	l = m.LastProbeTime.Size()
	n += 1 + l + sovGenerated(uint64(l))
	l = m.LastTransitionTime.Size()
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Reason)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Message)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *PodIP) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.IP)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *PodList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *PodReadinessGate) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.ConditionType)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *PodSpec) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Containers) > 0 {
		for _, e := range m.Containers {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = len(m.ServiceAccountName)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.NodeName)
	n += 1 + l + sovGenerated(uint64(l))
	n += 2 // bool field: 1-byte key + 1-byte value
	if len(m.InitContainers) > 0 {
		for _, e := range m.InitContainers {
			l = e.Size()
			// 2-byte key: this field's tag needs a two-byte varint key.
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
// Optional pointer fields (StartTime) contribute only when non-nil.
func (m *PodStatus) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Phase)
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Conditions) > 0 {
		for _, e := range m.Conditions {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = len(m.HostIP)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.PodIP)
	n += 1 + l + sovGenerated(uint64(l))
	if m.StartTime != nil {
		l = m.StartTime.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	if len(m.ContainerStatuses) > 0 {
		for _, e := range m.ContainerStatuses {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = len(m.QOSClass)
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.PodIPs) > 0 {
		for _, e := range m.PodIPs {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *PortStatus) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	// Port is a varint field: key byte + varint payload.
	n += 1 + sovGenerated(uint64(m.Port))
	l = len(m.Protocol)
	n += 1 + l + sovGenerated(uint64(l))
	if m.Error != nil {
		l = len(*m.Error)
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
// Map fields are encoded as repeated key/value entry messages, so each entry
// is sized as its own nested message.
func (m *Secret) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ObjectMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Data) > 0 {
		for k, v := range m.Data {
			_ = k
			_ = v
			l = 0
			if v != nil {
				// value sub-field: key byte + payload + its length varint
				l = 1 + len(v) + sovGenerated(uint64(len(v)))
			}
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l
			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
		}
	}
	l = len(m.Type)
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.StringData) > 0 {
		for k, v := range m.StringData {
			_ = k
			_ = v
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
		}
	}
	if m.Immutable != nil {
		n += 2 // optional bool: 1-byte key + 1-byte value
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *SecretList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *Service) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ObjectMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	l = m.Spec.Size()
	n += 1 + l + sovGenerated(uint64(l))
	l = m.Status.Size()
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *ServiceList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *ServicePort) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Protocol)
	n += 1 + l + sovGenerated(uint64(l))
	n += 1 + sovGenerated(uint64(m.Port))
	l = m.TargetPort.Size()
	n += 1 + l + sovGenerated(uint64(l))
	n += 1 + sovGenerated(uint64(m.NodePort))
	if m.AppProtocol != nil {
		l = len(*m.AppProtocol)
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
// Fields sized with a 2-byte key (n += 2 + ...) have tags large enough that
// their key varint occupies two bytes.
func (m *ServiceSpec) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Ports) > 0 {
		for _, e := range m.Ports {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.Selector) > 0 {
		for k, v := range m.Selector {
			_ = k
			_ = v
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
		}
	}
	l = len(m.ClusterIP)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Type)
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.ExternalIPs) > 0 {
		for _, s := range m.ExternalIPs {
			l = len(s)
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = len(m.SessionAffinity)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.LoadBalancerIP)
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.LoadBalancerSourceRanges) > 0 {
		for _, s := range m.LoadBalancerSourceRanges {
			l = len(s)
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = len(m.ExternalTrafficPolicy)
	n += 1 + l + sovGenerated(uint64(l))
	n += 1 + sovGenerated(uint64(m.HealthCheckNodePort))
	if m.SessionAffinityConfig != nil {
		l = m.SessionAffinityConfig.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.IPFamilyPolicy != nil {
		l = len(*m.IPFamilyPolicy)
		n += 2 + l + sovGenerated(uint64(l))
	}
	if len(m.ClusterIPs) > 0 {
		for _, s := range m.ClusterIPs {
			l = len(s)
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.IPFamilies) > 0 {
		for _, s := range m.IPFamilies {
			l = len(s)
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	if m.LoadBalancerClass != nil {
		l = len(*m.LoadBalancerClass)
		n += 2 + l + sovGenerated(uint64(l))
	}
	if m.InternalTrafficPolicy != nil {
		l = len(*m.InternalTrafficPolicy)
		n += 2 + l + sovGenerated(uint64(l))
	}
	if m.TrafficDistribution != nil {
		l = len(*m.TrafficDistribution)
		n += 2 + l + sovGenerated(uint64(l))
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *ServiceStatus) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.LoadBalancer.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Conditions) > 0 {
		for _, e := range m.Conditions {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *SessionAffinityConfig) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.ClientIP != nil {
		l = m.ClientIP.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *Taint) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Key)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Value)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Effect)
	n += 1 + l + sovGenerated(uint64(l))
	if m.TimeAdded != nil {
		l = m.TimeAdded.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *TypedLocalObjectReference) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.APIGroup != nil {
		l = len(*m.APIGroup)
		n += 1 + l + sovGenerated(uint64(l))
	}
	l = len(m.Kind)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Name)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// Size returns the byte length of m's protobuf encoding; a nil receiver sizes to 0.
func (m *VolumeMount) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.MountPath)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// sovGenerated reports how many bytes the base-128 varint encoding of x
// occupies (1–10). The |1 forces at least one significant bit so that
// zero still counts as a one-byte value.
func sovGenerated(x uint64) (n int) {
	significant := math_bits.Len64(x | 1)
	return (significant + 6) / 7
}
// sozGenerated sizes a zigzag-encoded (sint64-style) value: the sign bit is
// folded into the low bit so small negative magnitudes stay small on the wire.
func sozGenerated(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovGenerated(zigzag)
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *ClientIPConfig) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ClientIPConfig{`,
		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil". Repeated fields are pre-rendered with the leading
// '&' stripped from each element. (The same-string strings.Replace calls are
// generator artifacts: type name equals its debug name, so they are no-ops.)
func (this *Container) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForPorts := "[]ContainerPort{"
	for _, f := range this.Ports {
		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + ","
	}
	repeatedStringForPorts += "}"
	repeatedStringForVolumeMounts := "[]VolumeMount{"
	for _, f := range this.VolumeMounts {
		repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + ","
	}
	repeatedStringForVolumeMounts += "}"
	s := strings.Join([]string{`&Container{`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
		`Ports:` + repeatedStringForPorts + `,`,
		`VolumeMounts:` + repeatedStringForVolumeMounts + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *ContainerPort) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ContainerPort{`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`,
		`ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`,
		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
		`HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *ContainerState) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ContainerState{`,
		`Running:` + strings.Replace(this.Running.String(), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil". The Replace rewrites the bare "Time" type name to
// its qualified debug form "v1.Time".
func (this *ContainerStateRunning) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ContainerStateRunning{`,
		`StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *ContainerStatus) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ContainerStatus{`,
		`State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`,
		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *EndpointAddress) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&EndpointAddress{`,
		`IP:` + fmt.Sprintf("%v", this.IP) + `,`,
		`Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
		`NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *EndpointPort) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&EndpointPort{`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
		`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *EndpointSubset) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForAddresses := "[]EndpointAddress{"
	for _, f := range this.Addresses {
		repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + ","
	}
	repeatedStringForAddresses += "}"
	repeatedStringForPorts := "[]EndpointPort{"
	for _, f := range this.Ports {
		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + ","
	}
	repeatedStringForPorts += "}"
	s := strings.Join([]string{`&EndpointSubset{`,
		`Addresses:` + repeatedStringForAddresses + `,`,
		`Ports:` + repeatedStringForPorts + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *Endpoints) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForSubsets := "[]EndpointSubset{"
	for _, f := range this.Subsets {
		repeatedStringForSubsets += strings.Replace(strings.Replace(f.String(), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + ","
	}
	repeatedStringForSubsets += "}"
	s := strings.Join([]string{`&Endpoints{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Subsets:` + repeatedStringForSubsets + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *EndpointsList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]Endpoints{"
	for _, f := range this.Items {
		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Endpoints", "Endpoints", 1), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&EndpointsList{`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *LoadBalancerIngress) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForPorts := "[]PortStatus{"
	for _, f := range this.Ports {
		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "PortStatus", "PortStatus", 1), `&`, ``, 1) + ","
	}
	repeatedStringForPorts += "}"
	s := strings.Join([]string{`&LoadBalancerIngress{`,
		`IP:` + fmt.Sprintf("%v", this.IP) + `,`,
		`Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
		`IPMode:` + valueToStringGenerated(this.IPMode) + `,`,
		`Ports:` + repeatedStringForPorts + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *LoadBalancerStatus) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForIngress := "[]LoadBalancerIngress{"
	for _, f := range this.Ingress {
		repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + ","
	}
	repeatedStringForIngress += "}"
	s := strings.Join([]string{`&LoadBalancerStatus{`,
		`Ingress:` + repeatedStringForIngress + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *Namespace) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Namespace{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *NamespaceList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]Namespace{"
	for _, f := range this.Items {
		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&NamespaceList{`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *Node) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Node{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`,
		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *NodeAddress) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&NodeAddress{`,
		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
		`Address:` + fmt.Sprintf("%v", this.Address) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *NodeCondition) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&NodeCondition{`,
		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *NodeList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]Node{"
	for _, f := range this.Items {
		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Node", "Node", 1), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&NodeList{`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *NodeSpec) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForTaints := "[]Taint{"
	for _, f := range this.Taints {
		repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "Taint", "Taint", 1), `&`, ``, 1) + ","
	}
	repeatedStringForTaints += "}"
	s := strings.Join([]string{`&NodeSpec{`,
		`PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`,
		`ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`,
		`Taints:` + repeatedStringForTaints + `,`,
		`PodCIDRs:` + fmt.Sprintf("%v", this.PodCIDRs) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *NodeStatus) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForConditions := "[]NodeCondition{"
	for _, f := range this.Conditions {
		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + ","
	}
	repeatedStringForConditions += "}"
	repeatedStringForAddresses := "[]NodeAddress{"
	for _, f := range this.Addresses {
		repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + ","
	}
	repeatedStringForAddresses += "}"
	s := strings.Join([]string{`&NodeStatus{`,
		`Conditions:` + repeatedStringForConditions + `,`,
		`Addresses:` + repeatedStringForAddresses + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *Pod) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Pod{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`,
		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *PodCondition) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&PodCondition{`,
		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
		`LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *PodIP) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&PodIP{`,
		`IP:` + fmt.Sprintf("%v", this.IP) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *PodList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]Pod{"
	for _, f := range this.Items {
		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pod", "Pod", 1), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&PodList{`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *PodReadinessGate) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&PodReadinessGate{`,
		`ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *PodSpec) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForContainers := "[]Container{"
	for _, f := range this.Containers {
		repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + ","
	}
	repeatedStringForContainers += "}"
	repeatedStringForInitContainers := "[]Container{"
	for _, f := range this.InitContainers {
		repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + ","
	}
	repeatedStringForInitContainers += "}"
	s := strings.Join([]string{`&PodSpec{`,
		`Containers:` + repeatedStringForContainers + `,`,
		`ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
		`NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
		`HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`,
		`InitContainers:` + repeatedStringForInitContainers + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *PodStatus) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForConditions := "[]PodCondition{"
	for _, f := range this.Conditions {
		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PodCondition", "PodCondition", 1), `&`, ``, 1) + ","
	}
	repeatedStringForConditions += "}"
	repeatedStringForContainerStatuses := "[]ContainerStatus{"
	for _, f := range this.ContainerStatuses {
		repeatedStringForContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + ","
	}
	repeatedStringForContainerStatuses += "}"
	repeatedStringForPodIPs := "[]PodIP{"
	for _, f := range this.PodIPs {
		repeatedStringForPodIPs += strings.Replace(strings.Replace(f.String(), "PodIP", "PodIP", 1), `&`, ``, 1) + ","
	}
	repeatedStringForPodIPs += "}"
	s := strings.Join([]string{`&PodStatus{`,
		`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
		`Conditions:` + repeatedStringForConditions + `,`,
		`HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`,
		`PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`,
		`StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`,
		`ContainerStatuses:` + repeatedStringForContainerStatuses + `,`,
		`QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`,
		`PodIPs:` + repeatedStringForPodIPs + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *PortStatus) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&PortStatus{`,
		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
		`Error:` + valueToStringGenerated(this.Error) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil". Map keys are sorted first so the output is
// deterministic despite Go's random map iteration order.
func (this *Secret) String() string {
	if this == nil {
		return "nil"
	}
	keysForData := make([]string, 0, len(this.Data))
	for k := range this.Data {
		keysForData = append(keysForData, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForData)
	mapStringForData := "map[string]Bytes{"
	for _, k := range keysForData {
		mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k])
	}
	mapStringForData += "}"
	keysForStringData := make([]string, 0, len(this.StringData))
	for k := range this.StringData {
		keysForStringData = append(keysForStringData, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForStringData)
	mapStringForStringData := "map[string]string{"
	for _, k := range keysForStringData {
		mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k])
	}
	mapStringForStringData += "}"
	s := strings.Join([]string{`&Secret{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Data:` + mapStringForData + `,`,
		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
		`StringData:` + mapStringForStringData + `,`,
		`Immutable:` + valueToStringGenerated(this.Immutable) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *SecretList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]Secret{"
	for _, f := range this.Items {
		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Secret", "Secret", 1), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&SecretList{`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *Service) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Service{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`,
		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *ServiceList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]Service{"
	for _, f := range this.Items {
		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Service", "Service", 1), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&ServiceList{`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *ServicePort) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ServicePort{`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
		`TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`,
		`NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`,
		`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil". Selector keys are sorted for deterministic output.
func (this *ServiceSpec) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForPorts := "[]ServicePort{"
	for _, f := range this.Ports {
		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + ","
	}
	repeatedStringForPorts += "}"
	keysForSelector := make([]string, 0, len(this.Selector))
	for k := range this.Selector {
		keysForSelector = append(keysForSelector, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
	mapStringForSelector := "map[string]string{"
	for _, k := range keysForSelector {
		mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
	}
	mapStringForSelector += "}"
	s := strings.Join([]string{`&ServiceSpec{`,
		`Ports:` + repeatedStringForPorts + `,`,
		`Selector:` + mapStringForSelector + `,`,
		`ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`,
		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
		`ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`,
		`SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`,
		`LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`,
		`LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`,
		`ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`,
		`HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`,
		`SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`,
		`IPFamilyPolicy:` + valueToStringGenerated(this.IPFamilyPolicy) + `,`,
		`ClusterIPs:` + fmt.Sprintf("%v", this.ClusterIPs) + `,`,
		`IPFamilies:` + fmt.Sprintf("%v", this.IPFamilies) + `,`,
		`LoadBalancerClass:` + valueToStringGenerated(this.LoadBalancerClass) + `,`,
		`InternalTrafficPolicy:` + valueToStringGenerated(this.InternalTrafficPolicy) + `,`,
		`TrafficDistribution:` + valueToStringGenerated(this.TrafficDistribution) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil". Conditions come from an external type, so they are
// formatted with %v rather than a generated String method.
func (this *ServiceStatus) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForConditions := "[]Condition{"
	for _, f := range this.Conditions {
		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
	}
	repeatedStringForConditions += "}"
	s := strings.Join([]string{`&ServiceStatus{`,
		`LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`,
		`Conditions:` + repeatedStringForConditions + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *SessionAffinityConfig) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&SessionAffinityConfig{`,
		`ClientIP:` + strings.Replace(this.ClientIP.String(), "ClientIPConfig", "ClientIPConfig", 1) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *Taint) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Taint{`,
		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
		`Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
		`TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *TypedLocalObjectReference) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&TypedLocalObjectReference{`,
		`APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`,
		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`}`,
	}, "")
	return s
}
// String renders this in gogo/protobuf's single-line debug format; a nil
// receiver prints "nil".
func (this *VolumeMount) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&VolumeMount{`,
		`MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`,
		`}`,
	}, "")
	return s
}
// valueToStringGenerated formats a pointer-typed field for debug output:
// "nil" for a nil pointer, otherwise "*<value>" with the pointee dereferenced.
// Callers always pass pointer values, so reflect.Value.IsNil is safe here.
func valueToStringGenerated(v interface{}) string {
	ptr := reflect.ValueOf(v)
	if ptr.IsNil() {
		return "nil"
	}
	return fmt.Sprintf("*%v", reflect.Indirect(ptr).Interface())
}
// Unmarshal decodes the protobuf wire bytes in dAtA into m, one tagged field
// at a time. Unknown fields are skipped; malformed input yields io.ErrUnexpectedEOF,
// ErrIntOverflowGenerated, or ErrInvalidLengthGenerated.
func (m *ClientIPConfig) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key varint: tag<<3 | wiretype.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid for a message field.
		if wireType == 4 {
			return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
			}
			// Decode the varint payload for the optional int32 field.
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.TimeoutSeconds = &v
		default:
			// Unknown field: rewind to the key and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire bytes in dAtA into m, one tagged field
// at a time. String fields are copied out of dAtA; repeated message fields
// (Ports, VolumeMounts) are appended and decoded recursively. Unknown fields
// are skipped via skipGenerated.
func (m *Container) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key varint: tag<<3 | wiretype.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Container: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Name: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Image: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Image = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// Ports: repeated ContainerPort message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ports = append(m.Ports, ContainerPort{})
			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 9:
			// VolumeMounts: repeated VolumeMount message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.VolumeMounts = append(m.VolumeMounts, VolumeMount{})
			if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the key and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m
// (fields: Name, HostPort, ContainerPort, Protocol, HostIP), skipping
// unknown fields. Generated-style decoder — regenerate rather than hand-edit.
func (m *ContainerPort) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): Name.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Field 2 (varint): HostPort, accumulated directly into the int32 field.
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType)
}
m.HostPort = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.HostPort |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
// Field 3 (varint): ContainerPort.
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType)
}
m.ContainerPort = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ContainerPort |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
// Field 4 (length-delimited): Protocol — string bytes converted to the
// Protocol named type.
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Protocol = Protocol(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Field 5 (length-delimited): HostIP.
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.HostIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Only
// field 2 (Running) is decoded here; all other fields are skipped.
// Generated-style decoder — regenerate rather than hand-edit.
func (m *ContainerState) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ContainerState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 2 (length-delimited): Running — a pointer submessage, allocated
// lazily on first occurrence.
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Running == nil {
m.Running = &ContainerStateRunning{}
}
if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Only
// field 1 (StartedAt, an embedded submessage) is decoded; everything else
// is skipped. Generated-style decoder — regenerate rather than hand-edit.
func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): StartedAt — decoded into the value-typed
// field directly (no allocation needed).
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Decodes
// field 2 (State) and field 8 (ContainerID); other fields are skipped.
// Generated-style decoder — regenerate rather than hand-edit.
func (m *ContainerStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 2 (length-delimited): State — embedded submessage.
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Field 8 (length-delimited): ContainerID.
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ContainerID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Decodes
// fields 1 (IP), 3 (Hostname) and 4 (NodeName, optional *string); other
// fields are skipped. Generated-style decoder — regenerate, don't hand-edit.
func (m *EndpointAddress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): IP.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Field 3 (length-delimited): Hostname.
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Hostname = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Field 4 (length-delimited): NodeName — optional, so the decoded string
// is stored through a pointer (presence distinguishes "" from unset).
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.NodeName = &s
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Decodes
// fields 1 (Name), 2 (Port), 3 (Protocol) and 4 (AppProtocol, optional
// *string); other fields are skipped. Generated-style decoder.
func (m *EndpointPort) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): Name.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Field 2 (varint): Port.
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
}
m.Port = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Port |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
// Field 3 (length-delimited): Protocol — bytes converted to the Protocol
// named type.
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Protocol = Protocol(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Field 4 (length-delimited): AppProtocol — optional, stored via pointer.
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.AppProtocol = &s
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Decodes
// the repeated fields 1 (Addresses) and 3 (Ports); other fields are
// skipped. Generated-style decoder — regenerate rather than hand-edit.
func (m *EndpointSubset) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): Addresses — repeated; append a zero element
// and unmarshal into it in place.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Addresses = append(m.Addresses, EndpointAddress{})
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Field 3 (length-delimited): Ports — repeated, same pattern.
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ports = append(m.Ports, EndpointPort{})
if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Decodes
// field 1 (ObjectMeta) and the repeated field 2 (Subsets); other fields
// are skipped. Generated-style decoder — regenerate rather than hand-edit.
func (m *Endpoints) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Endpoints: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): ObjectMeta — embedded submessage.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Field 2 (length-delimited): Subsets — repeated; append and decode in place.
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Subsets = append(m.Subsets, EndpointSubset{})
if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Decodes
// field 1 (ListMeta) and the repeated field 2 (Items); other fields are
// skipped. Generated-style decoder — regenerate rather than hand-edit.
func (m *EndpointsList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): ListMeta — embedded submessage.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Field 2 (length-delimited): Items — repeated; append and decode in place.
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, Endpoints{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Decodes
// fields 1 (IP), 2 (Hostname), 3 (IPMode, optional pointer) and the
// repeated field 4 (Ports); other fields are skipped. Generated-style
// decoder — regenerate rather than hand-edit.
func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): IP.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Field 2 (length-delimited): Hostname.
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Hostname = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
// Field 3 (length-delimited): IPMode — optional named string type, stored
// via pointer so presence is distinguishable from the empty value.
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := LoadBalancerIPMode(dAtA[iNdEx:postIndex])
m.IPMode = &s
iNdEx = postIndex
// Field 4 (length-delimited): Ports — repeated; append and decode in place.
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ports = append(m.Ports, PortStatus{})
if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Only the
// repeated field 1 (Ingress) is decoded; other fields are skipped.
// Generated-style decoder — regenerate rather than hand-edit.
func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): Ingress — repeated; append and decode in place.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ingress = append(m.Ingress, LoadBalancerIngress{})
if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Only
// field 1 (ObjectMeta) is decoded; other fields are skipped.
// Generated-style decoder — regenerate rather than hand-edit.
func (m *Namespace) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Namespace: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): ObjectMeta — embedded submessage.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Decodes
// field 1 (ListMeta) and the repeated field 2 (Items); other fields are
// skipped. Generated-style decoder — regenerate rather than hand-edit.
func (m *NamespaceList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field key (base-128 varint).
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
// Field 1 (length-delimited): ListMeta — embedded submessage.
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Field 2 (length-delimited): Items — repeated; append and decode in place.
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, Namespace{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
// Unknown field: skip its payload.
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a Node from its protobuf wire-format representation
// in dAtA, reading fields tag-by-tag until the buffer is exhausted.
// Unknown fields are skipped via skipGenerated. Code generated by
// protoc-gen-gogo; hand edits will be lost on regeneration.
func (m *Node) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
// Wire type 4 (end-group) is never valid at the top level of a message.
if wireType == 4 {
return fmt.Errorf("proto: Node: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ObjectMeta (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
// postIndex < 0 catches integer-overflow of iNdEx+msglen.
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
// Field 2: Spec (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
// Field 3: Status (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
// A well-formed buffer ends exactly at l.
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a NodeAddress from its protobuf wire-format
// representation in dAtA. Unknown fields are skipped via skipGenerated.
// Code generated by protoc-gen-gogo; hand edits will be lost on
// regeneration.
func (m *NodeAddress) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: Type (string, converted to NodeAddressType).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = NodeAddressType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
// Field 2: Address (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Address = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a NodeCondition from its protobuf wire-format
// representation in dAtA. Only fields 1 (Type), 2 (Status), and
// 5 (Reason) are handled here; all other field numbers are skipped.
// Code generated by protoc-gen-gogo; hand edits will be lost on
// regeneration.
func (m *NodeCondition) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: Type (string, converted to NodeConditionType).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = NodeConditionType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
// Field 2: Status (string, converted to ConditionStatus).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
// Field 5: Reason (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Reason = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a NodeList from its protobuf wire-format
// representation in dAtA: field 1 is the embedded ListMeta and each
// occurrence of field 2 appends one Node to Items. Code generated by
// protoc-gen-gogo; hand edits will be lost on regeneration.
func (m *NodeList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NodeList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ListMeta (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
// Field 2: Items (repeated Node); one element is appended per occurrence.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, Node{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a NodeSpec from its protobuf wire-format
// representation in dAtA. Handled fields: 1 PodCIDR, 3 ProviderID,
// 5 Taints (repeated), 7 PodCIDRs (repeated string); all other field
// numbers are skipped. Code generated by protoc-gen-gogo; hand edits
// will be lost on regeneration.
func (m *NodeSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: PodCIDR (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PodCIDR = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
// Field 3: ProviderID (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ProviderID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
// Field 5: Taints (repeated Taint); one element is appended per occurrence.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Taints = append(m.Taints, Taint{})
if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
// Field 7: PodCIDRs (repeated string); one element is appended per occurrence.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PodCIDRs", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PodCIDRs = append(m.PodCIDRs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a NodeStatus from its protobuf wire-format
// representation in dAtA. Handled fields: 4 Conditions and
// 5 Addresses (both repeated messages); all other field numbers are
// skipped. Code generated by protoc-gen-gogo; hand edits will be lost
// on regeneration.
func (m *NodeStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 4:
// Field 4: Conditions (repeated NodeCondition).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Conditions = append(m.Conditions, NodeCondition{})
if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
// Field 5: Addresses (repeated NodeAddress).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Addresses = append(m.Addresses, NodeAddress{})
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a Pod from its protobuf wire-format representation
// in dAtA: field 1 ObjectMeta, field 2 Spec, field 3 Status (all
// embedded messages). Unknown fields are skipped. Code generated by
// protoc-gen-gogo; hand edits will be lost on regeneration.
func (m *Pod) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Pod: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ObjectMeta (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
// Field 2: Spec (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
// Field 3: Status (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a PodCondition from its protobuf wire-format
// representation in dAtA. Handled fields: 1 Type, 2 Status,
// 3 LastProbeTime, 4 LastTransitionTime, 5 Reason, 6 Message. Unknown
// fields are skipped. Code generated by protoc-gen-gogo; hand edits
// will be lost on regeneration.
func (m *PodCondition) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PodCondition: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: Type (string, converted to PodConditionType).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = PodConditionType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
// Field 2: Status (string, converted to ConditionStatus).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
// Field 3: LastProbeTime (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
// Field 4: LastTransitionTime (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
// Field 5: Reason (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Reason = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
// Field 6: Message (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Message = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a PodIP from its protobuf wire-format
// representation in dAtA (single string field 1: IP). Unknown fields
// are skipped. Code generated by protoc-gen-gogo; hand edits will be
// lost on regeneration.
func (m *PodIP) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PodIP: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: IP (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a PodList from its protobuf wire-format
// representation in dAtA: field 1 is the embedded ListMeta and each
// occurrence of field 2 appends one Pod to Items. Code generated by
// protoc-gen-gogo; hand edits will be lost on regeneration.
func (m *PodList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PodList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ListMeta (embedded message, length-delimited).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
// Field 2: Items (repeated Pod); one element is appended per occurrence.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, Pod{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a PodReadinessGate from its protobuf wire-format
// representation in dAtA (single string field 1: ConditionType).
// Unknown fields are skipped. Code generated by protoc-gen-gogo; hand
// edits will be lost on regeneration.
func (m *PodReadinessGate) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PodReadinessGate: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PodReadinessGate: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ConditionType (string, converted to PodConditionType).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ConditionType", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ConditionType = PodConditionType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes a PodSpec from its protobuf wire-format
// representation in dAtA. Handled fields: 2 Containers (repeated),
// 8 ServiceAccountName, 10 NodeName, 11 HostNetwork (varint bool),
// 20 InitContainers (repeated); all other field numbers are skipped.
// Code generated by protoc-gen-gogo; hand edits will be lost on
// regeneration.
func (m *PodSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
// Decode the field tag as a varint: wire = fieldNum<<3 | wireType.
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PodSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2:
// Field 2: Containers (repeated Container).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Containers = append(m.Containers, Container{})
if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 8:
// Field 8: ServiceAccountName (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 10:
// Field 10: NodeName (string).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.NodeName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 11:
// Field 11: HostNetwork (varint, wire type 0; any non-zero value is true).
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.HostNetwork = bool(v != 0)
case 20:
// Field 20: InitContainers (repeated Container).
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.InitContainers = append(m.InitContainers, Container{})
if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: skip its payload, validating its length stays in bounds.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// appending to any repeated fields already present. It returns
// io.ErrUnexpectedEOF on a truncated buffer and
// ErrInvalidLengthGenerated/ErrIntOverflowGenerated on malformed
// lengths or varints. Generated code — do not edit by hand.
func (m *PodStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// Read the next tag varint; it encodes fieldNum<<3 | wireType.
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
// Wire type 4 (end-group) is never valid at the top of a message.
if wireType == 4 {
return fmt.Errorf("proto: PodStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: Phase — length-delimited string stored as PodPhase.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
// postIndex < 0 guards against integer-overflow of the addition above.
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Phase = PodPhase(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
// Field 2: Conditions — repeated embedded PodCondition messages.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
// Append a zero value, then decode in place into the new element.
m.Conditions = append(m.Conditions, PodCondition{})
if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
// Field 5: HostIP — length-delimited string.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.HostIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
// Field 6: PodIP — length-delimited string.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PodIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 7:
// Field 7: StartTime — optional embedded v1.Time message;
// allocated lazily on first occurrence.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.StartTime == nil {
m.StartTime = &v1.Time{}
}
if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 8:
// Field 8: ContainerStatuses — repeated embedded ContainerStatus messages.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{})
if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 9:
// Field 9: QOSClass — length-delimited string stored as PodQOSClass.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 12:
// Field 12: PodIPs — repeated embedded PodIP messages.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PodIPs = append(m.PodIPs, PodIP{})
if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: rewind to the tag and skip the whole field,
// validating that the skip stays within the buffer.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
// A well-formed buffer leaves iNdEx exactly at l.
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Fields: 1=Port (varint int32), 2=Protocol (string), 3=Error
// (optional string pointer). Generated code — do not edit by hand.
func (m *PortStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// Read the next tag varint; it encodes fieldNum<<3 | wireType.
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PortStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: Port — varint decoded directly into the int32 field.
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
}
m.Port = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Port |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
// Field 2: Protocol — length-delimited string stored as Protocol.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Protocol = Protocol(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
// Field 3: Error — optional string; stored via a pointer so that
// "absent" and "empty" remain distinguishable.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.Error = &s
iNdEx = postIndex
default:
// Unknown field: rewind to the tag and skip it safely.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Fields: 1=ObjectMeta (message), 2=Data (map<string,bytes>),
// 3=Type (string), 4=StringData (map<string,string>), 5=Immutable
// (optional bool pointer). Map fields are encoded as repeated
// key/value entry messages and decoded inline below.
// Generated code — do not edit by hand.
func (m *Secret) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// Read the next tag varint; it encodes fieldNum<<3 | wireType.
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Secret: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ObjectMeta — embedded message decoded in place.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
// Field 2: Data — map<string,Bytes>; each wire entry is a small
// message with key as field 1 and value as field 2.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
// Lazily allocate the map on the first entry.
if m.Data == nil {
m.Data = make(map[string]Bytes)
}
var mapkey string
mapvalue := []byte{}
// Decode the key/value sub-fields of this one map entry.
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
// Entry field 1: the map key (string).
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
// Entry field 2: the map value (bytes), copied out of dAtA.
var mapbyteLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapbyteLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intMapbyteLen := int(mapbyteLen)
if intMapbyteLen < 0 {
return ErrInvalidLengthGenerated
}
postbytesIndex := iNdEx + intMapbyteLen
if postbytesIndex < 0 {
return ErrInvalidLengthGenerated
}
if postbytesIndex > l {
return io.ErrUnexpectedEOF
}
mapvalue = make([]byte, mapbyteLen)
copy(mapvalue, dAtA[iNdEx:postbytesIndex])
iNdEx = postbytesIndex
} else {
// Unknown sub-field inside the map entry: skip it.
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Data[mapkey] = ((Bytes)(mapvalue))
iNdEx = postIndex
case 3:
// Field 3: Type — length-delimited string stored as SecretType.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = SecretType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
// Field 4: StringData — map<string,string>; same inline entry
// decoding pattern as field 2 but with a string value.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.StringData == nil {
m.StringData = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
// Entry field 1: the map key (string).
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
// Entry field 2: the map value (string).
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.StringData[mapkey] = mapvalue
iNdEx = postIndex
case 5:
// Field 5: Immutable — varint bool stored via a pointer so that
// "absent" and "false" remain distinguishable.
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
b := bool(v != 0)
m.Immutable = &b
default:
// Unknown field: rewind to the tag and skip it safely.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Fields: 1=ListMeta (embedded message), 2=Items (repeated Secret).
// Generated code — do not edit by hand.
func (m *SecretList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// Read the next tag varint; it encodes fieldNum<<3 | wireType.
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SecretList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ListMeta — embedded message decoded in place.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
// Field 2: Items — repeated embedded Secret messages; append a
// zero value and decode into the new element.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, Secret{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: rewind to the tag and skip it safely.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Fields: 1=ObjectMeta, 2=Spec, 3=Status — all embedded messages
// decoded in place. Generated code — do not edit by hand.
func (m *Service) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// Read the next tag varint; it encodes fieldNum<<3 | wireType.
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Service: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ObjectMeta — embedded message.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
// Field 2: Spec — embedded message.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
// Field 3: Status — embedded message.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: rewind to the tag and skip it safely.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Fields: 1=ListMeta (embedded message), 2=Items (repeated Service).
// Generated code — do not edit by hand.
func (m *ServiceList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// Read the next tag varint; it encodes fieldNum<<3 | wireType.
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ServiceList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: ListMeta — embedded message decoded in place.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
// Field 2: Items — repeated embedded Service messages; append a
// zero value and decode into the new element.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, Service{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// Unknown field: rewind to the tag and skip it safely.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Fields: 1=Name (string), 2=Protocol (string), 3=Port (varint int32),
// 4=TargetPort (embedded message), 5=NodePort (varint int32),
// 6=AppProtocol (optional string pointer).
// Generated code — do not edit by hand.
func (m *ServicePort) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// Read the next tag varint; it encodes fieldNum<<3 | wireType.
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ServicePort: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Field 1: Name — length-delimited string.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
// Field 2: Protocol — length-delimited string stored as Protocol.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Protocol = Protocol(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
// Field 3: Port — varint decoded directly into the int32 field.
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
}
m.Port = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Port |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
// Field 4: TargetPort — embedded message decoded in place.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
// Field 5: NodePort — varint decoded directly into the int32 field.
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType)
}
m.NodePort = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.NodePort |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 6:
// Field 6: AppProtocol — optional string; stored via a pointer so
// that "absent" and "empty" remain distinguishable.
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.AppProtocol = &s
iNdEx = postIndex
default:
// Unknown field: rewind to the tag and skip it safely.
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ServiceSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ports = append(m.Ports, ServicePort{})
if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Selector == nil {
m.Selector = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Selector[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ClusterIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = ServiceType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LoadBalancerIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ExternalTrafficPolicy = ServiceExternalTrafficPolicy(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType)
}
m.HealthCheckNodePort = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.HealthCheckNodePort |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 14:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.SessionAffinityConfig == nil {
m.SessionAffinityConfig = &SessionAffinityConfig{}
}
if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 17:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IPFamilyPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := IPFamilyPolicy(dAtA[iNdEx:postIndex])
m.IPFamilyPolicy = &s
iNdEx = postIndex
case 18:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ClusterIPs", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ClusterIPs = append(m.ClusterIPs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 19:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IPFamilies", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IPFamilies = append(m.IPFamilies, IPFamily(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 21:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerClass", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.LoadBalancerClass = &s
iNdEx = postIndex
case 22:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InternalTrafficPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex])
m.InternalTrafficPolicy = &s
iNdEx = postIndex
case 23:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TrafficDistribution", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.TrafficDistribution = &s
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Code generated by protoc-gen-gogo; the loops below hand-decode
// base-128 varints (tag = fieldNum<<3 | wireType) for speed.
func (m *ServiceStatus) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field tag as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// End-group marker with no matching start group is invalid here.
			return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// LoadBalancer: length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow of iNdEx+msglen.
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Conditions: repeated embedded message; append a zero value,
			// then decode into the new last element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Conditions = append(m.Conditions, v1.Condition{})
			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, validating lengths along the way.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Code generated by protoc-gen-gogo.
func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field tag (fieldNum<<3 | wireType) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SessionAffinityConfig: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SessionAffinityConfig: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// ClientIP: optional embedded message; allocated lazily below.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ClientIP", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ClientIP == nil {
				m.ClientIP = &ClientIPConfig{}
			}
			if err := m.ClientIP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Code generated by protoc-gen-gogo.
func (m *Taint) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field tag (fieldNum<<3 | wireType) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Taint: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Key: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Value: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Effect: length-delimited string stored as TaintEffect.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Effect = TaintEffect(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// TimeAdded: optional embedded message, allocated lazily.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TimeAdded == nil {
				m.TimeAdded = &v1.Time{}
			}
			if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Code generated by protoc-gen-gogo.
func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field tag (fieldNum<<3 | wireType) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TypedLocalObjectReference: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TypedLocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// APIGroup: optional string, stored as a pointer.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.APIGroup = &s
			iNdEx = postIndex
		case 2:
			// Kind: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Kind = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Name: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Code generated by protoc-gen-gogo. Note that this slim type only
// decodes field 3 (MountPath); every other field number falls through
// to the default skip path.
func (m *VolumeMount) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the next field tag (fieldNum<<3 | wireType) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 3:
			// MountPath: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.MountPath = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// All other fields are skipped by this slim type.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipGenerated returns the number of bytes occupied by the next field in
// dAtA, validating nesting of (deprecated) proto2 groups along the way.
// Code generated by protoc-gen-gogo.
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0 // current group nesting depth
	for iNdEx < l {
		var wire uint64
		// Read the field tag varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint payload: scan past continuation bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// 64-bit fixed.
			iNdEx += 8
		case 2:
			// Length-delimited: read the length varint, then skip that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthGenerated
			}
			iNdEx += length
		case 3:
			// Start group.
			depth++
		case 4:
			// End group: must match an open start group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupGenerated
			}
			depth--
		case 5:
			// 32-bit fixed.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			// Index overflowed from a huge skipped length.
			return 0, ErrInvalidLengthGenerated
		}
		if depth == 0 {
			// All groups closed: the next field is fully consumed.
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
var (
	// ErrInvalidLengthGenerated reports a decoded length that is negative
	// or overflows the buffer index.
	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowGenerated reports a varint that does not terminate
	// within 64 bits.
	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
	// ErrUnexpectedEndOfGroupGenerated reports an end-group marker with no
	// matching start group.
	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2015 The Kubernetes Authors.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package. It is empty because
// the types registered below (Pod, Service, ...) belong to the core API group.
const GroupName = ""

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme applies the registration functions above to a scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	// Slim core types served by this package.
	known := []runtime.Object{
		&Pod{},
		&PodList{},
		&Service{},
		&ServiceList{},
		&Endpoints{},
		&EndpointsList{},
		&Node{},
		&NodeList{},
		&Namespace{},
		&NamespaceList{},
		&Secret{},
		&SecretList{},
	}
	scheme.AddKnownTypes(SchemeGroupVersion, known...)
	// Add common types
	scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{})
	// Add the watch version that applies
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2017 The Kubernetes Authors.
package v1
import "fmt"
// MatchTaint checks if the taint matches taintToMatch. Taints are unique
// by key:effect, so two taints with the same key and the same effect are
// regarded as matching (the value is ignored).
func (t *Taint) MatchTaint(taintToMatch *Taint) bool {
	if t.Key != taintToMatch.Key {
		return false
	}
	return t.Effect == taintToMatch.Effect
}
// ToString converts taint struct to string in format '<key>=<value>:<effect>',
// '<key>=<value>:', '<key>:<effect>', or '<key>', depending on which of
// value and effect are set.
func (t *Taint) ToString() string {
	switch {
	case len(t.Effect) == 0 && len(t.Value) == 0:
		// Key only.
		return fmt.Sprintf("%v", t.Key)
	case len(t.Effect) == 0:
		// Key and value, no effect; the trailing ':' is preserved.
		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
	case len(t.Value) == 0:
		// Key and effect, no value.
		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
	default:
		return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v1
// GetHostIP returns the Host IP of the pod.
func (p *Pod) GetHostIP() string {
	return p.Status.HostIP
}

// GetAPIVersion returns the API Version for the pod.
// All types in this package share the scheme's version.
func (p *Pod) GetAPIVersion() string {
	return SchemeGroupVersion.Version
}

// GetKind returns its Kind.
func (p *Pod) GetKind() string {
	return "Pod"
}

// IsNil returns true if this structure is nil.
func (p *Pod) IsNil() bool {
	return p == nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Bytes) DeepCopyInto(out *Bytes) {
	{
		in := &in
		// Bytes is a byte slice: allocate a fresh backing array and copy.
		*out = make(Bytes, len(*in))
		copy(*out, *in)
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bytes.
func (in Bytes) DeepCopy() Bytes {
	if in == nil {
		return nil
	}
	out := new(Bytes)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
	*out = *in
	// TimeoutSeconds is a pointer: allocate a new int32 so the copy
	// does not alias in.
	if in.TimeoutSeconds != nil {
		in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig.
func (in *ClientIPConfig) DeepCopy() *ClientIPConfig {
	if in == nil {
		return nil
	}
	out := new(ClientIPConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Container) DeepCopyInto(out *Container) {
	*out = *in
	// Element-wise copy via the builtin copy; the generator emits this
	// form (rather than per-element DeepCopyInto) for these slices.
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]ContainerPort, len(*in))
		copy(*out, *in)
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]VolumeMount, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
func (in *Container) DeepCopy() *Container {
	if in == nil {
		return nil
	}
	out := new(Container)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerPort) DeepCopyInto(out *ContainerPort) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort.
func (in *ContainerPort) DeepCopy() *ContainerPort {
	if in == nil {
		return nil
	}
	out := new(ContainerPort)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerState) DeepCopyInto(out *ContainerState) {
	*out = *in
	// Running is a pointer: allocate a fresh value and deep-copy into it.
	if in.Running != nil {
		in, out := &in.Running, &out.Running
		*out = new(ContainerStateRunning)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState.
func (in *ContainerState) DeepCopy() *ContainerState {
	if in == nil {
		return nil
	}
	out := new(ContainerState)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) {
	*out = *in
	in.StartedAt.DeepCopyInto(&out.StartedAt)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning.
func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning {
	if in == nil {
		return nil
	}
	out := new(ContainerStateRunning)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
	*out = *in
	in.State.DeepCopyInto(&out.State)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus.
func (in *ContainerStatus) DeepCopy() *ContainerStatus {
	if in == nil {
		return nil
	}
	out := new(ContainerStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) {
	*out = *in
	// NodeName is a pointer: allocate a fresh string to avoid aliasing.
	if in.NodeName != nil {
		in, out := &in.NodeName, &out.NodeName
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress.
func (in *EndpointAddress) DeepCopy() *EndpointAddress {
	if in == nil {
		return nil
	}
	out := new(EndpointAddress)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
	*out = *in
	if in.AppProtocol != nil {
		in, out := &in.AppProtocol, &out.AppProtocol
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
func (in *EndpointPort) DeepCopy() *EndpointPort {
	if in == nil {
		return nil
	}
	out := new(EndpointPort)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) {
	*out = *in
	// Per-element deep copy: the element types contain pointer fields.
	if in.Addresses != nil {
		in, out := &in.Addresses, &out.Addresses
		*out = make([]EndpointAddress, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]EndpointPort, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset.
func (in *EndpointSubset) DeepCopy() *EndpointSubset {
	if in == nil {
		return nil
	}
	out := new(EndpointSubset)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Endpoints) DeepCopyInto(out *Endpoints) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Subsets != nil {
		in, out := &in.Subsets, &out.Subsets
		*out = make([]EndpointSubset, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints.
func (in *Endpoints) DeepCopy() *Endpoints {
	if in == nil {
		return nil
	}
	out := new(Endpoints)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Endpoints) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointsList) DeepCopyInto(out *EndpointsList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// in/out are shadowed to point at the Items slice being deep-copied.
		in, out := &in.Items, &out.Items
		*out = make([]Endpoints, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList.
func (in *EndpointsList) DeepCopy() *EndpointsList {
	if in == nil {
		return nil
	}
	out := new(EndpointsList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EndpointsList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
	*out = *in
	if in.IPMode != nil {
		// Allocate a fresh pointee so the copy does not alias the original's IPMode.
		in, out := &in.IPMode, &out.IPMode
		*out = new(LoadBalancerIPMode)
		**out = **in
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]PortStatus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress.
func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress {
	if in == nil {
		return nil
	}
	out := new(LoadBalancerIngress)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
	*out = *in
	if in.Ingress != nil {
		// in/out are shadowed to point at the Ingress slice being deep-copied.
		in, out := &in.Ingress, &out.Ingress
		*out = make([]LoadBalancerIngress, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
	if in == nil {
		return nil
	}
	out := new(LoadBalancerStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespace) DeepCopyInto(out *Namespace) {
	// Only TypeMeta and ObjectMeta are present; ObjectMeta needs a deep copy.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace.
func (in *Namespace) DeepCopy() *Namespace {
	if in == nil {
		return nil
	}
	out := new(Namespace)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Namespace) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// in/out are shadowed to point at the Items slice being deep-copied.
		in, out := &in.Items, &out.Items
		*out = make([]Namespace, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
func (in *NamespaceList) DeepCopy() *NamespaceList {
	if in == nil {
		return nil
	}
	out := new(NamespaceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NamespaceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Node) DeepCopyInto(out *Node) {
	// Shallow-copy, then deep-copy the meta, spec and status sub-structs.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
func (in *Node) DeepCopy() *Node {
	if in == nil {
		return nil
	}
	out := new(Node)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Node) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
	// NodeAddress holds no pointer fields the generator deep-copies, so a plain assignment suffices.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
func (in *NodeAddress) DeepCopy() *NodeAddress {
	if in == nil {
		return nil
	}
	out := new(NodeAddress)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeCondition) DeepCopyInto(out *NodeCondition) {
	// NodeCondition holds no pointer fields the generator deep-copies, so a plain assignment suffices.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition.
func (in *NodeCondition) DeepCopy() *NodeCondition {
	if in == nil {
		return nil
	}
	out := new(NodeCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeList) DeepCopyInto(out *NodeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// in/out are shadowed to point at the Items slice being deep-copied.
		in, out := &in.Items, &out.Items
		*out = make([]Node, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
func (in *NodeList) DeepCopy() *NodeList {
	if in == nil {
		return nil
	}
	out := new(NodeList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
	*out = *in
	if in.PodCIDRs != nil {
		// Strings are immutable, so a shallow slice copy is a deep copy here.
		in, out := &in.PodCIDRs, &out.PodCIDRs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Taints != nil {
		// Taints carry a *TimeAdded pointer, so each element needs a deep copy.
		in, out := &in.Taints, &out.Taints
		*out = make([]Taint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
func (in *NodeSpec) DeepCopy() *NodeSpec {
	if in == nil {
		return nil
	}
	out := new(NodeSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
	*out = *in
	if in.Conditions != nil {
		// NodeCondition/NodeAddress elements are copied with copy() because their own
		// DeepCopyInto is a plain assignment (no pointer fields to duplicate).
		in, out := &in.Conditions, &out.Conditions
		*out = make([]NodeCondition, len(*in))
		copy(*out, *in)
	}
	if in.Addresses != nil {
		in, out := &in.Addresses, &out.Addresses
		*out = make([]NodeAddress, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
func (in *NodeStatus) DeepCopy() *NodeStatus {
	if in == nil {
		return nil
	}
	out := new(NodeStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Pod) DeepCopyInto(out *Pod) {
	// Shallow-copy, then deep-copy the meta, spec and status sub-structs.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
func (in *Pod) DeepCopy() *Pod {
	if in == nil {
		return nil
	}
	out := new(Pod)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Pod) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCondition) DeepCopyInto(out *PodCondition) {
	*out = *in
	// Timestamps are deep-copied via their own DeepCopyInto.
	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition.
func (in *PodCondition) DeepCopy() *PodCondition {
	if in == nil {
		return nil
	}
	out := new(PodCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodIP) DeepCopyInto(out *PodIP) {
	// PodIP holds no pointer fields the generator deep-copies, so a plain assignment suffices.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP.
func (in *PodIP) DeepCopy() *PodIP {
	if in == nil {
		return nil
	}
	out := new(PodIP)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodList) DeepCopyInto(out *PodList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// in/out are shadowed to point at the Items slice being deep-copied.
		in, out := &in.Items, &out.Items
		*out = make([]Pod, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList.
func (in *PodList) DeepCopy() *PodList {
	if in == nil {
		return nil
	}
	out := new(PodList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
	// PodReadinessGate holds no pointer fields the generator deep-copies, so a plain assignment suffices.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
	if in == nil {
		return nil
	}
	out := new(PodReadinessGate)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSpec) DeepCopyInto(out *PodSpec) {
	*out = *in
	if in.InitContainers != nil {
		// in/out are shadowed to point at the slice being deep-copied.
		in, out := &in.InitContainers, &out.InitContainers
		*out = make([]Container, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Containers != nil {
		in, out := &in.Containers, &out.Containers
		*out = make([]Container, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
func (in *PodSpec) DeepCopy() *PodSpec {
	if in == nil {
		return nil
	}
	out := new(PodSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodStatus) DeepCopyInto(out *PodStatus) {
	*out = *in
	if in.Conditions != nil {
		// in/out are shadowed to point at the field being deep-copied.
		in, out := &in.Conditions, &out.Conditions
		*out = make([]PodCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PodIPs != nil {
		// PodIP has no pointer fields (its DeepCopyInto is a plain assignment), so copy() suffices.
		in, out := &in.PodIPs, &out.PodIPs
		*out = make([]PodIP, len(*in))
		copy(*out, *in)
	}
	if in.StartTime != nil {
		// StartTime is a pointer; DeepCopy allocates a fresh value.
		in, out := &in.StartTime, &out.StartTime
		*out = (*in).DeepCopy()
	}
	if in.ContainerStatuses != nil {
		in, out := &in.ContainerStatuses, &out.ContainerStatuses
		*out = make([]ContainerStatus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
func (in *PodStatus) DeepCopy() *PodStatus {
	if in == nil {
		return nil
	}
	out := new(PodStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortStatus) DeepCopyInto(out *PortStatus) {
	*out = *in
	if in.Error != nil {
		// Allocate a fresh string pointee so the copy does not alias the original's Error.
		in, out := &in.Error, &out.Error
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus.
func (in *PortStatus) DeepCopy() *PortStatus {
	if in == nil {
		return nil
	}
	out := new(PortStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Secret) DeepCopyInto(out *Secret) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Immutable != nil {
		in, out := &in.Immutable, &out.Immutable
		*out = new(bool)
		**out = **in
	}
	if in.Data != nil {
		in, out := &in.Data, &out.Data
		*out = make(map[string]Bytes, len(*in))
		for key, val := range *in {
			var outVal []byte
			if val == nil {
				// nil values stay nil; the assignment after the if overwrites with nil
				// again in this case — a harmless generator redundancy.
				(*out)[key] = nil
			} else {
				// Inner shadowing copies the byte slice so map values do not alias.
				in, out := &val, &outVal
				*out = make(Bytes, len(*in))
				copy(*out, *in)
			}
			(*out)[key] = outVal
		}
	}
	if in.StringData != nil {
		// Strings are immutable, so copying map entries by value is a deep copy.
		in, out := &in.StringData, &out.StringData
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
func (in *Secret) DeepCopy() *Secret {
	if in == nil {
		return nil
	}
	out := new(Secret)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Secret) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretList) DeepCopyInto(out *SecretList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// in/out are shadowed to point at the Items slice being deep-copied.
		in, out := &in.Items, &out.Items
		*out = make([]Secret, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
func (in *SecretList) DeepCopy() *SecretList {
	if in == nil {
		return nil
	}
	out := new(SecretList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SecretList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Service) DeepCopyInto(out *Service) {
	// Shallow-copy, then deep-copy the meta, spec and status sub-structs.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
func (in *Service) DeepCopy() *Service {
	if in == nil {
		return nil
	}
	out := new(Service)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Service) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceList) DeepCopyInto(out *ServiceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// in/out are shadowed to point at the Items slice being deep-copied.
		in, out := &in.Items, &out.Items
		*out = make([]Service, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList.
func (in *ServiceList) DeepCopy() *ServiceList {
	if in == nil {
		return nil
	}
	out := new(ServiceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServicePort) DeepCopyInto(out *ServicePort) {
	*out = *in
	if in.AppProtocol != nil {
		// Allocate a fresh string pointee so the copy does not alias the original's AppProtocol.
		in, out := &in.AppProtocol, &out.AppProtocol
		*out = new(string)
		**out = **in
	}
	out.TargetPort = in.TargetPort
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort.
func (in *ServicePort) DeepCopy() *ServicePort {
	if in == nil {
		return nil
	}
	out := new(ServicePort)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
	// Shallow-copy value fields first; every slice, map and pointer field is then
	// duplicated individually so the copy shares no mutable state with the receiver.
	*out = *in
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]ServicePort, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Selector != nil {
		in, out := &in.Selector, &out.Selector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.ClusterIPs != nil {
		in, out := &in.ClusterIPs, &out.ClusterIPs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.ExternalIPs != nil {
		in, out := &in.ExternalIPs, &out.ExternalIPs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.LoadBalancerSourceRanges != nil {
		in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SessionAffinityConfig != nil {
		in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig
		*out = new(SessionAffinityConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.IPFamilies != nil {
		in, out := &in.IPFamilies, &out.IPFamilies
		*out = make([]IPFamily, len(*in))
		copy(*out, *in)
	}
	if in.IPFamilyPolicy != nil {
		in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy
		*out = new(IPFamilyPolicy)
		**out = **in
	}
	if in.LoadBalancerClass != nil {
		in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
		*out = new(string)
		**out = **in
	}
	if in.InternalTrafficPolicy != nil {
		in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy
		*out = new(ServiceInternalTrafficPolicy)
		**out = **in
	}
	if in.TrafficDistribution != nil {
		in, out := &in.TrafficDistribution, &out.TrafficDistribution
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
func (in *ServiceSpec) DeepCopy() *ServiceSpec {
	if in == nil {
		return nil
	}
	out := new(ServiceSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) {
	*out = *in
	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
	if in.Conditions != nil {
		// Conditions uses the upstream metav1.Condition type rather than a slim local type.
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus.
func (in *ServiceStatus) DeepCopy() *ServiceStatus {
	if in == nil {
		return nil
	}
	out := new(ServiceStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) {
	*out = *in
	if in.ClientIP != nil {
		// ClientIP is a pointer to a struct with its own deep copy; allocate then recurse.
		in, out := &in.ClientIP, &out.ClientIP
		*out = new(ClientIPConfig)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig.
func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig {
	if in == nil {
		return nil
	}
	out := new(SessionAffinityConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Taint) DeepCopyInto(out *Taint) {
	*out = *in
	if in.TimeAdded != nil {
		// TimeAdded is a pointer; DeepCopy allocates a fresh value.
		in, out := &in.TimeAdded, &out.TimeAdded
		*out = (*in).DeepCopy()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
func (in *Taint) DeepCopy() *Taint {
	if in == nil {
		return nil
	}
	out := new(Taint)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
	*out = *in
	if in.APIGroup != nil {
		// Allocate a fresh string pointee so the copy does not alias the original's APIGroup.
		in, out := &in.APIGroup, &out.APIGroup
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference.
func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference {
	if in == nil {
		return nil
	}
	out := new(TypedLocalObjectReference)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeMount) DeepCopyInto(out *VolumeMount) {
	// VolumeMount holds no pointer fields the generator deep-copies, so a plain assignment suffices.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount.
func (in *VolumeMount) DeepCopy() *VolumeMount {
	if in == nil {
		return nil
	}
	out := new(VolumeMount)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package v1
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Bytes) DeepEqual(other *Bytes) bool {
	// nil other never compares equal (the receiver is required non-nil).
	if other == nil {
		return false
	}
	// Length check first, then element-wise byte comparison.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if inElement != (*other)[i] {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ClientIPConfig) DeepEqual(other *ClientIPConfig) bool {
	if other == nil {
		return false
	}
	// Pointers must agree on nil-ness; if both set, compare the pointed-to values.
	if (in.TimeoutSeconds == nil) != (other.TimeoutSeconds == nil) {
		return false
	} else if in.TimeoutSeconds != nil {
		if *in.TimeoutSeconds != *other.TimeoutSeconds {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Container) DeepEqual(other *Container) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.Image != other.Image {
		return false
	}
	// Enter when both slices are non-nil, or when exactly one is nil
	// (the latter fails inside; two nil slices compare equal by skipping).
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.VolumeMounts != nil) && (other.VolumeMounts != nil)) || ((in.VolumeMounts == nil) != (other.VolumeMounts == nil)) {
		in, other := &in.VolumeMounts, &other.VolumeMounts
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ContainerPort) DeepEqual(other *ContainerPort) bool {
	if other == nil {
		return false
	}
	// All fields are plain values; compare each directly.
	if in.Name != other.Name {
		return false
	}
	if in.HostPort != other.HostPort {
		return false
	}
	if in.ContainerPort != other.ContainerPort {
		return false
	}
	if in.Protocol != other.Protocol {
		return false
	}
	if in.HostIP != other.HostIP {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ContainerState) DeepEqual(other *ContainerState) bool {
	if other == nil {
		return false
	}
	// Running pointers must agree on nil-ness; if both set, recurse.
	if (in.Running == nil) != (other.Running == nil) {
		return false
	} else if in.Running != nil {
		if !in.Running.DeepEqual(other.Running) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ContainerStateRunning) DeepEqual(other *ContainerStateRunning) bool {
	if other == nil {
		return false
	}
	// StartedAt has its own DeepEqual; delegate to it.
	if !in.StartedAt.DeepEqual(&other.StartedAt) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ContainerStatus) DeepEqual(other *ContainerStatus) bool {
	if other == nil {
		return false
	}
	if !in.State.DeepEqual(&other.State) {
		return false
	}
	if in.ContainerID != other.ContainerID {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointAddress) DeepEqual(other *EndpointAddress) bool {
	if other == nil {
		return false
	}
	if in.IP != other.IP {
		return false
	}
	if in.Hostname != other.Hostname {
		return false
	}
	// NodeName pointers must agree on nil-ness; if both set, compare values.
	if (in.NodeName == nil) != (other.NodeName == nil) {
		return false
	} else if in.NodeName != nil {
		if *in.NodeName != *other.NodeName {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointPort) DeepEqual(other *EndpointPort) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.Port != other.Port {
		return false
	}
	if in.Protocol != other.Protocol {
		return false
	}
	// AppProtocol pointers must agree on nil-ness; if both set, compare values.
	if (in.AppProtocol == nil) != (other.AppProtocol == nil) {
		return false
	} else if in.AppProtocol != nil {
		if *in.AppProtocol != *other.AppProtocol {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointSubset) DeepEqual(other *EndpointSubset) bool {
	if other == nil {
		return false
	}
	// Enter when both slices are non-nil, or when exactly one is nil
	// (the latter fails inside; two nil slices compare equal by skipping).
	if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
		in, other := &in.Addresses, &other.Addresses
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Endpoints) DeepEqual(other *Endpoints) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
		return false
	}
	// Enter when both slices are non-nil or exactly one is nil (two nils are equal).
	if ((in.Subsets != nil) && (other.Subsets != nil)) || ((in.Subsets == nil) != (other.Subsets == nil)) {
		in, other := &in.Subsets, &other.Subsets
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointsList) DeepEqual(other *EndpointsList) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ListMeta.DeepEqual(&other.ListMeta) {
		return false
	}
	// Enter when both slices are non-nil or exactly one is nil (two nils are equal).
	if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
		in, other := &in.Items, &other.Items
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LoadBalancerIngress) DeepEqual(other *LoadBalancerIngress) bool {
	if other == nil {
		return false
	}
	if in.IP != other.IP {
		return false
	}
	if in.Hostname != other.Hostname {
		return false
	}
	// IPMode pointers must agree on nil-ness; if both set, compare values.
	if (in.IPMode == nil) != (other.IPMode == nil) {
		return false
	} else if in.IPMode != nil {
		if *in.IPMode != *other.IPMode {
			return false
		}
	}
	// Enter when both slices are non-nil or exactly one is nil (two nils are equal).
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LoadBalancerStatus) DeepEqual(other *LoadBalancerStatus) bool {
	if other == nil {
		return false
	}
	// Enter when both slices are non-nil or exactly one is nil (two nils are equal).
	if ((in.Ingress != nil) && (other.Ingress != nil)) || ((in.Ingress == nil) != (other.Ingress == nil)) {
		in, other := &in.Ingress, &other.Ingress
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Namespace) DeepEqual(other *Namespace) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NamespaceList) DeepEqual(other *NamespaceList) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ListMeta.DeepEqual(&other.ListMeta) {
		return false
	}
	// Enter when both slices are non-nil or exactly one is nil (two nils are equal).
	if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
		in, other := &in.Items, &other.Items
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Node) DeepEqual(other *Node) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	// Meta, spec and status each delegate to their own DeepEqual.
	if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	if !in.Status.DeepEqual(&other.Status) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeAddress) DeepEqual(other *NodeAddress) bool {
	if other == nil {
		return false
	}
	// Both fields are plain values.
	if in.Type != other.Type {
		return false
	}
	if in.Address != other.Address {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeCondition) DeepEqual(other *NodeCondition) bool {
	if other == nil {
		return false
	}
	// Only Type, Status and Reason participate in equality for the slim type.
	if in.Type != other.Type {
		return false
	}
	if in.Status != other.Status {
		return false
	}
	if in.Reason != other.Reason {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeList) DeepEqual(other *NodeList) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ListMeta.DeepEqual(&other.ListMeta) {
		return false
	}
	// Enter when both slices are non-nil or exactly one is nil (two nils are equal).
	if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
		in, other := &in.Items, &other.Items
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares PodCIDR and ProviderID by value, PodCIDRs element-wise by
// value, and Taints element-wise via Taint.DeepEqual.
func (in *NodeSpec) DeepEqual(other *NodeSpec) bool {
	if other == nil {
		return false
	}
	if in.PodCIDR != other.PodCIDR {
		return false
	}
	// Element-wise comparison of the PodCIDRs string slice.
	if ((in.PodCIDRs != nil) && (other.PodCIDRs != nil)) || ((in.PodCIDRs == nil) != (other.PodCIDRs == nil)) {
		in, other := &in.PodCIDRs, &other.PodCIDRs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.ProviderID != other.ProviderID {
		return false
	}
	// Element-wise comparison of Taints via Taint.DeepEqual.
	if ((in.Taints != nil) && (other.Taints != nil)) || ((in.Taints == nil) != (other.Taints == nil)) {
		in, other := &in.Taints, &other.Taints
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares the Conditions and Addresses slices element-wise via each
// element's DeepEqual.
func (in *NodeStatus) DeepEqual(other *NodeStatus) bool {
	if other == nil {
		return false
	}
	if ((in.Conditions != nil) && (other.Conditions != nil)) || ((in.Conditions == nil) != (other.Conditions == nil)) {
		in, other := &in.Conditions, &other.Conditions
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
		in, other := &in.Addresses, &other.Addresses
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares TypeMeta by value, and ObjectMeta, Spec, and Status via their
// own DeepEqual methods.
func (in *Pod) DeepEqual(other *Pod) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	if !in.Status.DeepEqual(&other.Status) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Scalar fields are compared by value; the two timestamps are compared
// via their own DeepEqual methods.
func (in *PodCondition) DeepEqual(other *PodCondition) bool {
	if other == nil {
		return false
	}
	if in.Type != other.Type {
		return false
	}
	if in.Status != other.Status {
		return false
	}
	if !in.LastProbeTime.DeepEqual(&other.LastProbeTime) {
		return false
	}
	if !in.LastTransitionTime.DeepEqual(&other.LastTransitionTime) {
		return false
	}
	if in.Reason != other.Reason {
		return false
	}
	if in.Message != other.Message {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// The single IP field is compared by value.
func (in *PodIP) DeepEqual(other *PodIP) bool {
	if other == nil {
		return false
	}
	if in.IP != other.IP {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares TypeMeta, ListMeta, and the Items slice element-wise.
func (in *PodList) DeepEqual(other *PodList) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ListMeta.DeepEqual(&other.ListMeta) {
		return false
	}
	// Element-wise comparison of Items via each element's DeepEqual.
	if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
		in, other := &in.Items, &other.Items
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// The single ConditionType field is compared by value.
func (in *PodReadinessGate) DeepEqual(other *PodReadinessGate) bool {
	if other == nil {
		return false
	}
	if in.ConditionType != other.ConditionType {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// InitContainers and Containers are compared element-wise via each
// element's DeepEqual; the remaining fields are compared by value.
func (in *PodSpec) DeepEqual(other *PodSpec) bool {
	if other == nil {
		return false
	}
	if ((in.InitContainers != nil) && (other.InitContainers != nil)) || ((in.InitContainers == nil) != (other.InitContainers == nil)) {
		in, other := &in.InitContainers, &other.InitContainers
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.Containers != nil) && (other.Containers != nil)) || ((in.Containers == nil) != (other.Containers == nil)) {
		in, other := &in.Containers, &other.Containers
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if in.ServiceAccountName != other.ServiceAccountName {
		return false
	}
	if in.NodeName != other.NodeName {
		return false
	}
	if in.HostNetwork != other.HostNetwork {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Slices (Conditions, PodIPs, ContainerStatuses) are compared
// element-wise; StartTime is a pointer field, equal when both are nil or
// both point to DeepEqual values; scalars are compared by value.
func (in *PodStatus) DeepEqual(other *PodStatus) bool {
	if other == nil {
		return false
	}
	if in.Phase != other.Phase {
		return false
	}
	if ((in.Conditions != nil) && (other.Conditions != nil)) || ((in.Conditions == nil) != (other.Conditions == nil)) {
		in, other := &in.Conditions, &other.Conditions
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if in.HostIP != other.HostIP {
		return false
	}
	if in.PodIP != other.PodIP {
		return false
	}
	if ((in.PodIPs != nil) && (other.PodIPs != nil)) || ((in.PodIPs == nil) != (other.PodIPs == nil)) {
		in, other := &in.PodIPs, &other.PodIPs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	// Pointer field: unequal when exactly one side is nil.
	if (in.StartTime == nil) != (other.StartTime == nil) {
		return false
	} else if in.StartTime != nil {
		if !in.StartTime.DeepEqual(other.StartTime) {
			return false
		}
	}
	if ((in.ContainerStatuses != nil) && (other.ContainerStatuses != nil)) || ((in.ContainerStatuses == nil) != (other.ContainerStatuses == nil)) {
		in, other := &in.ContainerStatuses, &other.ContainerStatuses
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if in.QOSClass != other.QOSClass {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Port and Protocol are compared by value; Error is a pointer field,
// equal when both are nil or both point to equal strings.
func (in *PortStatus) DeepEqual(other *PortStatus) bool {
	if other == nil {
		return false
	}
	if in.Port != other.Port {
		return false
	}
	if in.Protocol != other.Protocol {
		return false
	}
	if (in.Error == nil) != (other.Error == nil) {
		return false
	} else if in.Error != nil {
		if *in.Error != *other.Error {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Data values are compared via their DeepEqual method; StringData values
// are compared with ==; Immutable is a pointer field, equal when both
// are nil or both point to the same bool.
func (in *Secret) DeepEqual(other *Secret) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
		return false
	}
	if (in.Immutable == nil) != (other.Immutable == nil) {
		return false
	} else if in.Immutable != nil {
		if *in.Immutable != *other.Immutable {
			return false
		}
	}
	// Key-wise comparison of the Data map; a key missing from other
	// makes the maps unequal.
	if ((in.Data != nil) && (other.Data != nil)) || ((in.Data == nil) != (other.Data == nil)) {
		in, other := &in.Data, &other.Data
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if !inValue.DeepEqual(&otherValue) {
						return false
					}
				}
			}
		}
	}
	if ((in.StringData != nil) && (other.StringData != nil)) || ((in.StringData == nil) != (other.StringData == nil)) {
		in, other := &in.StringData, &other.StringData
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if in.Type != other.Type {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares TypeMeta, ListMeta, and the Items slice element-wise.
func (in *SecretList) DeepEqual(other *SecretList) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ListMeta.DeepEqual(&other.ListMeta) {
		return false
	}
	// Element-wise comparison of Items via each element's DeepEqual.
	if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
		in, other := &in.Items, &other.Items
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares TypeMeta by value, and ObjectMeta, Spec, and Status via their
// own DeepEqual methods.
func (in *Service) DeepEqual(other *Service) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
		return false
	}
	if !in.Spec.DeepEqual(&other.Spec) {
		return false
	}
	if !in.Status.DeepEqual(&other.Status) {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares TypeMeta, ListMeta, and the Items slice element-wise.
func (in *ServiceList) DeepEqual(other *ServiceList) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ListMeta.DeepEqual(&other.ListMeta) {
		return false
	}
	// Element-wise comparison of Items via each element's DeepEqual.
	if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
		in, other := &in.Items, &other.Items
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Scalars are compared by value; AppProtocol is a pointer field, equal
// when both are nil or both point to equal strings.
func (in *ServicePort) DeepEqual(other *ServicePort) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.Protocol != other.Protocol {
		return false
	}
	if (in.AppProtocol == nil) != (other.AppProtocol == nil) {
		return false
	} else if in.AppProtocol != nil {
		if *in.AppProtocol != *other.AppProtocol {
			return false
		}
	}
	if in.Port != other.Port {
		return false
	}
	if in.TargetPort != other.TargetPort {
		return false
	}
	if in.NodePort != other.NodePort {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Ports is compared element-wise via ServicePort.DeepEqual; Selector is
// compared key-wise; the string slices (ClusterIPs, ExternalIPs,
// LoadBalancerSourceRanges, IPFamilies) element-wise by value; pointer
// fields are equal when both are nil or both point to equal values.
func (in *ServiceSpec) DeepEqual(other *ServiceSpec) bool {
	if other == nil {
		return false
	}
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	// Key-wise comparison of the Selector map.
	if ((in.Selector != nil) && (other.Selector != nil)) || ((in.Selector == nil) != (other.Selector == nil)) {
		in, other := &in.Selector, &other.Selector
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if in.ClusterIP != other.ClusterIP {
		return false
	}
	if ((in.ClusterIPs != nil) && (other.ClusterIPs != nil)) || ((in.ClusterIPs == nil) != (other.ClusterIPs == nil)) {
		in, other := &in.ClusterIPs, &other.ClusterIPs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.Type != other.Type {
		return false
	}
	if ((in.ExternalIPs != nil) && (other.ExternalIPs != nil)) || ((in.ExternalIPs == nil) != (other.ExternalIPs == nil)) {
		in, other := &in.ExternalIPs, &other.ExternalIPs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.SessionAffinity != other.SessionAffinity {
		return false
	}
	if in.LoadBalancerIP != other.LoadBalancerIP {
		return false
	}
	if ((in.LoadBalancerSourceRanges != nil) && (other.LoadBalancerSourceRanges != nil)) || ((in.LoadBalancerSourceRanges == nil) != (other.LoadBalancerSourceRanges == nil)) {
		in, other := &in.LoadBalancerSourceRanges, &other.LoadBalancerSourceRanges
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.ExternalTrafficPolicy != other.ExternalTrafficPolicy {
		return false
	}
	if in.HealthCheckNodePort != other.HealthCheckNodePort {
		return false
	}
	if (in.SessionAffinityConfig == nil) != (other.SessionAffinityConfig == nil) {
		return false
	} else if in.SessionAffinityConfig != nil {
		if !in.SessionAffinityConfig.DeepEqual(other.SessionAffinityConfig) {
			return false
		}
	}
	if ((in.IPFamilies != nil) && (other.IPFamilies != nil)) || ((in.IPFamilies == nil) != (other.IPFamilies == nil)) {
		in, other := &in.IPFamilies, &other.IPFamilies
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if (in.IPFamilyPolicy == nil) != (other.IPFamilyPolicy == nil) {
		return false
	} else if in.IPFamilyPolicy != nil {
		if *in.IPFamilyPolicy != *other.IPFamilyPolicy {
			return false
		}
	}
	if (in.LoadBalancerClass == nil) != (other.LoadBalancerClass == nil) {
		return false
	} else if in.LoadBalancerClass != nil {
		if *in.LoadBalancerClass != *other.LoadBalancerClass {
			return false
		}
	}
	if (in.InternalTrafficPolicy == nil) != (other.InternalTrafficPolicy == nil) {
		return false
	} else if in.InternalTrafficPolicy != nil {
		if *in.InternalTrafficPolicy != *other.InternalTrafficPolicy {
			return false
		}
	}
	if (in.TrafficDistribution == nil) != (other.TrafficDistribution == nil) {
		return false
	} else if in.TrafficDistribution != nil {
		if *in.TrafficDistribution != *other.TrafficDistribution {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Compares LoadBalancer via its DeepEqual and Conditions element-wise.
func (in *ServiceStatus) DeepEqual(other *ServiceStatus) bool {
	if other == nil {
		return false
	}
	if !in.LoadBalancer.DeepEqual(&other.LoadBalancer) {
		return false
	}
	if ((in.Conditions != nil) && (other.Conditions != nil)) || ((in.Conditions == nil) != (other.Conditions == nil)) {
		in, other := &in.Conditions, &other.Conditions
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// ClientIP is a pointer field: equal when both are nil, or both non-nil
// and DeepEqual.
func (in *SessionAffinityConfig) DeepEqual(other *SessionAffinityConfig) bool {
	if other == nil {
		return false
	}
	if (in.ClientIP == nil) != (other.ClientIP == nil) {
		return false
	} else if in.ClientIP != nil {
		if !in.ClientIP.DeepEqual(other.ClientIP) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// Key, Value, and Effect are compared by value; TimeAdded is a pointer
// field, equal when both are nil or both point to DeepEqual values.
func (in *Taint) DeepEqual(other *Taint) bool {
	if other == nil {
		return false
	}
	if in.Key != other.Key {
		return false
	}
	if in.Value != other.Value {
		return false
	}
	if in.Effect != other.Effect {
		return false
	}
	if (in.TimeAdded == nil) != (other.TimeAdded == nil) {
		return false
	} else if in.TimeAdded != nil {
		if !in.TimeAdded.DeepEqual(other.TimeAdded) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// APIGroup is a pointer field, equal when both are nil or both point to
// equal strings; Kind and Name are compared by value.
func (in *TypedLocalObjectReference) DeepEqual(other *TypedLocalObjectReference) bool {
	if other == nil {
		return false
	}
	if (in.APIGroup == nil) != (other.APIGroup == nil) {
		return false
	} else if in.APIGroup != nil {
		if *in.APIGroup != *other.APIGroup {
			return false
		}
	}
	if in.Kind != other.Kind {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
// The single MountPath field is compared by value.
func (in *VolumeMount) DeepEqual(other *VolumeMount) bool {
	if other == nil {
		return false
	}
	if in.MountPath != other.MountPath {
		return false
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto
package v1
import (
fmt "fmt"
github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
io "io"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Reset clears the message to its zero value.
func (m *Endpoint) Reset() { *m = Endpoint{} }

// ProtoMessage marks Endpoint as a protobuf message.
func (*Endpoint) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's
// index path within it (message index 0 in this file).
func (*Endpoint) Descriptor() ([]byte, []int) {
	return fileDescriptor_824daf76e2aebd1d, []int{0}
}

// XXX_Unmarshal decodes b into m using the generated Unmarshal.
func (m *Endpoint) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal encodes m into (the full capacity of) b, returning the
// encoded prefix.
func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}

// XXX_Merge merges src into m via the message-info table.
func (m *Endpoint) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Endpoint.Merge(m, src)
}

// XXX_Size returns the encoded size of m.
func (m *Endpoint) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops unrecognized fields retained on m.
func (m *Endpoint) XXX_DiscardUnknown() {
	xxx_messageInfo_Endpoint.DiscardUnknown(m)
}

var xxx_messageInfo_Endpoint proto.InternalMessageInfo
// Reset clears the message to its zero value.
func (m *EndpointConditions) Reset() { *m = EndpointConditions{} }

// ProtoMessage marks EndpointConditions as a protobuf message.
func (*EndpointConditions) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's
// index path within it (message index 1 in this file).
func (*EndpointConditions) Descriptor() ([]byte, []int) {
	return fileDescriptor_824daf76e2aebd1d, []int{1}
}

// XXX_Unmarshal decodes b into m using the generated Unmarshal.
func (m *EndpointConditions) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal encodes m into (the full capacity of) b, returning the
// encoded prefix.
func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}

// XXX_Merge merges src into m via the message-info table.
func (m *EndpointConditions) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EndpointConditions.Merge(m, src)
}

// XXX_Size returns the encoded size of m.
func (m *EndpointConditions) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops unrecognized fields retained on m.
func (m *EndpointConditions) XXX_DiscardUnknown() {
	xxx_messageInfo_EndpointConditions.DiscardUnknown(m)
}

var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo
// Reset clears the message to its zero value.
func (m *EndpointHints) Reset() { *m = EndpointHints{} }

// ProtoMessage marks EndpointHints as a protobuf message.
func (*EndpointHints) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's
// index path within it (message index 2 in this file).
func (*EndpointHints) Descriptor() ([]byte, []int) {
	return fileDescriptor_824daf76e2aebd1d, []int{2}
}

// XXX_Unmarshal decodes b into m using the generated Unmarshal.
func (m *EndpointHints) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal encodes m into (the full capacity of) b, returning the
// encoded prefix.
func (m *EndpointHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}

// XXX_Merge merges src into m via the message-info table.
func (m *EndpointHints) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EndpointHints.Merge(m, src)
}

// XXX_Size returns the encoded size of m.
func (m *EndpointHints) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops unrecognized fields retained on m.
func (m *EndpointHints) XXX_DiscardUnknown() {
	xxx_messageInfo_EndpointHints.DiscardUnknown(m)
}

var xxx_messageInfo_EndpointHints proto.InternalMessageInfo
// Reset clears the message to its zero value.
func (m *EndpointPort) Reset() { *m = EndpointPort{} }

// ProtoMessage marks EndpointPort as a protobuf message.
func (*EndpointPort) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's
// index path within it (message index 3 in this file).
func (*EndpointPort) Descriptor() ([]byte, []int) {
	return fileDescriptor_824daf76e2aebd1d, []int{3}
}

// XXX_Unmarshal decodes b into m using the generated Unmarshal.
func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal encodes m into (the full capacity of) b, returning the
// encoded prefix.
func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}

// XXX_Merge merges src into m via the message-info table.
func (m *EndpointPort) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EndpointPort.Merge(m, src)
}

// XXX_Size returns the encoded size of m.
func (m *EndpointPort) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops unrecognized fields retained on m.
func (m *EndpointPort) XXX_DiscardUnknown() {
	xxx_messageInfo_EndpointPort.DiscardUnknown(m)
}

var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
// Reset clears the message to its zero value.
func (m *EndpointSlice) Reset() { *m = EndpointSlice{} }

// ProtoMessage marks EndpointSlice as a protobuf message.
func (*EndpointSlice) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's
// index path within it (message index 4 in this file).
func (*EndpointSlice) Descriptor() ([]byte, []int) {
	return fileDescriptor_824daf76e2aebd1d, []int{4}
}

// XXX_Unmarshal decodes b into m using the generated Unmarshal.
func (m *EndpointSlice) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal encodes m into (the full capacity of) b, returning the
// encoded prefix.
func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}

// XXX_Merge merges src into m via the message-info table.
func (m *EndpointSlice) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EndpointSlice.Merge(m, src)
}

// XXX_Size returns the encoded size of m.
func (m *EndpointSlice) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops unrecognized fields retained on m.
func (m *EndpointSlice) XXX_DiscardUnknown() {
	xxx_messageInfo_EndpointSlice.DiscardUnknown(m)
}

var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo
// Reset clears the message to its zero value.
func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} }

// ProtoMessage marks EndpointSliceList as a protobuf message.
func (*EndpointSliceList) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's
// index path within it (message index 5 in this file).
func (*EndpointSliceList) Descriptor() ([]byte, []int) {
	return fileDescriptor_824daf76e2aebd1d, []int{5}
}

// XXX_Unmarshal decodes b into m using the generated Unmarshal.
func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal encodes m into (the full capacity of) b, returning the
// encoded prefix.
func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}

// XXX_Merge merges src into m via the message-info table.
func (m *EndpointSliceList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EndpointSliceList.Merge(m, src)
}

// XXX_Size returns the encoded size of m.
func (m *EndpointSliceList) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops unrecognized fields retained on m.
func (m *EndpointSliceList) XXX_DiscardUnknown() {
	xxx_messageInfo_EndpointSliceList.DiscardUnknown(m)
}

var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
// Reset clears the message to its zero value.
func (m *ForZone) Reset() { *m = ForZone{} }

// ProtoMessage marks ForZone as a protobuf message.
func (*ForZone) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and the message's
// index path within it (message index 6 in this file).
func (*ForZone) Descriptor() ([]byte, []int) {
	return fileDescriptor_824daf76e2aebd1d, []int{6}
}

// XXX_Unmarshal decodes b into m using the generated Unmarshal.
func (m *ForZone) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal encodes m into (the full capacity of) b, returning the
// encoded prefix.
func (m *ForZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}

// XXX_Merge merges src into m via the message-info table.
func (m *ForZone) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ForZone.Merge(m, src)
}

// XXX_Size returns the encoded size of m.
func (m *ForZone) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops unrecognized fields retained on m.
func (m *ForZone) XXX_DiscardUnknown() {
	xxx_messageInfo_ForZone.DiscardUnknown(m)
}

var xxx_messageInfo_ForZone proto.InternalMessageInfo
// init registers each generated message type (and the map type backing
// Endpoint.DeprecatedTopology) with the gogo/protobuf registry under its
// fully-qualified proto name.
func init() {
	proto.RegisterType((*Endpoint)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.Endpoint")
	proto.RegisterMapType((map[string]string)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.Endpoint.DeprecatedTopologyEntry")
	proto.RegisterType((*EndpointConditions)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointConditions")
	proto.RegisterType((*EndpointHints)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointHints")
	proto.RegisterType((*EndpointPort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointPort")
	proto.RegisterType((*EndpointSlice)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointSlice")
	proto.RegisterType((*EndpointSliceList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointSliceList")
	proto.RegisterType((*ForZone)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.ForZone")
}
// init registers the compressed file descriptor under the proto file path.
func init() {
	proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto", fileDescriptor_824daf76e2aebd1d)
}
// fileDescriptor_824daf76e2aebd1d holds the gzip-compressed
// FileDescriptorProto for generated.proto. Opaque generated data — do not
// edit by hand; it must match the registered message types above.
var fileDescriptor_824daf76e2aebd1d = []byte{
	// 877 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcf, 0x6b, 0xe3, 0x46,
	0x14, 0xf6, 0xac, 0xa3, 0x46, 0x1e, 0x25, 0x74, 0x77, 0x28, 0x54, 0x84, 0x22, 0x19, 0x43, 0xc1,
	0xb0, 0x54, 0x26, 0x39, 0x94, 0x90, 0x43, 0x69, 0xb4, 0xc9, 0x92, 0x2d, 0x6d, 0xba, 0xcc, 0xe6,
	0xb4, 0xf4, 0x50, 0x45, 0x9a, 0x95, 0x27, 0xb6, 0x66, 0x54, 0xcd, 0xd8, 0xe0, 0x9e, 0xca, 0x9e,
	0x0b, 0xed, 0x9f, 0xd1, 0x5b, 0xff, 0x8d, 0x1c, 0xf7, 0xb8, 0x27, 0xd1, 0xa8, 0xa7, 0x9e, 0x7b,
	0xdb, 0x53, 0x99, 0xd1, 0x2f, 0xa7, 0x4e, 0x29, 0xf1, 0xfa, 0xa4, 0x99, 0x6f, 0xde, 0x7c, 0xef,
	0x7b, 0xef, 0xcd, 0x7b, 0x08, 0x7e, 0x15, 0x53, 0x39, 0x9e, 0x5d, 0x7a, 0x21, 0x4f, 0x46, 0x21,
	0x9d, 0xd2, 0x59, 0xf3, 0x49, 0x27, 0xf1, 0x68, 0x72, 0x28, 0x46, 0x62, 0x4a, 0x13, 0xbd, 0x08,
	0x52, 0x3a, 0x8a, 0xa8, 0x08, 0xf9, 0x9c, 0x64, 0x8b, 0xd1, 0x7c, 0x7f, 0x14, 0x13, 0x46, 0xb2,
	0x40, 0x92, 0xc8, 0x4b, 0x33, 0x2e, 0x39, 0x3a, 0x6a, 0xb9, 0xbc, 0x92, 0xa4, 0xfe, 0xa4, 0x93,
	0xd8, 0x9b, 0x1c, 0x0a, 0x4f, 0x71, 0xe9, 0x45, 0x90, 0x52, 0xaf, 0xe1, 0xf2, 0xe6, 0xfb, 0x7b,
	0xa7, 0xf7, 0xd3, 0x11, 0xf2, 0x8c, 0xdc, 0x21, 0x61, 0xef, 0xe9, 0xbd, 0x68, 0xc4, 0x28, 0x21,
	0x32, 0xb8, 0x8b, 0xe7, 0xb3, 0x25, 0x9e, 0x98, 0xc7, 0x7c, 0xa4, 0xe1, 0xcb, 0xd9, 0x2b, 0xbd,
	0xd3, 0x1b, 0xbd, 0xaa, 0xcc, 0x15, 0xa1, 0x47, 0xb9, 0xe2, 0x4c, 0x82, 0x70, 0x4c, 0x99, 0xca,
	0x90, 0xf2, 0x98, 0xcd, 0x98, 0xa4, 0x09, 0x59, 0xe1, 0xff, 0xfc, 0xff, 0x2e, 0x88, 0x70, 0x4c,
	0x92, 0xe0, 0xdf, 0xf7, 0x06, 0x3f, 0x1b, 0xd0, 0x3c, 0x65, 0x51, 0xca, 0x29, 0x93, 0xe8, 0x31,
	0xec, 0x05, 0x51, 0x94, 0x11, 0x21, 0x88, 0xb0, 0x41, 0xbf, 0x3b, 0xec, 0xf9, 0xbb, 0x45, 0xee,
	0xf6, 0x8e, 0x6b, 0x10, 0xb7, 0xe7, 0xe8, 0x35, 0x80, 0x30, 0xe4, 0x2c, 0xa2, 0x92, 0x72, 0x26,
	0xec, 0x07, 0x7d, 0x30, 0xb4, 0x0e, 0xce, 0xbd, 0xf5, 0x4b, 0xe6, 0xd5, 0x3a, 0x9e, 0x34, 0xac,
	0x3e, 0xba, 0xce, 0xdd, 0x4e, 0x91, 0xbb, 0xb0, 0xc5, 0xf0, 0x92, 0x57, 0x34, 0x84, 0xe6, 0x98,
	0x0b, 0xc9, 0x82, 0x84, 0xd8, 0xdd, 0x3e, 0x18, 0xf6, 0xfc, 0x9d, 0x22, 0x77, 0xcd, 0xb3, 0x0a,
	0xc3, 0xcd, 0x29, 0xfa, 0x0d, 0x40, 0x14, 0x91, 0x34, 0x23, 0xa1, 0x8a, 0xfe, 0x82, 0xa7, 0x7c,
	0xca, 0xe3, 0x85, 0x6d, 0xf4, 0xbb, 0x43, 0xeb, 0xe0, 0xbb, 0x4d, 0xc8, 0xf6, 0x4e, 0x56, 0xe8,
	0x4f, 0x99, 0xcc, 0x16, 0xfe, 0x5e, 0x15, 0x04, 0x5a, 0x35, 0xc0, 0x77, 0x68, 0x52, 0x41, 0x31,
	0x1e, 0x91, 0x73, 0x15, 0xd4, 0x07, 0x6d, 0x50, 0xe7, 0x15, 0x86, 0x9b, 0x53, 0xf4, 0x09, 0xdc,
	0xfa, 0x91, 0x33, 0x62, 0x6f, 0x6b, 0x2b, 0xb3, 0xc8, 0xdd, 0xad, 0x97, 0x9c, 0x11, 0xac, 0x51,
	0x74, 0x05, 0x8d, 0x31, 0x65, 0x52, 0xd8, 0xa6, 0xae, 0xcd, 0xb3, 0x4d, 0x04, 0x79, 0xa6, 0x08,
	0xfd, 0x5e, 0x91, 0xbb, 0x86, 0x5e, 0xe2, 0xd2, 0xc5, 0xde, 0x29, 0xfc, 0xf8, 0x3f, 0xc2, 0x47,
	0x0f, 0x61, 0x77, 0x42, 0x16, 0x36, 0x50, 0x1a, 0xb1, 0x5a, 0xa2, 0x8f, 0xa0, 0x31, 0x0f, 0xa6,
	0x33, 0xa2, 0x1f, 0x4d, 0x0f, 0x97, 0x9b, 0xa3, 0x07, 0x87, 0x60, 0xf0, 0x0b, 0x80, 0x68, 0xf5,
	0x19, 0x20, 0x17, 0x1a, 0x19, 0x09, 0xa2, 0x92, 0xc4, 0x2c, 0xdd, 0x63, 0x05, 0xe0, 0x12, 0x47,
	0x9f, 0xc2, 0x6d, 0x41, 0xb2, 0x39, 0x65, 0xb1, 0xe6, 0x34, 0x7d, 0xab, 0xc8, 0xdd, 0xed, 0x17,
	0x25, 0x84, 0xeb, 0x33, 0xb4, 0x0f, 0x2d, 0x49, 0xb2, 0x84, 0xb2, 0x40, 0x2a, 0xd3, 0xae, 0x36,
	0xfd, 0xb0, 0xc8, 0x5d, 0xeb, 0xa2, 0x85, 0xf1, 0xb2, 0xcd, 0xe0, 0x35, 0x80, 0xbb, 0xb7, 0x82,
	0x47, 0x3f, 0x40, 0xf3, 0x15, 0xcf, 0x54, 0x9e, 0xcb, 0x26, 0xb1, 0x0e, 0x9e, 0xbc, 0x4f, 0x66,
	0x9f, 0x96, 0x5c, 0xfe, 0xc3, 0xea, 0x95, 0x98, 0x15, 0x20, 0x70, 0xe3, 0x66, 0xf0, 0x17, 0x80,
	0x3b, 0xb5, 0x88, 0xe7, 0x3c, 0x93, 0xaa, 0xf0, 0xfa, 0xcd, 0x83, 0xb6, 0xf0, 0xfa, 0x69, 0x68,
	0x14, 0x5d, 0x41, 0x53, 0x77, 0x77, 0xc8, 0xa7, 0x65, 0x8a, 0xfd, 0x73, 0x45, 0xfc, 0xbc, 0xc2,
	0xde, 0xe5, 0xee, 0x17, 0x6b, 0x4d, 0x47, 0xaf, 0x66, 0xc0, 0x0d, 0xbf, 0x52, 0x92, 0xf2, 0x4c,
	0xea, 0x5c, 0x1a, 0xa5, 0x12, 0xa5, 0x10, 0x6b, 0x54, 0x25, 0x3c, 0x48, 0xd3, 0xfa, 0x9a, 0xbd,
	0xa5, 0xc5, 0xe8, 0x84, 0x1f, 0xb7, 0x30, 0x5e, 0xb6, 0x19, 0xfc, 0xde, 0x6d, 0x13, 0xfe, 0x62,
	0x4a, 0x43, 0x82, 0x32, 0x68, 0xaa, 0xb1, 0x1a, 0x05, 0x32, 0xd0, 0x01, 0x5b, 0x07, 0xfe, 0xfd,
	0x12, 0x2e, 0x3c, 0x75, 0x5f, 0x25, 0xfb, 0xdb, 0xcb, 0x2b, 0x12, 0xca, 0x6f, 0x88, 0x0c, 0xda,
	0xd1, 0xd2, 0x62, 0xb8, 0xf1, 0x83, 0x4e, 0xa0, 0x55, 0x8d, 0xba, 0x8b, 0x45, 0x4a, 0x2a, 0xe1,
	0x83, 0xea, 0x8a, 0x75, 0xdc, 0x1e, 0xbd, 0xbb, 0xbd, 0xc5, 0xcb, 0xd7, 0xd0, 0x0c, 0xf6, 0x48,
	0x15, 0x8a, 0x9a, 0x90, 0xea, 0xad, 0x9c, 0x6c, 0xa2, 0x0b, 0xfd, 0x47, 0x95, 0x92, 0x5e, 0x8d,
	0x08, 0xdc, 0x7a, 0x42, 0x09, 0x34, 0x54, 0xf6, 0x85, 0xdd, 0xd5, 0x2e, 0xcf, 0x36, 0xe1, 0x52,
	0x15, 0xd5, 0xdf, 0xad, 0xdc, 0x1a, 0x6a, 0x27, 0x70, 0xe9, 0x65, 0xf0, 0x37, 0x80, 0x8f, 0x6e,
	0x55, 0xec, 0x6b, 0x2a, 0x24, 0x62, 0x2b, 0x55, 0xfb, 0x72, 0xdd, 0xaa, 0x29, 0x3e, 0x5d, 0xb3,
	0xa6, 0x47, 0x6a, 0x64, 0xa9, 0x62, 0x0c, 0x1a, 0x54, 0x92, 0xa4, 0xce, 0xf3, 0x46, 0xa6, 0x9d,
	0x8e, 0xa6, 0x8d, 0xfa, 0x99, 0xe2, 0xc7, 0xa5, 0x9b, 0xc1, 0x63, 0xb8, 0x5d, 0x75, 0x2a, 0xea,
	0xdf, 0xea, 0xc6, 0x9d, 0xca, 0x7c, 0xa9, 0x23, 0xfd, 0xef, 0xaf, 0x6f, 0x9c, 0xce, 0x9b, 0x1b,
	0xa7, 0xf3, 0xf6, 0xc6, 0xe9, 0xfc, 0x54, 0x38, 0xe0, 0xba, 0x70, 0xc0, 0x9b, 0xc2, 0x01, 0x6f,
	0x0b, 0x07, 0xfc, 0x51, 0x38, 0xe0, 0xd7, 0x3f, 0x9d, 0xce, 0xcb, 0xa3, 0xf5, 0xff, 0x9d, 0xfe,
	0x09, 0x00, 0x00, 0xff, 0xff, 0x31, 0x15, 0x83, 0xdd, 0x78, 0x09, 0x00, 0x00,
}
// Marshal serializes m into a freshly allocated buffer sized by m.Size()
// and returns the encoded bytes.
func (m *Endpoint) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into the front of dAtA, which must have at least
// m.Size() bytes available.
func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes m back-to-front into the tail of dAtA
// (fields are written in descending field-number order, each value
// followed by its wire tag) and returns the number of bytes written.
func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Field 8 (Hints), wire type 2: tag byte 0x42.
	if m.Hints != nil {
		{
			size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x42
	}
	// Field 7 (Zone), wire type 2: tag byte 0x3a.
	if m.Zone != nil {
		i -= len(*m.Zone)
		copy(dAtA[i:], *m.Zone)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Zone)))
		i--
		dAtA[i] = 0x3a
	}
	// Field 6 (NodeName), wire type 2: tag byte 0x32.
	if m.NodeName != nil {
		i -= len(*m.NodeName)
		copy(dAtA[i:], *m.NodeName)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
		i--
		dAtA[i] = 0x2a
	}
	// Field 5 (DeprecatedTopology map); keys are sorted so the encoding
	// is deterministic. Each entry is a nested key/value message.
	if len(m.DeprecatedTopology) > 0 {
		keysForDeprecatedTopology := make([]string, 0, len(m.DeprecatedTopology))
		for k := range m.DeprecatedTopology {
			keysForDeprecatedTopology = append(keysForDeprecatedTopology, string(k))
		}
		github_com_gogo_protobuf_sortkeys.Strings(keysForDeprecatedTopology)
		for iNdEx := len(keysForDeprecatedTopology) - 1; iNdEx >= 0; iNdEx-- {
			v := m.DeprecatedTopology[string(keysForDeprecatedTopology[iNdEx])]
			baseI := i
			i -= len(v)
			copy(dAtA[i:], v)
			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
			i--
			dAtA[i] = 0x12
			i -= len(keysForDeprecatedTopology[iNdEx])
			copy(dAtA[i:], keysForDeprecatedTopology[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDeprecatedTopology[iNdEx])))
			i--
			dAtA[i] = 0xa
			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x2a
		}
	}
	// Field 3 (Hostname), wire type 2: tag byte 0x1a.
	if m.Hostname != nil {
		i -= len(*m.Hostname)
		copy(dAtA[i:], *m.Hostname)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Hostname)))
		i--
		dAtA[i] = 0x1a
	}
	// Field 2 (Conditions), always emitted.
	{
		size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x12
	// Field 1 (Addresses), repeated string: tag byte 0xa per element.
	if len(m.Addresses) > 0 {
		for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Addresses[iNdEx])
			copy(dAtA[i:], m.Addresses[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx])))
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a newly allocated protobuf wire-format buffer.
func (m *EndpointConditions) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	written, merr := m.MarshalToSizedBuffer(buf)
	if merr != nil {
		return nil, merr
	}
	return buf[:written], nil
}
// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer writes the three optional bool fields back-to-front:
// Terminating (field 3, tag 0x18), Serving (field 2, tag 0x10) and Ready
// (field 1, tag 0x8). Each bool encodes as a single 0/1 byte.
func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Terminating != nil {
		i--
		if *m.Terminating {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x18
	}
	if m.Serving != nil {
		i--
		if *m.Serving {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x10
	}
	if m.Ready != nil {
		i--
		if *m.Ready {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a newly allocated protobuf wire-format buffer.
func (m *EndpointHints) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	written, merr := m.MarshalToSizedBuffer(buf)
	if merr != nil {
		return nil, merr
	}
	return buf[:written], nil
}
// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *EndpointHints) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer writes ForZones (field 1, repeated message, tag 0xa)
// back-to-front into the tail of dAtA and returns the byte count written.
func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.ForZones) > 0 {
		// Reverse iteration keeps the forward wire order of the elements.
		for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.ForZones[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a newly allocated protobuf wire-format buffer.
func (m *EndpointPort) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	written, merr := m.MarshalToSizedBuffer(buf)
	if merr != nil {
		return nil, merr
	}
	return buf[:written], nil
}
// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer writes the optional fields back-to-front:
// AppProtocol (field 4, tag 0x22), Port (field 3, varint, tag 0x18),
// Protocol (field 2, tag 0x12) and Name (field 1, tag 0xa).
func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.AppProtocol != nil {
		i -= len(*m.AppProtocol)
		copy(dAtA[i:], *m.AppProtocol)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
		i--
		dAtA[i] = 0x22
	}
	if m.Port != nil {
		i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
		i--
		dAtA[i] = 0x18
	}
	if m.Protocol != nil {
		i -= len(*m.Protocol)
		copy(dAtA[i:], *m.Protocol)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol)))
		i--
		dAtA[i] = 0x12
	}
	if m.Name != nil {
		i -= len(*m.Name)
		copy(dAtA[i:], *m.Name)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a newly allocated protobuf wire-format buffer.
func (m *EndpointSlice) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	written, merr := m.MarshalToSizedBuffer(buf)
	if merr != nil {
		return nil, merr
	}
	return buf[:written], nil
}
// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer writes fields back-to-front: AddressType (field 4,
// tag 0x22), Ports (field 3, repeated message, tag 0x1a), Endpoints (field 2,
// repeated message, tag 0x12) and ObjectMeta (field 1, message, tag 0xa).
func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// AddressType is a string-typed field and is always emitted.
	i -= len(m.AddressType)
	copy(dAtA[i:], m.AddressType)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType)))
	i--
	dAtA[i] = 0x22
	if len(m.Ports) > 0 {
		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1a
		}
	}
	if len(m.Endpoints) > 0 {
		for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// ObjectMeta is always emitted, no nil guard.
	{
		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a newly allocated protobuf wire-format buffer.
func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	written, merr := m.MarshalToSizedBuffer(buf)
	if merr != nil {
		return nil, merr
	}
	return buf[:written], nil
}
// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer writes Items (field 2, repeated message, tag 0x12)
// then ListMeta (field 1, message, tag 0xa), back-to-front.
func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Items) > 0 {
		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// ListMeta is always emitted, no nil guard.
	{
		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a newly allocated protobuf wire-format buffer.
func (m *ForZone) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	written, merr := m.MarshalToSizedBuffer(buf)
	if merr != nil {
		return nil, merr
	}
	return buf[:written], nil
}
// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *ForZone) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer writes Name (field 1, string, tag 0xa) into the tail
// of dAtA: value bytes first, then the length varint, then the tag byte.
func (m *ForZone) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	idx := len(dAtA)
	idx -= len(m.Name)
	copy(dAtA[idx:], m.Name)
	idx = encodeVarintGenerated(dAtA, idx, uint64(len(m.Name)))
	idx--
	dAtA[idx] = 0xa
	return len(dAtA) - idx, nil
}
// encodeVarintGenerated writes v as a protobuf base-128 varint ending just
// before offset, and returns the offset of the varint's first byte.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	offset -= sovGenerated(v)
	base := offset
	for v >= 0x80 {
		// Low 7 bits plus continuation bit.
		dAtA[offset] = uint8(v) | 0x80
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
// Size returns the exact encoded protobuf size of m in bytes. The "1 +" in
// each term accounts for the one-byte field tag; it must stay in sync with
// the tags written by MarshalToSizedBuffer.
func (m *Endpoint) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Addresses) > 0 {
		for _, s := range m.Addresses {
			l = len(s)
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = m.Conditions.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if m.Hostname != nil {
		l = len(*m.Hostname)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if len(m.DeprecatedTopology) > 0 {
		for k, v := range m.DeprecatedTopology {
			_ = k
			_ = v
			// Each map entry is a nested message: tag+len for key, tag+len for
			// value, plus the entry's own tag and length varint.
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
		}
	}
	if m.NodeName != nil {
		l = len(*m.NodeName)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.Zone != nil {
		l = len(*m.Zone)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.Hints != nil {
		l = m.Hints.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}
// Size returns the encoded protobuf size of m. Each present bool costs two
// bytes: one tag byte plus one value byte.
func (m *EndpointConditions) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Ready != nil {
		n += 2
	}
	if m.Serving != nil {
		n += 2
	}
	if m.Terminating != nil {
		n += 2
	}
	return n
}
// Size returns the encoded protobuf size of m: one tag byte plus length
// varint plus payload per ForZones element.
func (m *EndpointHints) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.ForZones) > 0 {
		for _, e := range m.ForZones {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the encoded protobuf size of m; only non-nil optional fields
// contribute. Port is a varint, the other fields are length-delimited.
func (m *EndpointPort) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Name != nil {
		l = len(*m.Name)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.Protocol != nil {
		l = len(*m.Protocol)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.Port != nil {
		n += 1 + sovGenerated(uint64(*m.Port))
	}
	if m.AppProtocol != nil {
		l = len(*m.AppProtocol)
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}
// Size returns the encoded protobuf size of m. ObjectMeta and AddressType are
// always counted; Endpoints and Ports contribute per element.
func (m *EndpointSlice) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ObjectMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Endpoints) > 0 {
		for _, e := range m.Endpoints {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.Ports) > 0 {
		for _, e := range m.Ports {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = len(m.AddressType)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// Size returns the encoded protobuf size of m: ListMeta plus every Items
// element, each with a one-byte tag and a length varint.
func (m *EndpointSliceList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the encoded protobuf size of m: tag byte plus length varint
// plus the Name bytes.
func (m *ForZone) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// sovGenerated returns the number of bytes needed to encode x as a base-128
// varint (7 payload bits per byte; x|1 makes zero occupy one byte).
func sovGenerated(x uint64) (n int) {
	significant := math_bits.Len64(x | 1)
	return (significant + 6) / 7
}
// sozGenerated returns the varint-encoded size of x after zigzag encoding
// (used for sint64 fields).
func sozGenerated(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovGenerated(zigzag)
}
// String returns a gogo-protobuf debug representation of this. Map keys are
// sorted so the output is deterministic.
func (this *Endpoint) String() string {
	if this == nil {
		return "nil"
	}
	keysForDeprecatedTopology := make([]string, 0, len(this.DeprecatedTopology))
	for k := range this.DeprecatedTopology {
		keysForDeprecatedTopology = append(keysForDeprecatedTopology, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForDeprecatedTopology)
	mapStringForDeprecatedTopology := "map[string]string{"
	for _, k := range keysForDeprecatedTopology {
		mapStringForDeprecatedTopology += fmt.Sprintf("%v: %v,", k, this.DeprecatedTopology[k])
	}
	mapStringForDeprecatedTopology += "}"
	s := strings.Join([]string{`&Endpoint{`,
		`Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`,
		`Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`,
		`Hostname:` + valueToStringGenerated(this.Hostname) + `,`,
		`DeprecatedTopology:` + mapStringForDeprecatedTopology + `,`,
		`NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
		`Zone:` + valueToStringGenerated(this.Zone) + `,`,
		`Hints:` + strings.Replace(this.Hints.String(), "EndpointHints", "EndpointHints", 1) + `,`,
		`}`,
	}, "")
	return s
}
// String returns a gogo-protobuf debug representation of this; pointer
// fields are rendered via valueToStringGenerated ("nil" or "*value").
func (this *EndpointConditions) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&EndpointConditions{`,
		`Ready:` + valueToStringGenerated(this.Ready) + `,`,
		`Serving:` + valueToStringGenerated(this.Serving) + `,`,
		`Terminating:` + valueToStringGenerated(this.Terminating) + `,`,
		`}`,
	}, "")
	return s
}
// String returns a gogo-protobuf debug representation of this, rendering the
// repeated ForZones field inline with the leading "&" stripped per element.
func (this *EndpointHints) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForForZones := "[]ForZone{"
	for _, f := range this.ForZones {
		repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + ","
	}
	repeatedStringForForZones += "}"
	s := strings.Join([]string{`&EndpointHints{`,
		`ForZones:` + repeatedStringForForZones + `,`,
		`}`,
	}, "")
	return s
}
// String returns a gogo-protobuf debug representation of this; all four
// fields are pointers and render as "nil" or "*value".
func (this *EndpointPort) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&EndpointPort{`,
		`Name:` + valueToStringGenerated(this.Name) + `,`,
		`Protocol:` + valueToStringGenerated(this.Protocol) + `,`,
		`Port:` + valueToStringGenerated(this.Port) + `,`,
		`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
		`}`,
	}, "")
	return s
}
// String returns a gogo-protobuf debug representation of this, expanding the
// repeated Endpoints and Ports fields and qualifying ObjectMeta as v1.
func (this *EndpointSlice) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForEndpoints := "[]Endpoint{"
	for _, f := range this.Endpoints {
		repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + ","
	}
	repeatedStringForEndpoints += "}"
	repeatedStringForPorts := "[]EndpointPort{"
	for _, f := range this.Ports {
		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + ","
	}
	repeatedStringForPorts += "}"
	s := strings.Join([]string{`&EndpointSlice{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Endpoints:` + repeatedStringForEndpoints + `,`,
		`Ports:` + repeatedStringForPorts + `,`,
		`AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`,
		`}`,
	}, "")
	return s
}
// String returns a gogo-protobuf debug representation of this, expanding the
// repeated Items field and qualifying ListMeta as v1.
func (this *EndpointSliceList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]EndpointSlice{"
	for _, f := range this.Items {
		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&EndpointSliceList{`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
// String returns a gogo-protobuf debug representation of this.
func (this *ForZone) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ForZone{`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`}`,
	}, "")
	return s
}
// valueToStringGenerated renders a pointer value for debug output: "nil" for
// a nil pointer, otherwise "*" followed by the pointed-to value.
func valueToStringGenerated(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	elem := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", elem)
}
// Unmarshal decodes the protobuf wire format in dAtA into m. Field numbers
// match those written by MarshalToSizedBuffer; unknown fields are skipped via
// skipGenerated. Every length is bounds-checked before slicing.
func (m *Endpoint) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Endpoint: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Addresses: repeated string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 2:
			// Conditions: embedded message, decoded in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Hostname: optional string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.Hostname = &s
			iNdEx = postIndex
		case 5:
			// DeprecatedTopology: map<string,string>; each entry is a nested
			// message with key as field 1 and value as field 2.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedTopology", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.DeprecatedTopology == nil {
				m.DeprecatedTopology = make(map[string]string)
			}
			var mapkey string
			var mapvalue string
			for iNdEx < postIndex {
				entryPreIndex := iNdEx
				var wire uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowGenerated
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					wire |= uint64(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				fieldNum := int32(wire >> 3)
				if fieldNum == 1 {
					var stringLenmapkey uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowGenerated
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapkey |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapkey := int(stringLenmapkey)
					if intStringLenmapkey < 0 {
						return ErrInvalidLengthGenerated
					}
					postStringIndexmapkey := iNdEx + intStringLenmapkey
					if postStringIndexmapkey < 0 {
						return ErrInvalidLengthGenerated
					}
					if postStringIndexmapkey > l {
						return io.ErrUnexpectedEOF
					}
					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
					iNdEx = postStringIndexmapkey
				} else if fieldNum == 2 {
					var stringLenmapvalue uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowGenerated
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapvalue |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapvalue := int(stringLenmapvalue)
					if intStringLenmapvalue < 0 {
						return ErrInvalidLengthGenerated
					}
					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
					if postStringIndexmapvalue < 0 {
						return ErrInvalidLengthGenerated
					}
					if postStringIndexmapvalue > l {
						return io.ErrUnexpectedEOF
					}
					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
					iNdEx = postStringIndexmapvalue
				} else {
					// Unknown sub-field inside the map entry: skip it.
					iNdEx = entryPreIndex
					skippy, err := skipGenerated(dAtA[iNdEx:])
					if err != nil {
						return err
					}
					if (skippy < 0) || (iNdEx+skippy) < 0 {
						return ErrInvalidLengthGenerated
					}
					if (iNdEx + skippy) > postIndex {
						return io.ErrUnexpectedEOF
					}
					iNdEx += skippy
				}
			}
			m.DeprecatedTopology[mapkey] = mapvalue
			iNdEx = postIndex
		case 6:
			// NodeName: optional string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.NodeName = &s
			iNdEx = postIndex
		case 7:
			// Zone: optional string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.Zone = &s
			iNdEx = postIndex
		case 8:
			// Hints: optional embedded message, allocated lazily.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Hints == nil {
				m.Hints = &EndpointHints{}
			}
			if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it wholesale.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. All three
// fields are optional bools carried as varints (field 1 Ready, 2 Serving,
// 3 Terminating); unknown fields are skipped.
func (m *EndpointConditions) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Any non-zero varint counts as true.
			b := bool(v != 0)
			m.Ready = &b
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			b := bool(v != 0)
			m.Serving = &b
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			b := bool(v != 0)
			m.Terminating = &b
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. Field 1 is the
// repeated ForZones message; each element is appended then decoded in place.
func (m *EndpointHints) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: EndpointHints: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: EndpointHints: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ForZones", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ForZones = append(m.ForZones, ForZone{})
			if err := m.ForZones[len(m.ForZones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. Fields: 1 Name
// (string), 2 Protocol (string, stored as the slim core/v1 Protocol type),
// 3 Port (varint int32), 4 AppProtocol (string); unknown fields are skipped.
func (m *EndpointPort) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.Name = &s
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Converted directly to the aliased Protocol string type.
			s := github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
			m.Protocol = &s
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Port = &v
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.AppProtocol = &s
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. Fields:
// 1 ObjectMeta (message), 2 Endpoints (repeated message), 3 Ports (repeated
// message), 4 AddressType (string); unknown fields are skipped.
func (m *EndpointSlice) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Append a zero value, then decode into it in place.
			m.Endpoints = append(m.Endpoints, Endpoint{})
			if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ports = append(m.Ports, EndpointPort{})
			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.AddressType = AddressType(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. Fields:
// 1 ListMeta (message), 2 Items (repeated message); unknown fields are
// skipped.
func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Append a zero value, then decode into it in place.
			m.Items = append(m.Items, EndpointSlice{})
			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Each loop iteration reads one key/value pair: a varint tag is split into
// a field number and wire type, recognized fields are decoded, and unknown
// fields are validated and stepped over via skipGenerated.
func (m *ForZone) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the field tag as a base-128 varint (7 data bits per byte;
		// the high bit marks continuation).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3) // upper bits: field number
		wireType := int(wire & 0x7)  // low 3 bits: wire type
		if wireType == 4 {
			// Wire type 4 (end-group) may never begin a field.
			return fmt.Errorf("proto: ForZone: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ForZone: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Name, a length-delimited (wire type 2) string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			// Varint-encoded byte length of the string payload.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				// int overflow of iNdEx + length.
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the entire entry.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipGenerated returns the number of bytes occupied by the next complete
// field (tag plus payload) at the start of dAtA, allowing callers to step
// over fields they do not recognize. depth tracks nesting of start-group
// (wire type 3) / end-group (wire type 4) markers; the function returns
// only once it is back at depth 0, i.e. after a balanced set of groups.
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		// Decode the field tag varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint payload: consume bytes until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed 64-bit payload.
			iNdEx += 8
		case 2:
			// Length-delimited payload: a varint length, then that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthGenerated
			}
			iNdEx += length
		case 3:
			// Start of a group: keep skipping until the matching end tag.
			depth++
		case 4:
			// End of a group: must pair with a preceding start-group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupGenerated
			}
			depth--
		case 5:
			// Fixed 32-bit payload.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			// Index overflowed int; the encoded lengths were bogus.
			return 0, ErrInvalidLengthGenerated
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
var (
	// ErrInvalidLengthGenerated reports a negative or overflowing
	// length-delimited field encountered while unmarshaling.
	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowGenerated reports a varint wider than 64 bits.
	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
	// ErrUnexpectedEndOfGroupGenerated reports an end-group tag with no
	// matching start-group tag.
	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2019 The Kubernetes Authors.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package.
const GroupName = "discovery.k8s.io"

// SchemeGroupVersion is the group version used to register these objects
// (discovery.k8s.io/v1).
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Kind qualifies the given unqualified kind with this package's group,
// returning the corresponding GroupKind.
func Kind(kind string) schema.GroupKind {
	return schema.GroupKind{Group: GroupName, Kind: kind}
}
// Resource qualifies the given unqualified resource with this package's
// group, returning the corresponding GroupResource.
func Resource(resource string) schema.GroupResource {
	return schema.GroupResource{Group: GroupName, Resource: resource}
}
var (
	// SchemeBuilder is the scheme builder with scheme init functions to run
	// for this API package (currently only addKnownTypes).
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme is a common registration function for mapping packaged
	// scoped group & version keys to a scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme: the slim
// EndpointSlice and EndpointSliceList under discovery.k8s.io/v1, plus the
// shared meta/v1 kinds for that group-version.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&EndpointSlice{},
		&EndpointSliceList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
// writing into out. in must be non-nil. Value fields are copied by the
// initial struct assignment; slice, map, and pointer fields are then
// re-allocated so out shares no mutable storage with in.
func (in *Endpoint) DeepCopyInto(out *Endpoint) {
	*out = *in
	if in.Addresses != nil {
		// Fresh backing array for the string slice.
		in, out := &in.Addresses, &out.Addresses
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.Conditions.DeepCopyInto(&out.Conditions)
	if in.Hostname != nil {
		in, out := &in.Hostname, &out.Hostname
		*out = new(string)
		**out = **in
	}
	if in.DeprecatedTopology != nil {
		in, out := &in.DeprecatedTopology, &out.DeprecatedTopology
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.NodeName != nil {
		in, out := &in.NodeName, &out.NodeName
		*out = new(string)
		**out = **in
	}
	if in.Zone != nil {
		in, out := &in.Zone, &out.Zone
		*out = new(string)
		**out = **in
	}
	if in.Hints != nil {
		in, out := &in.Hints, &out.Hints
		*out = new(EndpointHints)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new Endpoint. Returns nil when the receiver is nil.
func (in *Endpoint) DeepCopy() *Endpoint {
	if in == nil {
		return nil
	}
	out := new(Endpoint)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
// writing into out. in must be non-nil. Each *bool field gets a fresh
// allocation so out does not alias in.
func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) {
	*out = *in
	if in.Ready != nil {
		in, out := &in.Ready, &out.Ready
		*out = new(bool)
		**out = **in
	}
	if in.Serving != nil {
		in, out := &in.Serving, &out.Serving
		*out = new(bool)
		**out = **in
	}
	if in.Terminating != nil {
		in, out := &in.Terminating, &out.Terminating
		*out = new(bool)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new EndpointConditions. Returns nil when the receiver is nil.
func (in *EndpointConditions) DeepCopy() *EndpointConditions {
	if in == nil {
		return nil
	}
	out := new(EndpointConditions)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
// writing into out. in must be non-nil. ForZone holds no reference types,
// so a plain copy of the slice elements suffices.
func (in *EndpointHints) DeepCopyInto(out *EndpointHints) {
	*out = *in
	if in.ForZones != nil {
		in, out := &in.ForZones, &out.ForZones
		*out = make([]ForZone, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new EndpointHints. Returns nil when the receiver is nil.
func (in *EndpointHints) DeepCopy() *EndpointHints {
	if in == nil {
		return nil
	}
	out := new(EndpointHints)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
// writing into out. in must be non-nil. Every pointer field is re-allocated
// so out shares no storage with in.
func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Protocol != nil {
		in, out := &in.Protocol, &out.Protocol
		*out = new(corev1.Protocol)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(int32)
		**out = **in
	}
	if in.AppProtocol != nil {
		in, out := &in.AppProtocol, &out.AppProtocol
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new EndpointPort. Returns nil when the receiver is nil.
func (in *EndpointPort) DeepCopy() *EndpointPort {
	if in == nil {
		return nil
	}
	out := new(EndpointPort)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
// writing into out. in must be non-nil. Slice elements are deep-copied
// element-wise because Endpoint and EndpointPort contain reference types.
func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Endpoints != nil {
		in, out := &in.Endpoints, &out.Endpoints
		*out = make([]Endpoint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]EndpointPort, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new EndpointSlice. Returns nil when the receiver is nil.
func (in *EndpointSlice) DeepCopy() *EndpointSlice {
	if in == nil {
		return nil
	}
	out := new(EndpointSlice)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the
// receiver, creating a new runtime.Object.
func (in *EndpointSlice) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
// writing into out. in must be non-nil. Items are deep-copied element-wise.
func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]EndpointSlice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new EndpointSliceList. Returns nil when the receiver is nil.
func (in *EndpointSliceList) DeepCopy() *EndpointSliceList {
	if in == nil {
		return nil
	}
	out := new(EndpointSliceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the
// receiver, creating a new runtime.Object.
func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto copies the receiver into out. ForZone contains only value
// fields, so a single struct assignment is a full deep copy. in and out
// must both be non-nil.
func (in *ForZone) DeepCopyInto(out *ForZone) {
	*out = *in
}

// DeepCopy returns a newly allocated deep copy of the receiver, or nil if
// the receiver is nil.
func (in *ForZone) DeepCopy() *ForZone {
	if in == nil {
		return nil
	}
	clone := new(ForZone)
	in.DeepCopyInto(clone)
	return clone
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package v1
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. For each slice/map field the
// guard fires when at least one side is non-nil; two nil fields skip the
// body and are treated as equal.
func (in *Endpoint) DeepEqual(other *Endpoint) bool {
	if other == nil {
		return false
	}
	if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
		in, other := &in.Addresses, &other.Addresses
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			// Element-wise comparison in order.
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if !in.Conditions.DeepEqual(&other.Conditions) {
		return false
	}
	// Pointer fields: nil-ness must match; if both non-nil, compare values.
	if (in.Hostname == nil) != (other.Hostname == nil) {
		return false
	} else if in.Hostname != nil {
		if *in.Hostname != *other.Hostname {
			return false
		}
	}
	if ((in.DeprecatedTopology != nil) && (other.DeprecatedTopology != nil)) || ((in.DeprecatedTopology == nil) != (other.DeprecatedTopology == nil)) {
		in, other := &in.DeprecatedTopology, &other.DeprecatedTopology
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			// Key-wise comparison; every key in in must exist in other
			// with an equal value.
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if (in.NodeName == nil) != (other.NodeName == nil) {
		return false
	} else if in.NodeName != nil {
		if *in.NodeName != *other.NodeName {
			return false
		}
	}
	if (in.Zone == nil) != (other.Zone == nil) {
		return false
	} else if in.Zone != nil {
		if *in.Zone != *other.Zone {
			return false
		}
	}
	if (in.Hints == nil) != (other.Hints == nil) {
		return false
	} else if in.Hints != nil {
		if !in.Hints.DeepEqual(other.Hints) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. For each *bool field, nil-ness
// must match; when both sides are non-nil the pointed-to values must match.
func (in *EndpointConditions) DeepEqual(other *EndpointConditions) bool {
	if other == nil {
		return false
	}
	if (in.Ready == nil) != (other.Ready == nil) {
		return false
	} else if in.Ready != nil {
		if *in.Ready != *other.Ready {
			return false
		}
	}
	if (in.Serving == nil) != (other.Serving == nil) {
		return false
	} else if in.Serving != nil {
		if *in.Serving != *other.Serving {
			return false
		}
	}
	if (in.Terminating == nil) != (other.Terminating == nil) {
		return false
	} else if in.Terminating != nil {
		if *in.Terminating != *other.Terminating {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. The guard fires when at least
// one ForZones slice is non-nil; two nil slices compare equal.
func (in *EndpointHints) DeepEqual(other *EndpointHints) bool {
	if other == nil {
		return false
	}
	if ((in.ForZones != nil) && (other.ForZones != nil)) || ((in.ForZones == nil) != (other.ForZones == nil)) {
		in, other := &in.ForZones, &other.ForZones
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. For each pointer field,
// nil-ness must match; when both sides are non-nil the values must match.
func (in *EndpointPort) DeepEqual(other *EndpointPort) bool {
	if other == nil {
		return false
	}
	if (in.Name == nil) != (other.Name == nil) {
		return false
	} else if in.Name != nil {
		if *in.Name != *other.Name {
			return false
		}
	}
	if (in.Protocol == nil) != (other.Protocol == nil) {
		return false
	} else if in.Protocol != nil {
		if *in.Protocol != *other.Protocol {
			return false
		}
	}
	if (in.Port == nil) != (other.Port == nil) {
		return false
	} else if in.Port != nil {
		if *in.Port != *other.Port {
			return false
		}
	}
	if (in.AppProtocol == nil) != (other.AppProtocol == nil) {
		return false
	} else if in.AppProtocol != nil {
		if *in.AppProtocol != *other.AppProtocol {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Value fields are compared
// directly; Endpoints and Ports are compared element-wise, with two nil
// slices treated as equal (the guard skips the body).
func (in *EndpointSlice) DeepEqual(other *EndpointSlice) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
		return false
	}
	if in.AddressType != other.AddressType {
		return false
	}
	if ((in.Endpoints != nil) && (other.Endpoints != nil)) || ((in.Endpoints == nil) != (other.Endpoints == nil)) {
		in, other := &in.Endpoints, &other.Endpoints
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Items are compared element-wise;
// two nil Items slices compare equal (the guard skips the body).
func (in *EndpointSliceList) DeepEqual(other *EndpointSliceList) bool {
	if other == nil {
		return false
	}
	if in.TypeMeta != other.TypeMeta {
		return false
	}
	if !in.ListMeta.DeepEqual(&other.ListMeta) {
		return false
	}
	if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
		in, other := &in.Items, &other.Items
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual deeply compares the receiver with other. A nil other is never
// equal; otherwise the two are equal exactly when their Name fields match.
// in must be non-nil.
func (in *ForZone) DeepEqual(other *ForZone) bool {
	if other == nil {
		return false
	}
	return in.Name == other.Name
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto
package v1
import (
fmt "fmt"
github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
intstr "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"
io "io"
proto "github.com/gogo/protobuf/proto"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Generated gogo/protobuf plumbing for IPBlock: Reset, the ProtoMessage
// marker, descriptor lookup, and the XXX_* hooks the proto runtime calls
// for (un)marshaling, merging, sizing, and discarding unknown fields.
func (m *IPBlock) Reset()      { *m = IPBlock{} }
func (*IPBlock) ProtoMessage() {}
func (*IPBlock) Descriptor() ([]byte, []int) {
	// Message index 0 within the file descriptor registered in init.
	return fileDescriptor_5d3be2d57d520df2, []int{0}
}
func (m *IPBlock) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	// Expand b to its full capacity and delegate to MarshalToSizedBuffer.
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *IPBlock) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IPBlock.Merge(m, src)
}
func (m *IPBlock) XXX_Size() int {
	return m.Size()
}
func (m *IPBlock) XXX_DiscardUnknown() {
	xxx_messageInfo_IPBlock.DiscardUnknown(m)
}

var xxx_messageInfo_IPBlock proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for NetworkPolicy (see IPBlock above
// for the pattern).
func (m *NetworkPolicy) Reset()      { *m = NetworkPolicy{} }
func (*NetworkPolicy) ProtoMessage() {}
func (*NetworkPolicy) Descriptor() ([]byte, []int) {
	// Message index 1 within the registered file descriptor.
	return fileDescriptor_5d3be2d57d520df2, []int{1}
}
func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NetworkPolicy) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NetworkPolicy.Merge(m, src)
}
func (m *NetworkPolicy) XXX_Size() int {
	return m.Size()
}
func (m *NetworkPolicy) XXX_DiscardUnknown() {
	xxx_messageInfo_NetworkPolicy.DiscardUnknown(m)
}

var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for NetworkPolicyEgressRule.
func (m *NetworkPolicyEgressRule) Reset()      { *m = NetworkPolicyEgressRule{} }
func (*NetworkPolicyEgressRule) ProtoMessage() {}
func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) {
	// Message index 2 within the registered file descriptor.
	return fileDescriptor_5d3be2d57d520df2, []int{2}
}
func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src)
}
func (m *NetworkPolicyEgressRule) XXX_Size() int {
	return m.Size()
}
func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() {
	xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m)
}

var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for NetworkPolicyIngressRule.
func (m *NetworkPolicyIngressRule) Reset()      { *m = NetworkPolicyIngressRule{} }
func (*NetworkPolicyIngressRule) ProtoMessage() {}
func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) {
	// Message index 3 within the registered file descriptor.
	return fileDescriptor_5d3be2d57d520df2, []int{3}
}
func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src)
}
func (m *NetworkPolicyIngressRule) XXX_Size() int {
	return m.Size()
}
func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() {
	xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m)
}

var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for NetworkPolicyList.
func (m *NetworkPolicyList) Reset()      { *m = NetworkPolicyList{} }
func (*NetworkPolicyList) ProtoMessage() {}
func (*NetworkPolicyList) Descriptor() ([]byte, []int) {
	// Message index 4 within the registered file descriptor.
	return fileDescriptor_5d3be2d57d520df2, []int{4}
}
func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NetworkPolicyList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NetworkPolicyList.Merge(m, src)
}
func (m *NetworkPolicyList) XXX_Size() int {
	return m.Size()
}
func (m *NetworkPolicyList) XXX_DiscardUnknown() {
	xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m)
}

var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for NetworkPolicyPeer.
func (m *NetworkPolicyPeer) Reset()      { *m = NetworkPolicyPeer{} }
func (*NetworkPolicyPeer) ProtoMessage() {}
func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) {
	// Message index 5 within the registered file descriptor.
	return fileDescriptor_5d3be2d57d520df2, []int{5}
}
func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NetworkPolicyPeer.Merge(m, src)
}
func (m *NetworkPolicyPeer) XXX_Size() int {
	return m.Size()
}
func (m *NetworkPolicyPeer) XXX_DiscardUnknown() {
	xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m)
}

var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for NetworkPolicyPort.
func (m *NetworkPolicyPort) Reset()      { *m = NetworkPolicyPort{} }
func (*NetworkPolicyPort) ProtoMessage() {}
func (*NetworkPolicyPort) Descriptor() ([]byte, []int) {
	// Message index 6 within the registered file descriptor.
	return fileDescriptor_5d3be2d57d520df2, []int{6}
}
func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NetworkPolicyPort.Merge(m, src)
}
func (m *NetworkPolicyPort) XXX_Size() int {
	return m.Size()
}
func (m *NetworkPolicyPort) XXX_DiscardUnknown() {
	xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m)
}

var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for NetworkPolicySpec.
func (m *NetworkPolicySpec) Reset()      { *m = NetworkPolicySpec{} }
func (*NetworkPolicySpec) ProtoMessage() {}
func (*NetworkPolicySpec) Descriptor() ([]byte, []int) {
	// Message index 7 within the registered file descriptor.
	return fileDescriptor_5d3be2d57d520df2, []int{7}
}
func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NetworkPolicySpec.Merge(m, src)
}
func (m *NetworkPolicySpec) XXX_Size() int {
	return m.Size()
}
func (m *NetworkPolicySpec) XXX_DiscardUnknown() {
	xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m)
}

var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo
// init registers each generated message type with the gogo/protobuf
// runtime under its fully qualified proto name.
func init() {
	proto.RegisterType((*IPBlock)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IPBlock")
	proto.RegisterType((*NetworkPolicy)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicy")
	proto.RegisterType((*NetworkPolicyEgressRule)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyEgressRule")
	proto.RegisterType((*NetworkPolicyIngressRule)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyIngressRule")
	proto.RegisterType((*NetworkPolicyList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyList")
	proto.RegisterType((*NetworkPolicyPeer)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyPeer")
	proto.RegisterType((*NetworkPolicyPort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyPort")
	proto.RegisterType((*NetworkPolicySpec)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicySpec")
}

// init registers the compiled (gzipped) file descriptor for this proto file.
func init() {
	proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto", fileDescriptor_5d3be2d57d520df2)
}
var fileDescriptor_5d3be2d57d520df2 = []byte{
// 838 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0xcf, 0x6f, 0xe4, 0x34,
0x14, 0x9e, 0x64, 0xa6, 0x9d, 0xae, 0xcb, 0xb2, 0xd4, 0x08, 0x31, 0x5a, 0x89, 0xa4, 0x8a, 0x84,
0xb4, 0x17, 0x1c, 0x95, 0x03, 0x5a, 0x69, 0x25, 0x40, 0x61, 0x67, 0xd1, 0x2c, 0xd0, 0x8e, 0xdc,
0x72, 0xe1, 0x96, 0xc9, 0x78, 0x53, 0x77, 0x92, 0x38, 0xb2, 0x3d, 0x65, 0x2b, 0xed, 0x61, 0x39,
0x21, 0x21, 0x21, 0xf1, 0x67, 0xf5, 0xb8, 0x12, 0x97, 0x3d, 0x45, 0x34, 0xfc, 0x0b, 0x70, 0xe9,
0x09, 0xd9, 0x71, 0x26, 0xf3, 0x63, 0x11, 0x9a, 0x76, 0x56, 0x9c, 0x12, 0xbf, 0xd8, 0xdf, 0xf7,
0xbd, 0xe7, 0xcf, 0xcf, 0x01, 0xdf, 0xc4, 0x54, 0x9e, 0x4e, 0x47, 0x28, 0x62, 0xa9, 0x1f, 0xd1,
0x84, 0x4e, 0x67, 0x8f, 0x7c, 0x12, 0xfb, 0x93, 0x87, 0xc2, 0x17, 0x09, 0x4d, 0xf5, 0x4b, 0x98,
0x53, 0x3f, 0x23, 0xf2, 0x47, 0xc6, 0x27, 0x34, 0x8b, 0xfd, 0xf3, 0x03, 0x3f, 0x26, 0x19, 0xe1,
0xa1, 0x24, 0x63, 0x94, 0x73, 0x26, 0x19, 0x7c, 0xd4, 0x80, 0xa1, 0x0a, 0xa5, 0x7e, 0xe4, 0x93,
0x18, 0x4d, 0x1e, 0x0a, 0xa4, 0xc0, 0xf4, 0x4b, 0x98, 0x53, 0xd4, 0x80, 0xa1, 0xf3, 0x83, 0xfb,
0xfd, 0xf5, 0x94, 0x44, 0x8c, 0x93, 0x37, 0x68, 0xb8, 0xff, 0x64, 0x2d, 0x18, 0xe1, 0xa7, 0x44,
0x86, 0x6f, 0xc2, 0x79, 0xba, 0x26, 0xce, 0x54, 0xd2, 0xc4, 0xa7, 0x99, 0x14, 0x92, 0xaf, 0x60,
0x7d, 0x32, 0x87, 0x15, 0xb3, 0x98, 0xf9, 0x3a, 0x3c, 0x9a, 0x3e, 0xd3, 0x23, 0x3d, 0xd0, 0x6f,
0x66, 0xba, 0x02, 0x45, 0x94, 0x29, 0xdc, 0x34, 0x8c, 0x4e, 0x69, 0x46, 0xf8, 0x85, 0x66, 0xe5,
0xd3, 0x4c, 0xd2, 0x94, 0xac, 0xe0, 0x7f, 0xf6, 0x5f, 0x0b, 0x44, 0x74, 0x4a, 0xd2, 0x70, 0x79,
0x9d, 0x77, 0x04, 0xba, 0x83, 0x61, 0x90, 0xb0, 0x68, 0x02, 0xf7, 0x41, 0x27, 0xa2, 0x63, 0xde,
0xb3, 0xf6, 0xad, 0x07, 0x77, 0x82, 0x77, 0x2e, 0x0b, 0xb7, 0x55, 0x16, 0x6e, 0xe7, 0xab, 0xc1,
0x63, 0x8c, 0xf5, 0x17, 0xe8, 0x81, 0x6d, 0xf2, 0x3c, 0x22, 0xb9, 0xec, 0xd9, 0xfb, 0xed, 0x07,
0x77, 0x02, 0x50, 0x16, 0xee, 0x76, 0x5f, 0x47, 0xb0, 0xf9, 0xe2, 0xfd, 0x6d, 0x81, 0xbb, 0x87,
0xd5, 0xae, 0x0e, 0x59, 0x42, 0xa3, 0x0b, 0xc8, 0xc1, 0x8e, 0xaa, 0xf0, 0x38, 0x94, 0xa1, 0xc6,
0xde, 0xfd, 0x34, 0x40, 0x6b, 0xb9, 0x44, 0x20, 0xb5, 0x1e, 0x9d, 0x1f, 0xa0, 0xa3, 0xd1, 0x19,
0x89, 0xe4, 0x77, 0x44, 0x86, 0x01, 0x34, 0xfa, 0x40, 0x13, 0xc3, 0x33, 0x1e, 0x98, 0x83, 0x8e,
0xc8, 0x49, 0xd4, 0xb3, 0x35, 0xdf, 0x21, 0xba, 0x85, 0x2b, 0xd1, 0x42, 0x36, 0xc7, 0x39, 0x89,
0x9a, 0xda, 0xa8, 0x11, 0xd6, 0x4c, 0xde, 0xb5, 0x05, 0x3e, 0x5c, 0x98, 0xd9, 0x8f, 0x39, 0x11,
0x02, 0x4f, 0x13, 0x02, 0x05, 0xd8, 0xca, 0x19, 0x97, 0xa2, 0x67, 0xed, 0xb7, 0x37, 0x2b, 0x67,
0xc8, 0xb8, 0x0c, 0xee, 0x1a, 0x39, 0x5b, 0x6a, 0x24, 0x70, 0xc5, 0x05, 0xcf, 0x80, 0x2d, 0x99,
0xde, 0xa8, 0xcd, 0x32, 0x12, 0xc2, 0x03, 0x60, 0x18, 0xed, 0x13, 0x86, 0x6d, 0xc9, 0xbc, 0x9f,
0x6c, 0xd0, 0x5b, 0x98, 0x35, 0xc8, 0xfe, 0xe7, 0xec, 0x73, 0xd0, 0x79, 0xc6, 0x59, 0xfa, 0x96,
0xf2, 0x9f, 0x19, 0xe0, 0x09, 0x67, 0x29, 0xd6, 0x4c, 0xde, 0x5f, 0x16, 0xd8, 0x5b, 0x98, 0xf9,
0x2d, 0x15, 0x12, 0x66, 0x2b, 0xe6, 0xff, 0xf2, 0xa6, 0xe6, 0x57, 0x78, 0xda, 0xfa, 0xef, 0x19,
0xf6, 0x9d, 0x3a, 0x32, 0x67, 0x7c, 0x06, 0xb6, 0xa8, 0x24, 0xa9, 0x30, 0x89, 0x3f, 0xdd, 0x5c,
0xe2, 0x4d, 0xa1, 0x07, 0x8a, 0x00, 0x57, 0x3c, 0xde, 0xcf, 0xed, 0xa5, 0xb4, 0x55, 0x81, 0xe0,
0x73, 0xb0, 0x9b, 0xb3, 0xf1, 0x31, 0x49, 0x48, 0x24, 0x19, 0x37, 0x99, 0xf7, 0x6f, 0x9c, 0x79,
0x38, 0x22, 0x49, 0x0d, 0x16, 0xdc, 0x2b, 0x0b, 0x77, 0x77, 0xd8, 0xa0, 0xe3, 0x79, 0x2a, 0xf8,
0x8b, 0x05, 0xf6, 0xb2, 0x30, 0x25, 0x22, 0x0f, 0x23, 0x32, 0x13, 0x60, 0x6f, 0x52, 0xc0, 0x07,
0x65, 0xe1, 0xee, 0x1d, 0x2e, 0x73, 0xe0, 0x55, 0x5a, 0x38, 0x01, 0x5d, 0x9a, 0xeb, 0xee, 0xda,
0x6b, 0x6b, 0x05, 0x8f, 0x6f, 0xb5, 0x1f, 0xa6, 0x53, 0x07, 0xbb, 0x65, 0xe1, 0xd6, 0x6d, 0x1b,
0xd7, 0x0c, 0xde, 0xaf, 0xf6, 0xf2, 0x4e, 0x30, 0x2e, 0xe1, 0x19, 0xd8, 0xd1, 0x9d, 0x3e, 0x62,
0x89, 0xe9, 0xec, 0x87, 0xca, 0x3a, 0x43, 0x13, 0xbb, 0x2e, 0xdc, 0xcf, 0x6f, 0x74, 0xeb, 0xa2,
0x1a, 0x01, 0xcf, 0xf0, 0x21, 0x01, 0x1d, 0x75, 0xfa, 0x4c, 0xb5, 0xbf, 0x5e, 0xb7, 0xda, 0xea,
0xfe, 0x44, 0xd5, 0xfd, 0x89, 0x06, 0x99, 0x3c, 0xe2, 0xc7, 0x92, 0xd3, 0x2c, 0x0e, 0x76, 0xd4,
0x49, 0x53, 0xc9, 0x60, 0x0d, 0x0f, 0x3f, 0x06, 0x5d, 0x92, 0x8d, 0x55, 0x40, 0x57, 0x75, 0xab,
0xaa, 0x47, 0xbf, 0x0a, 0xe1, 0xfa, 0x9b, 0xf7, 0xfb, 0xb2, 0x33, 0x55, 0xb7, 0x86, 0x2f, 0xde,
0xa2, 0x33, 0xdf, 0x37, 0x27, 0xe4, 0xdf, 0xdd, 0xf9, 0xd2, 0x02, 0x5d, 0x5a, 0xf5, 0x46, 0x73,
0x42, 0xbf, 0xdf, 0xdc, 0x09, 0x9d, 0x6b, 0xba, 0xc1, 0x3d, 0x23, 0xa5, 0x5b, 0x07, 0x6b, 0x5a,
0xf8, 0x02, 0x6c, 0x93, 0x4a, 0x40, 0x5b, 0x0b, 0x38, 0xd9, 0x9c, 0x80, 0xe6, 0xca, 0x0b, 0xde,
0x35, 0xfc, 0xdb, 0x26, 0x66, 0x38, 0xe1, 0x17, 0xaa, 0xfc, 0x6a, 0xee, 0xc9, 0x45, 0x4e, 0x44,
0xaf, 0xa3, 0xff, 0x23, 0x3e, 0xaa, 0x6a, 0x36, 0x0b, 0x5f, 0x17, 0x2e, 0x68, 0x86, 0x78, 0x7e,
0x45, 0x10, 0x5e, 0x5e, 0x39, 0xad, 0x57, 0x57, 0x4e, 0xeb, 0xf5, 0x95, 0xd3, 0x7a, 0x59, 0x3a,
0xd6, 0x65, 0xe9, 0x58, 0xaf, 0x4a, 0xc7, 0x7a, 0x5d, 0x3a, 0xd6, 0x1f, 0xa5, 0x63, 0xfd, 0xf6,
0xa7, 0xd3, 0xfa, 0xe1, 0xd1, 0x2d, 0xfe, 0x69, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xf7,
0x43, 0xc0, 0x11, 0x0b, 0x00, 0x00,
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size().
func (m *IPBlock) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m into the tail of dAtA, back to front, and
// returns the number of bytes written. Fields are emitted in descending
// field-number order so the wire order ends up ascending: Except (field 2,
// tag 0x12) then CIDR (field 1, tag 0xa), both length-delimited.
func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Except) > 0 {
		// Reverse iteration keeps the repeated field in original order
		// on the wire.
		for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Except[iNdEx])
			copy(dAtA[i:], m.Except[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx])))
			i--
			dAtA[i] = 0x12
		}
	}
	i -= len(m.CIDR)
	copy(dAtA[i:], m.CIDR)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size().
func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m into the tail of dAtA, back to front:
// Spec (field 2, tag 0x12) first, then ObjectMeta (field 1, tag 0xa),
// each as a length-delimited embedded message.
func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	{
		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x12
	{
		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m at the start of dAtA, which must hold at least
// m.Size() bytes, and returns the number of bytes written.
func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA back-to-front, in
// reverse field-number order, and returns the number of bytes written.
func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA) // write cursor; moves backwards
	_ = i
	var l int
	_ = l
	// Field 2 (To): repeated message, written last-to-first.
	if len(m.To) > 0 {
		for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12 // tag: field 2, wire type 2
		}
	}
	// Field 1 (Ports): repeated message, written last-to-first.
	if len(m.Ports) > 0 {
		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // tag: field 1, wire type 2
		}
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format in a newly allocated buffer.
func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m at the start of dAtA, which must hold at least
// m.Size() bytes, and returns the number of bytes written.
func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA back-to-front, in
// reverse field-number order, and returns the number of bytes written.
func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA) // write cursor; moves backwards
	_ = i
	var l int
	_ = l
	// Field 2 (From): repeated message, written last-to-first.
	if len(m.From) > 0 {
		for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12 // tag: field 2, wire type 2
		}
	}
	// Field 1 (Ports): repeated message, written last-to-first.
	if len(m.Ports) > 0 {
		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // tag: field 1, wire type 2
		}
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format in a newly allocated buffer.
func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m at the start of dAtA, which must hold at least
// m.Size() bytes, and returns the number of bytes written.
func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA back-to-front, in
// reverse field-number order, and returns the number of bytes written.
func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA) // write cursor; moves backwards
	_ = i
	var l int
	_ = l
	// Field 2 (Items): repeated message, written last-to-first.
	if len(m.Items) > 0 {
		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12 // tag: field 2, wire type 2
		}
	}
	// Field 1 (ListMeta): embedded message, always emitted.
	{
		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa // tag: field 1, wire type 2
	return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format in a newly allocated buffer.
func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m at the start of dAtA, which must hold at least
// m.Size() bytes, and returns the number of bytes written.
func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA back-to-front, in
// reverse field-number order; nil optional fields are omitted entirely.
func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA) // write cursor; moves backwards
	_ = i
	var l int
	_ = l
	// Field 3 (IPBlock): optional message.
	if m.IPBlock != nil {
		{
			size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1a // tag: field 3, wire type 2
	}
	// Field 2 (NamespaceSelector): optional message.
	if m.NamespaceSelector != nil {
		{
			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2
	}
	// Field 1 (PodSelector): optional message.
	if m.PodSelector != nil {
		{
			size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0xa // tag: field 1, wire type 2
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format in a newly allocated buffer.
func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m at the start of dAtA, which must hold at least
// m.Size() bytes, and returns the number of bytes written.
func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA back-to-front, in
// reverse field-number order; nil optional fields are omitted entirely.
func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA) // write cursor; moves backwards
	_ = i
	var l int
	_ = l
	// Field 3 (EndPort): optional varint.
	if m.EndPort != nil {
		i = encodeVarintGenerated(dAtA, i, uint64(*m.EndPort))
		i--
		dAtA[i] = 0x18 // tag: field 3, wire type 0 (varint)
	}
	// Field 2 (Port): optional IntOrString message.
	if m.Port != nil {
		{
			size, err := m.Port.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2
	}
	// Field 1 (Protocol): optional string.
	if m.Protocol != nil {
		i -= len(*m.Protocol)
		copy(dAtA[i:], *m.Protocol)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol)))
		i--
		dAtA[i] = 0xa // tag: field 1, wire type 2
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into protobuf wire format in a newly allocated buffer.
func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m at the start of dAtA, which must hold at least
// m.Size() bytes, and returns the number of bytes written.
func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA back-to-front, in
// reverse field-number order, and returns the number of bytes written.
func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA) // write cursor; moves backwards
	_ = i
	var l int
	_ = l
	// Field 4 (PolicyTypes): repeated string, written last-to-first.
	if len(m.PolicyTypes) > 0 {
		for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.PolicyTypes[iNdEx])
			copy(dAtA[i:], m.PolicyTypes[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx])))
			i--
			dAtA[i] = 0x22 // tag: field 4, wire type 2
		}
	}
	// Field 3 (Egress): repeated message.
	if len(m.Egress) > 0 {
		for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1a // tag: field 3, wire type 2
		}
	}
	// Field 2 (Ingress): repeated message.
	if len(m.Ingress) > 0 {
		for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12 // tag: field 2, wire type 2
		}
	}
	// Field 1 (PodSelector): embedded message, always emitted.
	{
		size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0xa // tag: field 1, wire type 2
	return len(dAtA) - i, nil
}
// encodeVarintGenerated writes v as a protobuf varint ending just before
// offset (i.e. the varint occupies the sovGenerated(v) bytes immediately
// preceding offset) and returns the index of the varint's first byte.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	offset -= sovGenerated(v)
	start := offset
	// Emit 7 bits per byte, least-significant group first, with the high
	// bit set on every byte except the last.
	for ; v >= 0x80; v >>= 7 {
		dAtA[offset] = uint8(v) | 0x80
		offset++
	}
	dAtA[offset] = uint8(v)
	return start
}
// Size returns the number of bytes m occupies in protobuf wire format.
func (m *IPBlock) Size() (n int) {
	if m == nil {
		return 0
	}
	cidrLen := len(m.CIDR)
	n += 1 + cidrLen + sovGenerated(uint64(cidrLen))
	for _, except := range m.Except {
		n += 1 + len(except) + sovGenerated(uint64(len(except)))
	}
	return n
}
// Size returns the number of bytes m occupies in protobuf wire format.
func (m *NetworkPolicy) Size() (n int) {
	if m == nil {
		return 0
	}
	metaLen := m.ObjectMeta.Size()
	n += 1 + metaLen + sovGenerated(uint64(metaLen))
	specLen := m.Spec.Size()
	n += 1 + specLen + sovGenerated(uint64(specLen))
	return n
}
// Size returns the number of bytes m occupies in protobuf wire format.
func (m *NetworkPolicyEgressRule) Size() (n int) {
	if m == nil {
		return 0
	}
	for idx := range m.Ports {
		sz := m.Ports[idx].Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	for idx := range m.To {
		sz := m.To[idx].Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	return n
}
// Size returns the number of bytes m occupies in protobuf wire format.
func (m *NetworkPolicyIngressRule) Size() (n int) {
	if m == nil {
		return 0
	}
	for idx := range m.Ports {
		sz := m.Ports[idx].Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	for idx := range m.From {
		sz := m.From[idx].Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	return n
}
// Size returns the number of bytes m occupies in protobuf wire format.
func (m *NetworkPolicyList) Size() (n int) {
	if m == nil {
		return 0
	}
	metaLen := m.ListMeta.Size()
	n += 1 + metaLen + sovGenerated(uint64(metaLen))
	for idx := range m.Items {
		sz := m.Items[idx].Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	return n
}
// Size returns the number of bytes m occupies in protobuf wire format.
// Nil optional fields contribute nothing.
func (m *NetworkPolicyPeer) Size() (n int) {
	if m == nil {
		return 0
	}
	if sel := m.PodSelector; sel != nil {
		sz := sel.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	if sel := m.NamespaceSelector; sel != nil {
		sz := sel.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	if blk := m.IPBlock; blk != nil {
		sz := blk.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	return n
}
// Size returns the number of bytes m occupies in protobuf wire format.
// Nil optional fields contribute nothing.
func (m *NetworkPolicyPort) Size() (n int) {
	if m == nil {
		return 0
	}
	if m.Protocol != nil {
		protoLen := len(*m.Protocol)
		n += 1 + protoLen + sovGenerated(uint64(protoLen))
	}
	if m.Port != nil {
		sz := m.Port.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	if m.EndPort != nil {
		n += 1 + sovGenerated(uint64(*m.EndPort))
	}
	return n
}
// Size returns the number of bytes m occupies in protobuf wire format.
func (m *NetworkPolicySpec) Size() (n int) {
	if m == nil {
		return 0
	}
	selLen := m.PodSelector.Size()
	n += 1 + selLen + sovGenerated(uint64(selLen))
	for idx := range m.Ingress {
		sz := m.Ingress[idx].Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	for idx := range m.Egress {
		sz := m.Egress[idx].Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	for _, policyType := range m.PolicyTypes {
		n += 1 + len(policyType) + sovGenerated(uint64(len(policyType)))
	}
	return n
}
// sovGenerated returns the number of bytes needed to encode x as an
// unsigned protobuf varint (7 payload bits per byte).
func sovGenerated(x uint64) (n int) {
	bits := math_bits.Len64(x | 1) // x|1 so that x == 0 still counts one bit
	return (bits + 6) / 7          // ceiling division by 7
}
// sozGenerated returns the encoded size of x as a zig-zag (signed) varint:
// the value is zig-zag transformed, then measured as an unsigned varint.
func sozGenerated(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovGenerated(zigzag)
}
// String returns a human-readable, single-line representation of this IPBlock.
func (this *IPBlock) String() string {
	if this == nil {
		return "nil"
	}
	return "&IPBlock{" +
		"CIDR:" + fmt.Sprintf("%v", this.CIDR) + "," +
		"Except:" + fmt.Sprintf("%v", this.Except) + "," +
		"}"
}
// String returns a human-readable, single-line representation of this
// NetworkPolicy.
//
// Fix: the generated code wrapped this.Spec.String() in
// strings.Replace(s, "NetworkPolicySpec", "NetworkPolicySpec", 1), an
// identity no-op (old and new strings are equal); it is removed here with
// no change to the output.
func (this *NetworkPolicy) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&NetworkPolicy{`,
		// Rename the embedded type and strip its leading "&" so the field
		// reads as an inline value.
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Spec:` + strings.Replace(this.Spec.String(), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// String returns a human-readable, single-line representation of this
// NetworkPolicyEgressRule.
//
// Fix: removed the identity no-ops
// strings.Replace(s, "NetworkPolicyPort", "NetworkPolicyPort", 1) and
// strings.Replace(s, "NetworkPolicyPeer", "NetworkPolicyPeer", 1) — old and
// new strings are equal, so the output is unchanged.
func (this *NetworkPolicyEgressRule) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForPorts := "[]NetworkPolicyPort{"
	for _, f := range this.Ports {
		// Strip the leading "&" so elements read as inline values.
		repeatedStringForPorts += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	repeatedStringForPorts += "}"
	repeatedStringForTo := "[]NetworkPolicyPeer{"
	for _, f := range this.To {
		repeatedStringForTo += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	repeatedStringForTo += "}"
	s := strings.Join([]string{`&NetworkPolicyEgressRule{`,
		`Ports:` + repeatedStringForPorts + `,`,
		`To:` + repeatedStringForTo + `,`,
		`}`,
	}, "")
	return s
}
// String returns a human-readable, single-line representation of this
// NetworkPolicyIngressRule.
//
// Fix: removed identity strings.Replace calls whose old and new strings
// were equal (no-ops); the output is unchanged.
func (this *NetworkPolicyIngressRule) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForPorts := "[]NetworkPolicyPort{"
	for _, f := range this.Ports {
		// Strip the leading "&" so elements read as inline values.
		repeatedStringForPorts += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	repeatedStringForPorts += "}"
	repeatedStringForFrom := "[]NetworkPolicyPeer{"
	for _, f := range this.From {
		repeatedStringForFrom += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	repeatedStringForFrom += "}"
	s := strings.Join([]string{`&NetworkPolicyIngressRule{`,
		`Ports:` + repeatedStringForPorts + `,`,
		`From:` + repeatedStringForFrom + `,`,
		`}`,
	}, "")
	return s
}
// String returns a human-readable, single-line representation of this
// NetworkPolicyList.
//
// Fix: removed the identity no-op
// strings.Replace(s, "NetworkPolicy", "NetworkPolicy", 1); the output is
// unchanged.
func (this *NetworkPolicyList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]NetworkPolicy{"
	for _, f := range this.Items {
		// Strip the leading "&" so elements read as inline values.
		repeatedStringForItems += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&NetworkPolicyList{`,
		// Rename the embedded meta type and strip its leading "&".
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
// String returns a human-readable, single-line representation of this
// NetworkPolicyPeer.
//
// Fix: removed the identity no-op
// strings.Replace(s, "IPBlock", "IPBlock", 1); the output is unchanged.
// (A nil IPBlock is handled by IPBlock.String's nil-receiver check.)
func (this *NetworkPolicyPeer) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&NetworkPolicyPeer{`,
		// Rename the selector type for readability in the output.
		`PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
		`IPBlock:` + this.IPBlock.String() + `,`,
		`}`,
	}, "")
	return s
}
// String returns a human-readable, single-line representation of this
// NetworkPolicyPort. Pointer fields render as "nil" or "*value".
func (this *NetworkPolicyPort) String() string {
	if this == nil {
		return "nil"
	}
	return "&NetworkPolicyPort{" +
		"Protocol:" + valueToStringGenerated(this.Protocol) + "," +
		"Port:" + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + "," +
		"EndPort:" + valueToStringGenerated(this.EndPort) + "," +
		"}"
}
// String returns a human-readable, single-line representation of this
// NetworkPolicySpec.
//
// Fix: removed identity strings.Replace calls whose old and new strings
// were equal ("NetworkPolicyIngressRule"/"NetworkPolicyEgressRule"
// replaced by themselves); the output is unchanged.
func (this *NetworkPolicySpec) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForIngress := "[]NetworkPolicyIngressRule{"
	for _, f := range this.Ingress {
		// Strip the leading "&" so elements read as inline values.
		repeatedStringForIngress += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	repeatedStringForIngress += "}"
	repeatedStringForEgress := "[]NetworkPolicyEgressRule{"
	for _, f := range this.Egress {
		repeatedStringForEgress += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	repeatedStringForEgress += "}"
	s := strings.Join([]string{`&NetworkPolicySpec{`,
		// Rename the selector type and strip its leading "&".
		`PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`,
		`Ingress:` + repeatedStringForIngress + `,`,
		`Egress:` + repeatedStringForEgress + `,`,
		`PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`,
		`}`,
	}, "")
	return s
}
// valueToStringGenerated formats a pointer for String() output: "nil" for
// a nil pointer, otherwise "*" followed by the dereferenced value.
func valueToStringGenerated(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	return fmt.Sprintf("*%v", reflect.Indirect(rv).Interface())
}
// Unmarshal decodes protobuf wire data into m, appending to any existing
// repeated-field values. Unknown fields are skipped; truncated input
// yields io.ErrUnexpectedEOF.
func (m *IPBlock) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IPBlock: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1 (CIDR): length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.CIDR = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2 (Except): repeated length-delimited string; appended.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Except = append(m.Except, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload, guarding against overflow.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire data into m. Unknown fields are skipped;
// truncated input yields io.ErrUnexpectedEOF.
func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1 (ObjectMeta): embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2 (Spec): embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload, guarding against overflow.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire data into m, appending decoded elements
// to the Ports and To slices. Unknown fields are skipped.
func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1 (Ports): repeated message; each element appended.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ports = append(m.Ports, NetworkPolicyPort{})
			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2 (To): repeated message; each element appended.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.To = append(m.To, NetworkPolicyPeer{})
			if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload, guarding against overflow.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire data into m, appending decoded elements
// to the Ports and From slices. Unknown fields are skipped.
func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1 (Ports): repeated message; each element appended.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ports = append(m.Ports, NetworkPolicyPort{})
			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2 (From): repeated message; each element appended.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.From = append(m.From, NetworkPolicyPeer{})
			if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload, guarding against overflow.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire data into m, appending decoded policies
// to the Items slice. Unknown fields are skipped.
func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1 (ListMeta): embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2 (Items): repeated message; each element appended.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Items = append(m.Items, NetworkPolicy{})
			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload, guarding against overflow.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire data into m. Optional message fields are
// lazily allocated on first occurrence; unknown fields are skipped.
func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1 (PodSelector): optional embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.PodSelector == nil {
				m.PodSelector = &v1.LabelSelector{}
			}
			if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2 (NamespaceSelector): optional embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.NamespaceSelector == nil {
				m.NamespaceSelector = &v1.LabelSelector{}
			}
			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3 (IPBlock): optional embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.IPBlock == nil {
				m.IPBlock = &IPBlock{}
			}
			if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload, guarding against overflow.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire data into m. Optional fields are lazily
// allocated on first occurrence; unknown fields are skipped.
func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1 (Protocol): optional string, stored via pointer.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
			m.Protocol = &s
			iNdEx = postIndex
		case 2:
			// Field 2 (Port): optional IntOrString message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Port == nil {
				m.Port = &intstr.IntOrString{}
			}
			if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3 (EndPort): optional int32 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.EndPort = &v
		default:
			// Unknown field: skip its payload, guarding against overflow.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Generated by protoc-gen-gogo: it walks the buffer one field at a
// time, dispatching on field number and validating each field's wire
// type, and skips (rather than rejects) unknown field numbers.
func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag (fieldNum<<3 | wireType) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 is "end group" and can never begin a field.
			return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: PodSelector — embedded message, length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow of iNdEx+msglen.
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Ingress — repeated message; each occurrence appends one rule.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{})
			if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Egress — repeated message; each occurrence appends one rule.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Egress = append(m.Egress, NetworkPolicyEgressRule{})
			if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: PolicyTypes — repeated string, stored as PolicyType values.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipGenerated advances over one complete protobuf field (including
// nested deprecated groups) starting at dAtA[0] and returns the number
// of bytes it spans. Unmarshal uses it to skip unknown field numbers.
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0 // current group-nesting depth
	for iNdEx < l {
		// Read the field tag varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint: scan forward to the terminating byte (high bit clear).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed 64-bit value.
			iNdEx += 8
		case 2:
			// Length-delimited: read the length varint, then skip that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthGenerated
			}
			iNdEx += length
		case 3:
			// Start-group tag: descend one level.
			depth++
		case 4:
			// End-group tag: must match a preceding start-group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupGenerated
			}
			depth--
		case 5:
			// Fixed 32-bit value.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		// A huge skip length can wrap iNdEx negative; treat as invalid.
		if iNdEx < 0 {
			return 0, ErrInvalidLengthGenerated
		}
		if depth == 0 {
			// One complete top-level field has been consumed.
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
// Sentinel errors returned by the generated Unmarshal/skip helpers above.
var (
	// ErrInvalidLengthGenerated reports a negative or overflowing length prefix.
	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowGenerated reports a varint wider than 64 bits.
	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
	// ErrUnexpectedEndOfGroupGenerated reports an end-group tag with no matching start.
	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2017 The Kubernetes Authors.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "networking.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource qualifies an unqualified resource name with this package's
// group ("networking.k8s.io"), returning the GroupResource.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
// addKnownTypes registers this package's NetworkPolicy types, plus the
// shared meta v1 types, with the given scheme under SchemeGroupVersion.
func addKnownTypes(scheme *runtime.Scheme) error {
	known := []runtime.Object{
		&NetworkPolicy{},
		&NetworkPolicyList{},
	}
	scheme.AddKnownTypes(SchemeGroupVersion, known...)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
intstr "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto copies the receiver into out so the two share no memory.
// out must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
	*out = *in
	// Detach the Except slice from the receiver's backing array.
	if in.Except != nil {
		out.Except = make([]string, len(in.Except))
		copy(out.Except, in.Except)
	}
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver itself is nil.
func (in *IPBlock) DeepCopy() *IPBlock {
	if in == nil {
		return nil
	}
	clone := new(IPBlock)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out so the two share no memory.
// out must be non-nil.
func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// ObjectMeta and Spec hold reference types; deep-copy them explicitly.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver itself is nil.
func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
	if in == nil {
		return nil
	}
	clone := new(NetworkPolicy)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyObject returns a deep copy of the receiver as a
// runtime.Object, or nil when the copy is nil.
func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
	c := in.DeepCopy()
	if c == nil {
		return nil
	}
	return c
}
// DeepCopyInto copies the receiver into out so the two share no memory.
// out must be non-nil.
func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) {
	*out = *in
	// Allocate fresh slices and deep-copy each element.
	if in.Ports != nil {
		out.Ports = make([]NetworkPolicyPort, len(in.Ports))
		for i := range in.Ports {
			in.Ports[i].DeepCopyInto(&out.Ports[i])
		}
	}
	if in.To != nil {
		out.To = make([]NetworkPolicyPeer, len(in.To))
		for i := range in.To {
			in.To[i].DeepCopyInto(&out.To[i])
		}
	}
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver itself is nil.
func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule {
	if in == nil {
		return nil
	}
	clone := new(NetworkPolicyEgressRule)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out so the two share no memory.
// out must be non-nil.
func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) {
	*out = *in
	// Allocate fresh slices and deep-copy each element.
	if in.Ports != nil {
		out.Ports = make([]NetworkPolicyPort, len(in.Ports))
		for i := range in.Ports {
			in.Ports[i].DeepCopyInto(&out.Ports[i])
		}
	}
	if in.From != nil {
		out.From = make([]NetworkPolicyPeer, len(in.From))
		for i := range in.From {
			in.From[i].DeepCopyInto(&out.From[i])
		}
	}
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver itself is nil.
func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule {
	if in == nil {
		return nil
	}
	clone := new(NetworkPolicyIngressRule)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out so the two share no memory.
// out must be non-nil.
func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Deep-copy every item into a freshly allocated slice.
	if in.Items != nil {
		out.Items = make([]NetworkPolicy, len(in.Items))
		for i := range in.Items {
			in.Items[i].DeepCopyInto(&out.Items[i])
		}
	}
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver itself is nil.
func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
	if in == nil {
		return nil
	}
	clone := new(NetworkPolicyList)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyObject returns a deep copy of the receiver as a
// runtime.Object, or nil when the copy is nil.
func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
	c := in.DeepCopy()
	if c == nil {
		return nil
	}
	return c
}
// DeepCopyInto copies the receiver into out so the two share no memory.
// out must be non-nil.
func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) {
	*out = *in
	// Give every non-nil pointer field its own freshly allocated copy.
	if in.PodSelector != nil {
		out.PodSelector = new(metav1.LabelSelector)
		in.PodSelector.DeepCopyInto(out.PodSelector)
	}
	if in.NamespaceSelector != nil {
		out.NamespaceSelector = new(metav1.LabelSelector)
		in.NamespaceSelector.DeepCopyInto(out.NamespaceSelector)
	}
	if in.IPBlock != nil {
		out.IPBlock = new(IPBlock)
		in.IPBlock.DeepCopyInto(out.IPBlock)
	}
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver itself is nil.
func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer {
	if in == nil {
		return nil
	}
	clone := new(NetworkPolicyPeer)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out so the two share no memory.
// out must be non-nil.
func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) {
	*out = *in
	// Re-point each non-nil pointer field at a private copy of its value.
	if in.Protocol != nil {
		p := *in.Protocol
		out.Protocol = &p
	}
	if in.Port != nil {
		v := *in.Port
		out.Port = &v
	}
	if in.EndPort != nil {
		e := *in.EndPort
		out.EndPort = &e
	}
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver itself is nil.
func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort {
	if in == nil {
		return nil
	}
	clone := new(NetworkPolicyPort)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out so the two share no memory.
// out must be non-nil.
func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
	*out = *in
	in.PodSelector.DeepCopyInto(&out.PodSelector)
	// Allocate fresh slices; rule elements need element-wise deep copies,
	// PolicyTypes holds plain strings and can be copied wholesale.
	if in.Ingress != nil {
		out.Ingress = make([]NetworkPolicyIngressRule, len(in.Ingress))
		for i := range in.Ingress {
			in.Ingress[i].DeepCopyInto(&out.Ingress[i])
		}
	}
	if in.Egress != nil {
		out.Egress = make([]NetworkPolicyEgressRule, len(in.Egress))
		for i := range in.Egress {
			in.Egress[i].DeepCopyInto(&out.Egress[i])
		}
	}
	if in.PolicyTypes != nil {
		out.PolicyTypes = make([]PolicyType, len(in.PolicyTypes))
		copy(out.PolicyTypes, in.PolicyTypes)
	}
}
// DeepCopy returns a newly allocated deep copy of the receiver, or nil
// when the receiver itself is nil.
func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
	if in == nil {
		return nil
	}
	clone := new(NetworkPolicySpec)
	in.DeepCopyInto(clone)
	return clone
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package v1
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; a nil other is unequal.
func (in *IPBlock) DeepEqual(other *IPBlock) bool {
	if other == nil {
		return false
	}

	if in.CIDR != other.CIDR {
		return false
	}
	// Compare Except element-wise. The guard enters the comparison when
	// at least one side is non-nil; both lengths are then compared, so a
	// nil slice and an empty non-nil slice compare equal (both length 0).
	if ((in.Except != nil) && (other.Except != nil)) || ((in.Except == nil) != (other.Except == nil)) {
		in, other := &in.Except, &other.Except
		// NOTE(review): `other` is the address of a struct field and is
		// never nil here; this guard is dead code kept from the generator.
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	return true
}
// DeepEqual deeply compares the receiver with other, field by field.
// in must be non-nil; a nil other is unequal.
func (in *NetworkPolicy) DeepEqual(other *NetworkPolicy) bool {
	if other == nil {
		return false
	}
	// Short-circuits in the same field order as the generated original.
	return in.TypeMeta == other.TypeMeta &&
		in.ObjectMeta.DeepEqual(&other.ObjectMeta) &&
		in.Spec.DeepEqual(&other.Spec)
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; a nil other is unequal.
func (in *NetworkPolicyEgressRule) DeepEqual(other *NetworkPolicyEgressRule) bool {
	if other == nil {
		return false
	}

	// Compare Ports element-wise; nil and empty non-nil slices compare
	// equal because only lengths and elements are checked.
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		// NOTE(review): address of a struct field is never nil; dead guard
		// kept verbatim from the generator.
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	// Compare To element-wise with the same nil/empty semantics.
	if ((in.To != nil) && (other.To != nil)) || ((in.To == nil) != (other.To == nil)) {
		in, other := &in.To, &other.To
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; a nil other is unequal.
func (in *NetworkPolicyIngressRule) DeepEqual(other *NetworkPolicyIngressRule) bool {
	if other == nil {
		return false
	}

	// Compare Ports element-wise; nil and empty non-nil slices compare
	// equal because only lengths and elements are checked.
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		// NOTE(review): address of a struct field is never nil; dead guard
		// kept verbatim from the generator.
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	// Compare From element-wise with the same nil/empty semantics.
	if ((in.From != nil) && (other.From != nil)) || ((in.From == nil) != (other.From == nil)) {
		in, other := &in.From, &other.From
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; a nil other is unequal.
func (in *NetworkPolicyList) DeepEqual(other *NetworkPolicyList) bool {
	if other == nil {
		return false
	}

	if in.TypeMeta != other.TypeMeta {
		return false
	}

	if !in.ListMeta.DeepEqual(&other.ListMeta) {
		return false
	}

	// Compare Items element-wise; nil and empty non-nil slices compare
	// equal because only lengths and elements are checked.
	if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
		in, other := &in.Items, &other.Items
		// NOTE(review): address of a struct field is never nil; dead guard
		// kept verbatim from the generator.
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	return true
}
// DeepEqual deeply compares the receiver with other. For each pointer
// field, differing nil-ness is unequal; two non-nil values are compared
// via their own DeepEqual. in must be non-nil; a nil other is unequal.
func (in *NetworkPolicyPeer) DeepEqual(other *NetworkPolicyPeer) bool {
	if other == nil {
		return false
	}
	if (in.PodSelector == nil) != (other.PodSelector == nil) {
		return false
	}
	if in.PodSelector != nil && !in.PodSelector.DeepEqual(other.PodSelector) {
		return false
	}
	if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) {
		return false
	}
	if in.NamespaceSelector != nil && !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) {
		return false
	}
	if (in.IPBlock == nil) != (other.IPBlock == nil) {
		return false
	}
	if in.IPBlock != nil && !in.IPBlock.DeepEqual(other.IPBlock) {
		return false
	}
	return true
}
// DeepEqual deeply compares the receiver with other. For each pointer
// field, differing nil-ness is unequal; non-nil values are compared by
// dereference (Protocol, EndPort) or DeepEqual (Port). in must be
// non-nil; a nil other is unequal.
func (in *NetworkPolicyPort) DeepEqual(other *NetworkPolicyPort) bool {
	if other == nil {
		return false
	}
	if (in.Protocol == nil) != (other.Protocol == nil) {
		return false
	}
	if in.Protocol != nil && *in.Protocol != *other.Protocol {
		return false
	}
	if (in.Port == nil) != (other.Port == nil) {
		return false
	}
	if in.Port != nil && !in.Port.DeepEqual(other.Port) {
		return false
	}
	if (in.EndPort == nil) != (other.EndPort == nil) {
		return false
	}
	if in.EndPort != nil && *in.EndPort != *other.EndPort {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; a nil other is unequal.
func (in *NetworkPolicySpec) DeepEqual(other *NetworkPolicySpec) bool {
	if other == nil {
		return false
	}

	if !in.PodSelector.DeepEqual(&other.PodSelector) {
		return false
	}

	// Slice fields are compared by length and element; the guard enters
	// the comparison when at least one side is non-nil, so a nil slice
	// and an empty non-nil slice compare equal (both length 0).
	if ((in.Ingress != nil) && (other.Ingress != nil)) || ((in.Ingress == nil) != (other.Ingress == nil)) {
		in, other := &in.Ingress, &other.Ingress
		// NOTE(review): address of a struct field is never nil; dead guard
		// kept verbatim from the generator.
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	if ((in.Egress != nil) && (other.Egress != nil)) || ((in.Egress == nil) != (other.Egress == nil)) {
		in, other := &in.Egress, &other.Egress
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}

	// PolicyTypes holds plain string values, compared with ==.
	if ((in.PolicyTypes != nil) && (other.PolicyTypes != nil)) || ((in.PolicyTypes == nil) != (other.PolicyTypes == nil)) {
		in, other := &in.PolicyTypes, &other.PolicyTypes
		if other == nil {
			return false
		}

		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}

	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
apiextensionsv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
apiextensionsv1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
// init registers the unversioned meta v1 types and every scheme added
// to localSchemeBuilder into the package-level Scheme at load time.
// utilruntime.Must panics if any registration fails.
func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(Scheme))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
http "net/http"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme"
apiextensionsv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1"
rest "k8s.io/client-go/rest"
)
type ApiextensionsV1Interface interface {
RESTClient() rest.Interface
CustomResourceDefinitionsGetter
}
// ApiextensionsV1Client is used to interact with features provided by the apiextensions.k8s.io group.
type ApiextensionsV1Client struct {
restClient rest.Interface
}
// CustomResourceDefinitions returns a client for working with
// CustomResourceDefinition resources, backed by this client's REST client.
func (c *ApiextensionsV1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface {
	crds := newCustomResourceDefinitions(c)
	return crds
}
// NewForConfig creates a new ApiextensionsV1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*ApiextensionsV1Client, error) {
	// Work on a shallow copy so the caller's config is never mutated.
	config := *c
	setConfigDefaults(&config)
	httpClient, err := rest.HTTPClientFor(&config)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new ApiextensionsV1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiextensionsV1Client, error) {
	// Work on a shallow copy so the caller's config is never mutated.
	config := *c
	setConfigDefaults(&config)
	client, err := rest.RESTClientForConfigAndClient(&config, h)
	if err != nil {
		return nil, err
	}
	return &ApiextensionsV1Client{client}, nil
}
// NewForConfigOrDie creates a new ApiextensionsV1Client for the given
// config and panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1Client {
	client, err := NewForConfig(c)
	if err == nil {
		return client
	}
	panic(err)
}
// New creates a new ApiextensionsV1Client wrapping the given RESTClient.
func New(c rest.Interface) *ApiextensionsV1Client {
	client := &ApiextensionsV1Client{restClient: c}
	return client
}
// setConfigDefaults fills in the rest.Config fields this client needs:
// the apiextensions/v1 group version, the "/apis" path, the generated
// codec factory, and a default user agent when none is set.
func setConfigDefaults(config *rest.Config) {
	gv := apiextensionsv1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()

	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
}
// RESTClient returns the underlying rest.Interface used to communicate
// with the API server, or nil when the receiver itself is nil.
func (c *ApiextensionsV1Client) RESTClient() rest.Interface {
	if c != nil {
		return c.restClient
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme"
apiextensionsv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface.
// A group's client should implement this interface.
type CustomResourceDefinitionsGetter interface {
CustomResourceDefinitions() CustomResourceDefinitionInterface
}
// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources.
type CustomResourceDefinitionInterface interface {
Create(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts metav1.CreateOptions) (*apiextensionsv1.CustomResourceDefinition, error)
Update(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts metav1.UpdateOptions) (*apiextensionsv1.CustomResourceDefinition, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiextensionsv1.CustomResourceDefinition, error)
List(ctx context.Context, opts metav1.ListOptions) (*apiextensionsv1.CustomResourceDefinitionList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiextensionsv1.CustomResourceDefinition, err error)
CustomResourceDefinitionExpansion
}
// customResourceDefinitions implements CustomResourceDefinitionInterface
type customResourceDefinitions struct {
*gentype.ClientWithList[*apiextensionsv1.CustomResourceDefinition, *apiextensionsv1.CustomResourceDefinitionList]
}
// newCustomResourceDefinitions returns a CustomResourceDefinitions client
// built on the generic gentype list client: cluster-scoped (empty
// namespace), resource name "customresourcedefinitions", using the given
// client's REST transport and the package codec.
func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions {
	return &customResourceDefinitions{
		gentype.NewClientWithList[*apiextensionsv1.CustomResourceDefinition, *apiextensionsv1.CustomResourceDefinitionList](
			"customresourcedefinitions",
			c.RESTClient(),
			scheme.ParameterCodec,
			"", // cluster-scoped: no namespace
			func() *apiextensionsv1.CustomResourceDefinition { return &apiextensionsv1.CustomResourceDefinition{} },
			func() *apiextensionsv1.CustomResourceDefinitionList {
				return &apiextensionsv1.CustomResourceDefinitionList{}
			},
		),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright The Kubernetes Authors.
package clientset
import (
"fmt"
"net/http"
apiextclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
slim_apiextensionsv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1"
)
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*apiextclientset.Clientset
apiextensionsV1beta1 *apiextensionsv1beta1.ApiextensionsV1beta1Client
apiextensionsV1 *apiextensionsv1.ApiextensionsV1Client
}
// ApiextensionsV1 retrieves the ApiextensionsV1Client (the slim-backed
// wrapper stored on this Clientset).
func (c *Clientset) ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface {
	client := c.apiextensionsV1
	return client
}
// ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client stored
// on this Clientset.
func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface {
	client := c.apiextensionsV1beta1
	return client
}
// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
	configShallowCopy := *c
	// Derive a token-bucket rate limiter from QPS/Burst when the caller
	// supplied none; a positive QPS with a non-positive Burst is rejected.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		if configShallowCopy.Burst <= 0 {
			return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
		}
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	// Base upstream apiextensions clientset.
	cs.Clientset, err = apiextclientset.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}

	// Wrap extensionsV1 with our own implementation: the v1 typed client
	// is rebuilt on top of the slim client's REST transport so v1 calls
	// go through the slim types' serialization.
	extensionsV1, err := slim_apiextensionsv1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.apiextensionsV1 = apiextensionsv1.New(extensionsV1.RESTClient())

	return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	var cs Clientset
	cs.Clientset = apiextclientset.NewForConfigOrDie(c)

	// Wrap extensionsV1 with our own implementation (slim REST transport),
	// mirroring NewForConfigAndClient.
	cs.apiextensionsV1 = apiextensionsv1.New(slim_apiextensionsv1.NewForConfigOrDie(c).RESTClient())

	return &cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	var cs Clientset
	cs.Clientset = apiextclientset.New(c)

	// Wrap extensionsV1 with our own implementation (slim REST transport),
	// mirroring NewForConfigAndClient.
	cs.apiextensionsV1 = apiextensionsv1.New(slim_apiextensionsv1.New(c).RESTClient())

	return &cs
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2019 The Kubernetes Authors.
package v1
import (
"k8s.io/apimachinery/pkg/runtime"
)
// addDefaultingFuncs registers this package's generated defaulting
// functions (RegisterDefaults) with the given scheme.
func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto
package v1
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} }
func (*CustomResourceDefinition) ProtoMessage() {}
func (*CustomResourceDefinition) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{0}
}
func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomResourceDefinition.Merge(m, src)
}
func (m *CustomResourceDefinition) XXX_Size() int {
return m.Size()
}
func (m *CustomResourceDefinition) XXX_DiscardUnknown() {
xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m)
}
var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo
func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} }
func (*CustomResourceDefinitionList) ProtoMessage() {}
func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{1}
}
func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src)
}
func (m *CustomResourceDefinitionList) XXX_Size() int {
return m.Size()
}
func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() {
xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m)
}
var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for CustomResourceSubresourceScale
// (descriptor index 2); same pattern as the other messages in this file.
func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} }
func (*CustomResourceSubresourceScale) ProtoMessage() {}
func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{2}
}
func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src)
}
func (m *CustomResourceSubresourceScale) XXX_Size() int {
return m.Size()
}
func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() {
xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m)
}
var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for CustomResourceSubresourceStatus
// (descriptor index 3); same pattern as the other messages in this file.
func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} }
func (*CustomResourceSubresourceStatus) ProtoMessage() {}
func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{3}
}
func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src)
}
func (m *CustomResourceSubresourceStatus) XXX_Size() int {
return m.Size()
}
func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() {
xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m)
}
var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for CustomResourceSubresources
// (descriptor index 4); same pattern as the other messages in this file.
func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} }
func (*CustomResourceSubresources) ProtoMessage() {}
func (*CustomResourceSubresources) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{4}
}
func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomResourceSubresources.Merge(m, src)
}
func (m *CustomResourceSubresources) XXX_Size() int {
return m.Size()
}
func (m *CustomResourceSubresources) XXX_DiscardUnknown() {
xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m)
}
var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for CustomResourceValidation
// (descriptor index 5); same pattern as the other messages in this file.
func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} }
func (*CustomResourceValidation) ProtoMessage() {}
func (*CustomResourceValidation) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{5}
}
func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *CustomResourceValidation) XXX_Merge(src proto.Message) {
xxx_messageInfo_CustomResourceValidation.Merge(m, src)
}
func (m *CustomResourceValidation) XXX_Size() int {
return m.Size()
}
func (m *CustomResourceValidation) XXX_DiscardUnknown() {
xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m)
}
var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for ExternalDocumentation
// (descriptor index 6); same pattern as the other messages in this file.
func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} }
func (*ExternalDocumentation) ProtoMessage() {}
func (*ExternalDocumentation) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{6}
}
func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ExternalDocumentation) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExternalDocumentation.Merge(m, src)
}
func (m *ExternalDocumentation) XXX_Size() int {
return m.Size()
}
func (m *ExternalDocumentation) XXX_DiscardUnknown() {
xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m)
}
var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for JSON (descriptor index 7);
// same pattern as the other messages in this file.
func (m *JSON) Reset() { *m = JSON{} }
func (*JSON) ProtoMessage() {}
func (*JSON) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{7}
}
func (m *JSON) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *JSON) XXX_Merge(src proto.Message) {
xxx_messageInfo_JSON.Merge(m, src)
}
func (m *JSON) XXX_Size() int {
return m.Size()
}
func (m *JSON) XXX_DiscardUnknown() {
xxx_messageInfo_JSON.DiscardUnknown(m)
}
var xxx_messageInfo_JSON proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for JSONSchemaProps (descriptor index 8);
// same pattern as the other messages in this file.
func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} }
func (*JSONSchemaProps) ProtoMessage() {}
func (*JSONSchemaProps) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{8}
}
func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *JSONSchemaProps) XXX_Merge(src proto.Message) {
xxx_messageInfo_JSONSchemaProps.Merge(m, src)
}
func (m *JSONSchemaProps) XXX_Size() int {
return m.Size()
}
func (m *JSONSchemaProps) XXX_DiscardUnknown() {
xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m)
}
var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for JSONSchemaPropsOrArray
// (descriptor index 9); same pattern as the other messages in this file.
func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} }
func (*JSONSchemaPropsOrArray) ProtoMessage() {}
func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{9}
}
func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) {
xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src)
}
func (m *JSONSchemaPropsOrArray) XXX_Size() int {
return m.Size()
}
func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() {
xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m)
}
var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for JSONSchemaPropsOrBool
// (descriptor index 10); same pattern as the other messages in this file.
func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} }
func (*JSONSchemaPropsOrBool) ProtoMessage() {}
func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{10}
}
func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) {
xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src)
}
func (m *JSONSchemaPropsOrBool) XXX_Size() int {
return m.Size()
}
func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() {
xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m)
}
var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for JSONSchemaPropsOrStringArray
// (descriptor index 11); same pattern as the other messages in this file.
func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} }
func (*JSONSchemaPropsOrStringArray) ProtoMessage() {}
func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{11}
}
func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) {
xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src)
}
func (m *JSONSchemaPropsOrStringArray) XXX_Size() int {
return m.Size()
}
func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() {
xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m)
}
var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo
// Generated gogo/protobuf plumbing for ValidationRule (descriptor index 12);
// same pattern as the other messages in this file.
func (m *ValidationRule) Reset() { *m = ValidationRule{} }
func (*ValidationRule) ProtoMessage() {}
func (*ValidationRule) Descriptor() ([]byte, []int) {
return fileDescriptor_2ae25e910fba1c55, []int{12}
}
func (m *ValidationRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ValidationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *ValidationRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_ValidationRule.Merge(m, src)
}
func (m *ValidationRule) XXX_Size() int {
return m.Size()
}
func (m *ValidationRule) XXX_DiscardUnknown() {
xxx_messageInfo_ValidationRule.DiscardUnknown(m)
}
var xxx_messageInfo_ValidationRule proto.InternalMessageInfo
// Registers every message (and map entry) type of this file with the gogo proto
// registry under its fully-qualified proto name, so the runtime can resolve
// types by name (e.g. for Any unpacking and reflection-based tooling).
func init() {
proto.RegisterType((*CustomResourceDefinition)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceDefinition")
proto.RegisterType((*CustomResourceDefinitionList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceDefinitionList")
proto.RegisterType((*CustomResourceSubresourceScale)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceSubresourceScale")
proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceSubresourceStatus")
proto.RegisterType((*CustomResourceSubresources)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceSubresources")
proto.RegisterType((*CustomResourceValidation)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceValidation")
proto.RegisterType((*ExternalDocumentation)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.ExternalDocumentation")
proto.RegisterType((*JSON)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSON")
proto.RegisterType((*JSONSchemaProps)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSONSchemaProps")
proto.RegisterMapType((JSONSchemaDefinitions)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSONSchemaProps.DefinitionsEntry")
proto.RegisterMapType((JSONSchemaDependencies)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSONSchemaProps.DependenciesEntry")
proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSONSchemaProps.PatternPropertiesEntry")
proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSONSchemaProps.PropertiesEntry")
proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSONSchemaPropsOrArray")
proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSONSchemaPropsOrBool")
proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.JSONSchemaPropsOrStringArray")
proto.RegisterType((*ValidationRule)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.ValidationRule")
}
// Registers the compressed file descriptor under the .proto source path so the
// descriptor can be looked up by filename at runtime.
func init() {
proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto", fileDescriptor_2ae25e910fba1c55)
}
// Embedded, gzip-compressed serialized FileDescriptorProto for generated.proto.
// Opaque generated binary data: returned by every Descriptor() method above and
// registered via proto.RegisterFile. Do not modify by hand.
var fileDescriptor_2ae25e910fba1c55 = []byte{
// 1977 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x4b, 0x73, 0x1b, 0x59,
0x15, 0x76, 0xcb, 0x2f, 0xf9, 0xc8, 0x8e, 0xad, 0x9b, 0xd8, 0x74, 0x34, 0x19, 0x49, 0x51, 0x98,
0xe0, 0x30, 0x41, 0xae, 0x84, 0x82, 0x49, 0xf1, 0xa8, 0x60, 0x8d, 0x15, 0xc8, 0x8c, 0x15, 0xb9,
0xae, 0x27, 0xc1, 0x40, 0x51, 0xcc, 0x95, 0xfa, 0x4a, 0xee, 0x71, 0xbf, 0xa6, 0xfb, 0xb6, 0x23,
0x6d, 0xa8, 0x59, 0x50, 0xc5, 0x86, 0x1a, 0x66, 0x01, 0x14, 0x1b, 0x58, 0x51, 0xac, 0x58, 0xc0,
0x02, 0x96, 0xac, 0xb3, 0x9c, 0x2a, 0x36, 0xb3, 0x52, 0x11, 0xf1, 0x23, 0xa8, 0x4a, 0xb1, 0xa0,
0xee, 0xa3, 0x1f, 0x7a, 0x78, 0x86, 0x10, 0x79, 0xb2, 0x89, 0x5b, 0xe7, 0x3b, 0xf7, 0x7c, 0xdf,
0x3d, 0x7d, 0x1f, 0xa7, 0x4f, 0xa0, 0xd9, 0x35, 0xd9, 0x71, 0xd8, 0xaa, 0xb6, 0x5d, 0x7b, 0xa7,
0x6d, 0x5a, 0x66, 0x18, 0xff, 0xf1, 0x4e, 0xba, 0x3b, 0x27, 0x77, 0x82, 0x9d, 0xc0, 0x32, 0x6d,
0xf1, 0x40, 0x3c, 0x53, 0xfc, 0x43, 0x7b, 0x8c, 0x3a, 0x81, 0xe9, 0x3a, 0xc1, 0xce, 0xe9, 0xad,
0x9d, 0x2e, 0x75, 0xa8, 0x4f, 0x18, 0x35, 0xaa, 0x9e, 0xef, 0x32, 0x17, 0xdd, 0x4d, 0x02, 0x56,
0x65, 0xa4, 0xe8, 0x8f, 0x77, 0xd2, 0xad, 0x9e, 0xdc, 0x09, 0xaa, 0x3c, 0xa0, 0x78, 0xe0, 0x01,
0xab, 0x23, 0x01, 0xab, 0xa7, 0xb7, 0x0a, 0xf7, 0x9e, 0x53, 0x91, 0x4d, 0x19, 0x99, 0x22, 0xa4,
0xf0, 0x95, 0x54, 0x9c, 0xae, 0xdb, 0x75, 0x77, 0x84, 0xb9, 0x15, 0x76, 0xc4, 0x2f, 0xf1, 0x43,
0x3c, 0x29, 0x77, 0x1e, 0xb0, 0x6a, 0xba, 0x3c, 0xa6, 0x4d, 0xda, 0xc7, 0xa6, 0x43, 0xfd, 0xbe,
0x60, 0xf4, 0x43, 0x87, 0x99, 0x36, 0x9d, 0x88, 0xff, 0xf5, 0xcf, 0x1a, 0x10, 0xb4, 0x8f, 0xa9,
0x4d, 0xc6, 0xc7, 0x55, 0x3e, 0xd4, 0x40, 0x7f, 0x33, 0x0c, 0x98, 0x6b, 0x63, 0x1a, 0xb8, 0xa1,
0xdf, 0xa6, 0x7b, 0xb4, 0x63, 0x3a, 0x26, 0x33, 0x5d, 0x07, 0xf9, 0x90, 0xe5, 0xf3, 0x31, 0x08,
0x23, 0xba, 0x56, 0xd6, 0xb6, 0x73, 0xb7, 0x6b, 0xd5, 0xe7, 0x4c, 0x28, 0x1f, 0x5f, 0x3d, 0xbd,
0x55, 0x6d, 0xb6, 0xde, 0xa3, 0x6d, 0xd6, 0xa0, 0x8c, 0xd4, 0xd0, 0x93, 0x41, 0x69, 0x6e, 0x38,
0x28, 0x41, 0x62, 0xc3, 0x31, 0x4f, 0xe5, 0xc3, 0x0c, 0x5c, 0x39, 0x4b, 0xd0, 0xbe, 0x19, 0x30,
0xe4, 0x4c, 0x88, 0xfa, 0xce, 0xff, 0x2b, 0x8a, 0xc7, 0x13, 0x92, 0x36, 0x94, 0xa4, 0x6c, 0x64,
0x49, 0x04, 0xa1, 0x9f, 0xc2, 0xa2, 0xc9, 0xa8, 0x1d, 0xe8, 0x99, 0xf2, 0xfc, 0x76, 0xee, 0xf6,
0x0f, 0xaa, 0x2f, 0xb8, 0xa4, 0xaa, 0x67, 0xcd, 0xae, 0xb6, 0xa6, 0x54, 0x2c, 0xde, 0xe7, 0x7c,
0x58, 0xd2, 0x56, 0xfe, 0xad, 0x41, 0x71, 0x74, 0xc8, 0x61, 0xd8, 0xf2, 0xa3, 0xc7, 0x36, 0xb1,
0x28, 0xda, 0x83, 0x8d, 0xc0, 0xa3, 0x6d, 0x4c, 0x3d, 0xcb, 0x6c, 0x93, 0xe0, 0x80, 0xb0, 0x63,
0x91, 0x9a, 0x95, 0x9a, 0xae, 0x42, 0x6e, 0x1c, 0x8e, 0xe1, 0x78, 0x62, 0x04, 0x7a, 0x0b, 0x50,
0xc0, 0x08, 0x0b, 0x83, 0x91, 0x38, 0x19, 0x11, 0xa7, 0xa0, 0xe2, 0xa0, 0xc3, 0x09, 0x0f, 0x3c,
0x65, 0x14, 0x7a, 0x13, 0xf2, 0x16, 0x69, 0x51, 0xeb, 0x90, 0x5a, 0xb4, 0xcd, 0x5c, 0x5f, 0x84,
0x9a, 0x17, 0xa1, 0x36, 0x87, 0x83, 0x52, 0x7e, 0x7f, 0x1c, 0xc4, 0x93, 0xfe, 0x95, 0xab, 0x50,
0x3a, 0x7b, 0xe2, 0x82, 0xb3, 0xf2, 0xc7, 0x0c, 0x14, 0xce, 0xf4, 0x09, 0xd0, 0xcf, 0x34, 0x58,
0x92, 0xea, 0xd4, 0x52, 0x79, 0x77, 0xc6, 0x6f, 0x6f, 0x42, 0x51, 0x0d, 0x86, 0x83, 0xd2, 0x92,
0xca, 0x92, 0xe2, 0x46, 0x1f, 0x68, 0xb0, 0x18, 0xf0, 0x37, 0x25, 0xb2, 0x99, 0xbb, 0xfd, 0x93,
0x73, 0x54, 0xc1, 0x69, 0x6a, 0x2b, 0x7c, 0x15, 0x89, 0x47, 0x2c, 0x89, 0x2b, 0x7f, 0x9a, 0xd8,
0xe7, 0x8f, 0x88, 0x65, 0x1a, 0x44, 0xec, 0xf3, 0x5f, 0x6a, 0xb0, 0xee, 0x7a, 0xd4, 0xd9, 0x3d,
0xb8, 0xff, 0xe8, 0xab, 0x87, 0xe2, 0xa0, 0x50, 0xf9, 0x3a, 0x78, 0x61, 0xa5, 0x6f, 0x1d, 0x36,
0x1f, 0xc8, 0x90, 0x07, 0xbe, 0xeb, 0x05, 0xb5, 0x8b, 0xc3, 0x41, 0x69, 0xbd, 0x39, 0x4a, 0x86,
0xc7, 0xd9, 0x2b, 0x36, 0x6c, 0xd6, 0x7b, 0x8c, 0xfa, 0x0e, 0xb1, 0xf6, 0xdc, 0x76, 0x68, 0x53,
0x87, 0x49, 0xa9, 0x5f, 0x83, 0x9c, 0x41, 0x83, 0xb6, 0x6f, 0x7a, 0xfc, 0xa7, 0x5a, 0xe5, 0x17,
0xd5, 0xea, 0xcc, 0xed, 0x25, 0x10, 0x4e, 0xfb, 0xa1, 0x57, 0x61, 0x3e, 0xf4, 0x2d, 0xb5, 0x98,
0x73, 0xca, 0x7d, 0xfe, 0x21, 0xde, 0xc7, 0xdc, 0x5e, 0xb9, 0x0a, 0x0b, 0x5c, 0x27, 0xba, 0x0c,
0xf3, 0x3e, 0x79, 0x2c, 0xa2, 0xae, 0xd6, 0x96, 0xb9, 0x0b, 0x26, 0x8f, 0x31, 0xb7, 0x55, 0xfe,
0x73, 0x15, 0xd6, 0xc7, 0xe6, 0x82, 0x0a, 0x90, 0x31, 0x0d, 0xa5, 0x01, 0x54, 0xd0, 0xcc, 0xfd,
0x3d, 0x9c, 0x31, 0x0d, 0xf4, 0x06, 0x2c, 0xc9, 0x23, 0x57, 0x91, 0x96, 0x14, 0xbe, 0x24, 0x03,
0x3c, 0x1b, 0x94, 0xd6, 0x92, 0x70, 0x5c, 0x88, 0x72, 0x17, 0x1a, 0x68, 0x47, 0x6d, 0x16, 0xa9,
0x81, 0x76, 0x30, 0xb7, 0x8d, 0x4f, 0x7e, 0xe1, 0x7f, 0x9c, 0x7c, 0x19, 0x16, 0x58, 0xdf, 0xa3,
0xfa, 0xa2, 0xf0, 0x5f, 0x55, 0xfe, 0x0b, 0xef, 0xf4, 0x3d, 0x8a, 0x05, 0x82, 0xae, 0xc3, 0x52,
0xc7, 0xf5, 0x6d, 0xc2, 0xf4, 0x25, 0xe1, 0x73, 0x21, 0x12, 0x7b, 0x4f, 0x58, 0xb1, 0x42, 0xd1,
0x35, 0x58, 0x64, 0x26, 0xb3, 0xa8, 0xbe, 0x2c, 0xdc, 0xe2, 0x03, 0xeb, 0x1d, 0x6e, 0xc4, 0x12,
0x43, 0x16, 0x2c, 0x1b, 0xb4, 0x43, 0x42, 0x8b, 0xe9, 0x59, 0xb1, 0x88, 0xea, 0x33, 0x59, 0x44,
0xb5, 0xdc, 0x70, 0x50, 0x5a, 0xde, 0x93, 0x91, 0x71, 0x44, 0x81, 0x5e, 0x83, 0x65, 0x9b, 0xf4,
0x4c, 0x3b, 0xb4, 0xf5, 0x95, 0xb2, 0xb6, 0xad, 0x49, 0xb7, 0x86, 0x34, 0xe1, 0x08, 0xe3, 0x47,
0x24, 0xed, 0xb5, 0xad, 0x30, 0x30, 0x4f, 0xa9, 0x02, 0x75, 0x28, 0x6b, 0xdb, 0xd9, 0xe4, 0x88,
0xac, 0x8f, 0xe1, 0x78, 0x62, 0x84, 0x20, 0x33, 0x1d, 0x31, 0x38, 0x97, 0x22, 0x93, 0x26, 0x1c,
0x61, 0xa3, 0x64, 0xca, 0x7f, 0xf5, 0x2c, 0x32, 0x35, 0x78, 0x62, 0x04, 0x7a, 0x1d, 0x56, 0x6c,
0xd2, 0xdb, 0xa7, 0x4e, 0x97, 0x1d, 0xeb, 0x6b, 0x65, 0x6d, 0x7b, 0xbe, 0xb6, 0x36, 0x1c, 0x94,
0x56, 0x1a, 0x91, 0x11, 0x27, 0xb8, 0x70, 0x36, 0x1d, 0xe5, 0x7c, 0x21, 0xe5, 0x1c, 0x19, 0x71,
0x82, 0xa3, 0x1b, 0xb0, 0xec, 0x11, 0xc6, 0xb7, 0x97, 0xbe, 0x2e, 0x5e, 0xe4, 0xba, 0x92, 0xb5,
0x7c, 0x20, 0xcd, 0x38, 0xc2, 0xd1, 0x36, 0x64, 0x6d, 0xd2, 0x13, 0x17, 0x92, 0xbe, 0x21, 0xc2,
0xae, 0xf2, 0x7b, 0xb2, 0xa1, 0x6c, 0x38, 0x46, 0x85, 0xa7, 0xe9, 0x48, 0xcf, 0x7c, 0xca, 0x53,
0xd9, 0x70, 0x8c, 0xf2, 0x65, 0x1c, 0x3a, 0xe6, 0xfb, 0x21, 0x95, 0xce, 0x48, 0x64, 0x26, 0x5e,
0xc6, 0x0f, 0x13, 0x08, 0xa7, 0xfd, 0x50, 0x15, 0xc0, 0x0e, 0x2d, 0x66, 0x7a, 0x16, 0x6d, 0x76,
0xf4, 0x8b, 0x22, 0xff, 0x17, 0x78, 0x1d, 0xd1, 0x88, 0xad, 0x38, 0xe5, 0x81, 0xba, 0xb0, 0x40,
0x9d, 0xd0, 0xd6, 0x2f, 0x89, 0x7b, 0x7b, 0x46, 0x8b, 0x30, 0xde, 0x3d, 0x75, 0x27, 0xb4, 0xb1,
0x20, 0x40, 0x6f, 0xc0, 0x9a, 0x4d, 0x7a, 0xfc, 0x48, 0xa0, 0x3e, 0x33, 0x69, 0xa0, 0x6f, 0x8a,
0xe9, 0xe7, 0x87, 0x83, 0xd2, 0x5a, 0x23, 0x0d, 0xe0, 0x51, 0x3f, 0x31, 0xd0, 0x74, 0x52, 0x03,
0xb7, 0x52, 0x03, 0xd3, 0x00, 0x1e, 0xf5, 0xe3, 0xb9, 0xf6, 0xe9, 0xfb, 0xa1, 0xe9, 0x53, 0x43,
0xff, 0x42, 0x79, 0x9e, 0xef, 0x6a, 0x9e, 0x6b, 0xac, 0x6c, 0x38, 0x46, 0x51, 0x2f, 0xaa, 0x5e,
0x74, 0xb1, 0x15, 0xbf, 0x3f, 0xeb, 0xf3, 0xbc, 0xe9, 0xef, 0xfa, 0x3e, 0xe9, 0xcb, 0x1b, 0x27,
0x5d, 0xb7, 0xa0, 0x10, 0x16, 0x89, 0x65, 0x35, 0x3b, 0xfa, 0x65, 0x91, 0xff, 0xd9, 0xdf, 0x24,
0xf1, 0xe9, 0xb3, 0xcb, 0x69, 0xb0, 0x64, 0xe3, 0xb4, 0xae, 0xc3, 0x17, 0x48, 0xe1, 0xbc, 0x69,
0x9b, 0x9c, 0x06, 0x4b, 0x36, 0x31, 0x5b, 0xa7, 0xdf, 0xec, 0xe8, 0xaf, 0x9c, 0xfb, 0x6c, 0x39,
0x0d, 0x96, 0x6c, 0xe8, 0x04, 0xe6, 0x1d, 0x97, 0xe9, 0x57, 0xce, 0xe9, 0xb2, 0x16, 0xd7, 0xcf,
0x03, 0x97, 0x61, 0xce, 0x82, 0x7e, 0xa5, 0x01, 0x78, 0xc9, 0x62, 0x7d, 0x55, 0xcc, 0xf4, 0xdd,
0x59, 0x93, 0x56, 0x93, 0x75, 0x5e, 0x77, 0x98, 0xdf, 0x4f, 0xbe, 0x17, 0x52, 0xfb, 0x21, 0xa5,
0x03, 0xfd, 0x41, 0x83, 0x4b, 0xc4, 0x30, 0x44, 0x0d, 0x4d, 0xac, 0xd4, 0x6e, 0x2a, 0x8a, 0xac,
0x3c, 0x9a, 0xfd, 0x92, 0xaf, 0xb9, 0xae, 0x55, 0xd3, 0x87, 0x83, 0xd2, 0xa5, 0xdd, 0x29, 0xbc,
0x78, 0xaa, 0x1a, 0xf4, 0x67, 0x0d, 0xf2, 0xea, 0x54, 0x4d, 0x69, 0x2c, 0x89, 0x24, 0x76, 0x67,
0x9f, 0xc4, 0x71, 0x26, 0x99, 0xcb, 0xcb, 0x2a, 0x97, 0xf9, 0x09, 0x1c, 0x4f, 0x8a, 0x43, 0x7f,
0xd3, 0x60, 0xd5, 0xa0, 0x1e, 0x75, 0x0c, 0xea, 0xb4, 0xb9, 0xda, 0xb2, 0x50, 0xdb, 0x9a, 0xb9,
0xda, 0xbd, 0x14, 0x89, 0x14, 0x5a, 0x55, 0x42, 0x57, 0xd3, 0xd0, 0xb3, 0x41, 0x69, 0x2b, 0x19,
0x9a, 0x46, 0xf0, 0x88, 0x4e, 0xf4, 0x6b, 0x0d, 0xd6, 0x93, 0x97, 0x20, 0xaf, 0x99, 0xab, 0xe7,
0xba, 0x1a, 0x44, 0x59, 0xbb, 0x3b, 0x4a, 0x89, 0xc7, 0x35, 0xa0, 0xbf, 0x68, 0xbc, 0x82, 0x8b,
0x3e, 0xf8, 0x02, 0xbd, 0x22, 0xf2, 0x49, 0xce, 0x21, 0x9f, 0x31, 0x87, 0x4c, 0xe7, 0xcd, 0xa4,
0x48, 0x8c, 0x91, 0x67, 0x83, 0xd2, 0x66, 0x3a, 0x9b, 0x31, 0x80, 0xd3, 0x1a, 0xd1, 0x2f, 0x34,
0x58, 0xa5, 0x49, 0x2d, 0x1e, 0xe8, 0xd7, 0x66, 0x94, 0xc8, 0xa9, 0x05, 0x7e, 0x6d, 0x83, 0xbf,
0xf4, 0x14, 0x14, 0xe0, 0x11, 0x76, 0x5e, 0x5d, 0xd2, 0x1e, 0xb1, 0x3d, 0x8b, 0xea, 0x5f, 0x9c,
0x79, 0x75, 0x59, 0x97, 0x91, 0x71, 0x44, 0x81, 0x6e, 0x42, 0xd6, 0x09, 0x2d, 0x8b, 0xb4, 0x2c,
0xaa, 0xbf, 0x26, 0xea, 0x94, 0xb8, 0x55, 0xf0, 0x40, 0xd9, 0x71, 0xec, 0x81, 0x3a, 0x50, 0xee,
0xbd, 0x1d, 0xb6, 0xa8, 0xef, 0x50, 0x46, 0x83, 0x03, 0x9f, 0x06, 0xd4, 0x3f, 0xa5, 0x0f, 0x9d,
0x13, 0xc7, 0x7d, 0xec, 0xdc, 0x33, 0xa9, 0x65, 0x04, 0xfa, 0x75, 0x11, 0xa5, 0x30, 0x1c, 0x94,
0xb6, 0x8e, 0xa6, 0x7a, 0xe0, 0xcf, 0x8c, 0x81, 0x7e, 0x04, 0xaf, 0xa4, 0x7c, 0xea, 0x76, 0x8b,
0x1a, 0x06, 0x35, 0xa2, 0x0f, 0x3b, 0xfd, 0x4b, 0x82, 0x22, 0xde, 0xea, 0x47, 0xe3, 0x0e, 0xf8,
0xd3, 0x46, 0xa3, 0x7d, 0xd8, 0x4a, 0xc1, 0xf7, 0x1d, 0xd6, 0xf4, 0x0f, 0x99, 0x6f, 0x3a, 0x5d,
0x7d, 0x5b, 0xc4, 0xbd, 0x14, 0xed, 0xcc, 0xa3, 0x14, 0x86, 0xcf, 0x18, 0x83, 0xbe, 0x37, 0x12,
0x4d, 0xb4, 0x57, 0x88, 0xf7, 0x36, 0xed, 0x07, 0xfa, 0x0d, 0x51, 0xb7, 0x88, 0xd7, 0x7d, 0x94,
0xb2, 0xe3, 0x33, 0xfc, 0xd1, 0x5d, 0xb8, 0x38, 0x86, 0xf0, 0x0f, 0x18, 0xfd, 0xcb, 0xf2, 0x4b,
0x84, 0xd7, 0xba, 0x47, 0x91, 0x11, 0x4f, 0xf3, 0x44, 0xdf, 0x02, 0x94, 0x32, 0x37, 0x88, 0x27,
0xc6, 0xbf, 0x2e, 0x3f, 0x8a, 0xf8, 0x1b, 0x3d, 0x52, 0x36, 0x3c, 0xc5, 0x0f, 0xfd, 0x4e, 0x1b,
0x99, 0x49, 0xf2, 0xf5, 0x1c, 0xe8, 0x37, 0xc5, 0x2e, 0x6e, 0xbe, 0xf0, 0x3a, 0x4c, 0x62, 0xe2,
0xd0, 0xa2, 0xa9, 0x44, 0xa7, 0xc8, 0xf0, 0x19, 0x22, 0x0a, 0xfc, 0x1b, 0x7e, 0xec, 0xb4, 0x47,
0x1b, 0x30, 0x7f, 0x42, 0xfb, 0xf2, 0x03, 0x15, 0xf3, 0x47, 0xd4, 0x81, 0xc5, 0x53, 0x62, 0x85,
0x51, 0x23, 0x62, 0xe6, 0x15, 0x03, 0x96, 0xe1, 0xbf, 0x91, 0xb9, 0xa3, 0x15, 0x7e, 0xa3, 0xc1,
0xd6, 0xf4, 0x6b, 0xe8, 0x25, 0x0b, 0xfb, 0xbd, 0x06, 0xf9, 0x89, 0x1b, 0x67, 0x8a, 0xa6, 0x60,
0x54, 0xd3, 0x8f, 0x67, 0x7f, 0x75, 0xc8, 0x4d, 0x22, 0x2a, 0xe8, 0xb4, 0xc0, 0x8f, 0x34, 0xd8,
0x18, 0x3f, 0xc2, 0x5f, 0x6e, 0xce, 0x2a, 0xbf, 0xcd, 0xc0, 0xd6, 0xf4, 0xd2, 0x1f, 0xb1, 0xb8,
0xd3, 0x71, 0x5e, 0x3d, 0x23, 0x48, 0xfa, 0x26, 0x71, 0x9b, 0xe4, 0xe7, 0x1a, 0xe4, 0xde, 0x8b,
0xfd, 0xa2, 0xee, 0xec, 0x39, 0xf4, 0xab, 0xa2, 0x9b, 0x33, 0x01, 0x02, 0x9c, 0x66, 0xae, 0xfc,
0x55, 0x83, 0xcd, 0xa9, 0x45, 0x01, 0xba, 0x0e, 0x4b, 0xc4, 0xb2, 0xdc, 0xc7, 0xb2, 0xfb, 0x98,
0x4d, 0xda, 0x2a, 0xbb, 0xc2, 0x8a, 0x15, 0x9a, 0xca, 0x60, 0xe6, 0xf3, 0xcb, 0x60, 0xe5, 0xef,
0x1a, 0x5c, 0xf9, 0xb4, 0x15, 0xf9, 0x92, 0x5e, 0xec, 0x36, 0x64, 0x55, 0x71, 0xdf, 0x17, 0x2f,
0x55, 0x1d, 0xce, 0xea, 0x08, 0xe9, 0xe3, 0x18, 0xad, 0xfc, 0x23, 0x03, 0x17, 0x46, 0xcf, 0x4c,
0x54, 0x86, 0x05, 0x3f, 0xb4, 0xa8, 0xea, 0xc9, 0xc5, 0x1f, 0xeb, 0x1c, 0xc3, 0x02, 0x41, 0x37,
0x60, 0xd9, 0xa6, 0x41, 0x40, 0xba, 0x54, 0x35, 0xe6, 0xe2, 0xde, 0x47, 0x43, 0x9a, 0x71, 0x84,
0xa3, 0xef, 0x42, 0x5e, 0x3d, 0xd6, 0x7b, 0x9e, 0x4f, 0x03, 0x3e, 0x0d, 0xd5, 0x97, 0x8b, 0x2f,
0xd7, 0xc6, 0xb8, 0x03, 0x9e, 0x1c, 0x83, 0xbe, 0x09, 0x4b, 0x3e, 0x25, 0x41, 0xdc, 0xb2, 0xbb,
0xc6, 0xa7, 0x8d, 0x85, 0x85, 0x17, 0x61, 0xe2, 0x56, 0x7f, 0xc4, 0xb7, 0x5a, 0xdd, 0xf7, 0x5d,
0x5f, 0x02, 0x58, 0x0d, 0x41, 0x3b, 0xb0, 0xd2, 0xe1, 0x0e, 0xa2, 0x85, 0x2e, 0x5b, 0x78, 0x79,
0xc5, 0xbe, 0x72, 0x2f, 0x02, 0x70, 0xe2, 0x83, 0xbe, 0x0d, 0xeb, 0xae, 0x27, 0xab, 0xce, 0xa6,
0x65, 0x1c, 0x52, 0xab, 0x23, 0xba, 0x7a, 0xd9, 0xa8, 0xf5, 0x3a, 0x02, 0xe1, 0x71, 0xdf, 0x1a,
0x7d, 0xf2, 0xb4, 0x38, 0xf7, 0xf1, 0xd3, 0xe2, 0xdc, 0x27, 0x4f, 0x8b, 0x73, 0x1f, 0x0c, 0x8b,
0xda, 0x93, 0x61, 0x51, 0xfb, 0x78, 0x58, 0xd4, 0x3e, 0x19, 0x16, 0xb5, 0x7f, 0x0e, 0x8b, 0xda,
0x47, 0xff, 0x2a, 0xce, 0xfd, 0xf0, 0xee, 0x0b, 0xfe, 0x4f, 0xdd, 0x7f, 0x03, 0x00, 0x00, 0xff,
0xff, 0x1e, 0xda, 0x9c, 0x38, 0xeb, 0x1b, 0x00, 0x00,
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer writes the wire encoding back-to-front from the end of
// dAtA: each field's bytes are written first, then its length varint, then its
// tag byte (0xa = field 1, wire type 2 for the embedded ObjectMeta).
// Returns the number of bytes written.
func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer writes back-to-front: Items (field 2, tag 0x12) in
// reverse order so they decode in order, then ListMeta (field 1, tag 0xa).
func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer writes back-to-front: optional LabelSelectorPath
// (field 3, tag 0x1a, only when non-nil), StatusReplicasPath (field 2, 0x12),
// then SpecReplicasPath (field 1, 0xa).
func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.LabelSelectorPath != nil {
i -= len(*m.LabelSelectorPath)
copy(dAtA[i:], *m.LabelSelectorPath)
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath)))
i--
dAtA[i] = 0x1a
}
i -= len(m.StatusReplicasPath)
copy(dAtA[i:], m.StatusReplicasPath)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath)))
i--
dAtA[i] = 0x12
i -= len(m.SpecReplicasPath)
copy(dAtA[i:], m.SpecReplicasPath)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer writes nothing: this message marshals no fields, so the
// encoding is always empty (0 bytes).
func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer writes back-to-front: optional Scale (field 2, tag 0x12)
// then optional Status (field 1, tag 0xa); nil fields are omitted.
func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Scale != nil {
{
size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
if m.Status != nil {
{
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer writes the optional OpenAPIV3Schema (field 1, tag 0xa)
// back-to-front; a nil schema is omitted entirely.
func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.OpenAPIV3Schema != nil {
{
size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer writes back-to-front: URL (field 2, tag 0x12) then
// Description (field 1, tag 0xa); both strings are emitted unconditionally.
func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.URL)
copy(dAtA[i:], m.URL)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL)))
i--
dAtA[i] = 0x12
i -= len(m.Description)
copy(dAtA[i:], m.Description)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *JSON) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *JSON) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer writes the optional Raw bytes (field 1, tag 0xa)
// back-to-front; nil Raw is omitted.
func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Raw != nil {
i -= len(m.Raw)
copy(dAtA[i:], m.Raw)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated buffer of exactly m.Size() bytes.
func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo encodes m into the front of dAtA; dAtA must hold at least m.Size() bytes.
func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order (standard gogo/protobuf backward marshaling),
// and returns the number of bytes written. dAtA must be at least m.Size().
// Map fields are emitted with sorted keys for deterministic output.
func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// XValidations: repeated message.
	if len(m.XValidations) > 0 {
		for iNdEx := len(m.XValidations) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.XValidations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x2
			i--
			dAtA[i] = 0xe2
		}
	}
	// XMapType: optional string.
	if m.XMapType != nil {
		i -= len(*m.XMapType)
		copy(dAtA[i:], *m.XMapType)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XMapType)))
		i--
		dAtA[i] = 0x2
		i--
		dAtA[i] = 0xda
	}
	// XListType: optional string.
	if m.XListType != nil {
		i -= len(*m.XListType)
		copy(dAtA[i:], *m.XListType)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType)))
		i--
		dAtA[i] = 0x2
		i--
		dAtA[i] = 0xd2
	}
	// XListMapKeys: repeated string.
	if len(m.XListMapKeys) > 0 {
		for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.XListMapKeys[iNdEx])
			copy(dAtA[i:], m.XListMapKeys[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx])))
			i--
			dAtA[i] = 0x2
			i--
			dAtA[i] = 0xca
		}
	}
	// XIntOrString: bool, always emitted.
	i--
	if m.XIntOrString {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x2
	i--
	dAtA[i] = 0xc0
	// XEmbeddedResource: bool, always emitted.
	i--
	if m.XEmbeddedResource {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x2
	i--
	dAtA[i] = 0xb8
	// XPreserveUnknownFields: optional bool.
	if m.XPreserveUnknownFields != nil {
		i--
		if *m.XPreserveUnknownFields {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x2
		i--
		dAtA[i] = 0xb0
	}
	// Nullable: bool, always emitted.
	i--
	if m.Nullable {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x2
	i--
	dAtA[i] = 0xa8
	// Example: optional message.
	if m.Example != nil {
		{
			size, err := m.Example.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x2
		i--
		dAtA[i] = 0xa2
	}
	// ExternalDocs: optional message.
	if m.ExternalDocs != nil {
		{
			size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x2
		i--
		dAtA[i] = 0x9a
	}
	// Definitions: map<string, JSONSchemaProps>, sorted keys.
	if len(m.Definitions) > 0 {
		keysForDefinitions := make([]string, 0, len(m.Definitions))
		for k := range m.Definitions {
			keysForDefinitions = append(keysForDefinitions, string(k))
		}
		github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions)
		for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- {
			v := m.Definitions[string(keysForDefinitions[iNdEx])]
			baseI := i
			{
				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
			i -= len(keysForDefinitions[iNdEx])
			copy(dAtA[i:], keysForDefinitions[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx])))
			i--
			dAtA[i] = 0xa
			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x2
			i--
			dAtA[i] = 0x92
		}
	}
	// AdditionalItems: optional message.
	if m.AdditionalItems != nil {
		{
			size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x2
		i--
		dAtA[i] = 0x8a
	}
	// Dependencies: map<string, JSONSchemaPropsOrStringArray>, sorted keys.
	if len(m.Dependencies) > 0 {
		keysForDependencies := make([]string, 0, len(m.Dependencies))
		for k := range m.Dependencies {
			keysForDependencies = append(keysForDependencies, string(k))
		}
		github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies)
		for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- {
			v := m.Dependencies[string(keysForDependencies[iNdEx])]
			baseI := i
			{
				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
			i -= len(keysForDependencies[iNdEx])
			copy(dAtA[i:], keysForDependencies[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx])))
			i--
			dAtA[i] = 0xa
			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x2
			i--
			dAtA[i] = 0x82
		}
	}
	// PatternProperties: map<string, JSONSchemaProps>, sorted keys.
	if len(m.PatternProperties) > 0 {
		keysForPatternProperties := make([]string, 0, len(m.PatternProperties))
		for k := range m.PatternProperties {
			keysForPatternProperties = append(keysForPatternProperties, string(k))
		}
		github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties)
		for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- {
			v := m.PatternProperties[string(keysForPatternProperties[iNdEx])]
			baseI := i
			{
				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
			i -= len(keysForPatternProperties[iNdEx])
			copy(dAtA[i:], keysForPatternProperties[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx])))
			i--
			dAtA[i] = 0xa
			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0xfa
		}
	}
	// AdditionalProperties: optional message.
	if m.AdditionalProperties != nil {
		{
			size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0xf2
	}
	// Properties: map<string, JSONSchemaProps>, sorted keys.
	if len(m.Properties) > 0 {
		keysForProperties := make([]string, 0, len(m.Properties))
		for k := range m.Properties {
			keysForProperties = append(keysForProperties, string(k))
		}
		github_com_gogo_protobuf_sortkeys.Strings(keysForProperties)
		for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- {
			v := m.Properties[string(keysForProperties[iNdEx])]
			baseI := i
			{
				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
			i -= len(keysForProperties[iNdEx])
			copy(dAtA[i:], keysForProperties[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx])))
			i--
			dAtA[i] = 0xa
			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0xea
		}
	}
	// Not: optional message.
	if m.Not != nil {
		{
			size, err := m.Not.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0xe2
	}
	// AnyOf: repeated message.
	if len(m.AnyOf) > 0 {
		for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0xda
		}
	}
	// OneOf: repeated message.
	if len(m.OneOf) > 0 {
		for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0xd2
		}
	}
	// AllOf: repeated message.
	if len(m.AllOf) > 0 {
		for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0xca
		}
	}
	// Items: optional message.
	if m.Items != nil {
		{
			size, err := m.Items.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0xc2
	}
	// Required: repeated string.
	if len(m.Required) > 0 {
		for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Required[iNdEx])
			copy(dAtA[i:], m.Required[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx])))
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0xba
		}
	}
	// MinProperties / MaxProperties: optional varints.
	if m.MinProperties != nil {
		i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0xb0
	}
	if m.MaxProperties != nil {
		i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0xa8
	}
	// Enum: repeated message.
	if len(m.Enum) > 0 {
		for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0xa2
		}
	}
	// MultipleOf: optional double (8-byte little-endian IEEE 754).
	if m.MultipleOf != nil {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf))))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x99
	}
	// UniqueItems: bool, always emitted.
	i--
	if m.UniqueItems {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x1
	i--
	dAtA[i] = 0x90
	// MinItems / MaxItems: optional varints.
	if m.MinItems != nil {
		i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x88
	}
	if m.MaxItems != nil {
		i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems))
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x80
	}
	// Pattern: string, always emitted.
	i -= len(m.Pattern)
	copy(dAtA[i:], m.Pattern)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern)))
	i--
	dAtA[i] = 0x7a
	// MinLength / MaxLength: optional varints.
	if m.MinLength != nil {
		i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength))
		i--
		dAtA[i] = 0x70
	}
	if m.MaxLength != nil {
		i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength))
		i--
		dAtA[i] = 0x68
	}
	// ExclusiveMinimum: bool, always emitted.
	i--
	if m.ExclusiveMinimum {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x60
	// Minimum: optional double.
	if m.Minimum != nil {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum))))
		i--
		dAtA[i] = 0x59
	}
	// ExclusiveMaximum: bool, always emitted.
	i--
	if m.ExclusiveMaximum {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x50
	// Maximum: optional double.
	if m.Maximum != nil {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum))))
		i--
		dAtA[i] = 0x49
	}
	// Default: optional message.
	if m.Default != nil {
		{
			size, err := m.Default.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x42
	}
	// Title, Format, Type, Description: strings, always emitted.
	i -= len(m.Title)
	copy(dAtA[i:], m.Title)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title)))
	i--
	dAtA[i] = 0x3a
	i -= len(m.Format)
	copy(dAtA[i:], m.Format)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format)))
	i--
	dAtA[i] = 0x32
	i -= len(m.Type)
	copy(dAtA[i:], m.Type)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
	i--
	dAtA[i] = 0x2a
	i -= len(m.Description)
	copy(dAtA[i:], m.Description)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
	i--
	dAtA[i] = 0x22
	// Ref: optional string.
	if m.Ref != nil {
		i -= len(*m.Ref)
		copy(dAtA[i:], *m.Ref)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref)))
		i--
		dAtA[i] = 0x1a
	}
	// Schema, ID: strings, always emitted (lowest field numbers last).
	i -= len(m.Schema)
	copy(dAtA[i:], m.Schema)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema)))
	i--
	dAtA[i] = 0x12
	i -= len(m.ID)
	copy(dAtA[i:], m.ID)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m into dAtA and returns the number of bytes written.
func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order, and returns the number of bytes written.
func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// JSONSchemas: repeated message (tag byte 0x12).
	if len(m.JSONSchemas) > 0 {
		for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintGenerated(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	// Schema: optional message (tag byte 0xa).
	if m.Schema != nil {
		{
			size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m into dAtA and returns the number of bytes written.
func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order, and returns the number of bytes written.
func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Schema: optional message (tag byte 0x12).
	if m.Schema != nil {
		{
			size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x12
	}
	// Allows: bool, always emitted (tag byte 0x8).
	i--
	if m.Allows {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i--
	dAtA[i] = 0x8
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m into dAtA and returns the number of bytes written.
func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order, and returns the number of bytes written.
func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Property: repeated string (tag byte 0x12).
	if len(m.Property) > 0 {
		for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Property[iNdEx])
			copy(dAtA[i:], m.Property[iNdEx])
			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx])))
			i--
			dAtA[i] = 0x12
		}
	}
	// Schema: optional message (tag byte 0xa).
	if m.Schema != nil {
		{
			size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintGenerated(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer.
func (m *ValidationRule) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m into dAtA and returns the number of bytes written.
func (m *ValidationRule) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m into the tail of dAtA, writing fields in
// reverse field-number order, and returns the number of bytes written.
func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// OptionalOldSelf: optional bool (tag byte 0x30).
	if m.OptionalOldSelf != nil {
		i--
		if *m.OptionalOldSelf {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x30
	}
	// FieldPath: string, always emitted.
	i -= len(m.FieldPath)
	copy(dAtA[i:], m.FieldPath)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath)))
	i--
	dAtA[i] = 0x2a
	// Reason: optional string.
	if m.Reason != nil {
		i -= len(*m.Reason)
		copy(dAtA[i:], *m.Reason)
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
		i--
		dAtA[i] = 0x22
	}
	// MessageExpression, Message, Rule: strings, always emitted.
	i -= len(m.MessageExpression)
	copy(dAtA[i:], m.MessageExpression)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
	i--
	dAtA[i] = 0x1a
	i -= len(m.Message)
	copy(dAtA[i:], m.Message)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
	i--
	dAtA[i] = 0x12
	i -= len(m.Rule)
	copy(dAtA[i:], m.Rule)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
// encodeVarintGenerated writes v as a protobuf varint ending just before
// offset and returns the index of the varint's first byte.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	offset -= sovGenerated(v)
	base := offset
	for ; v >= 0x80; v >>= 7 {
		// Low 7 bits plus continuation flag.
		dAtA[offset] = uint8(v&0x7f | 0x80)
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
// Size returns the wire-format size of m in bytes.
func (m *CustomResourceDefinition) Size() (n int) {
	if m == nil {
		return 0
	}
	sz := m.ObjectMeta.Size()
	return 1 + sz + sovGenerated(uint64(sz))
}
// Size returns the wire-format size of m in bytes.
func (m *CustomResourceDefinitionList) Size() (n int) {
	if m == nil {
		return 0
	}
	meta := m.ListMeta.Size()
	n = 1 + meta + sovGenerated(uint64(meta))
	for _, e := range m.Items {
		sz := e.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	return n
}
// Size returns the wire-format size of m in bytes.
func (m *CustomResourceSubresourceScale) Size() (n int) {
	if m == nil {
		return 0
	}
	for _, s := range []string{m.SpecReplicasPath, m.StatusReplicasPath} {
		n += 1 + len(s) + sovGenerated(uint64(len(s)))
	}
	if m.LabelSelectorPath != nil {
		l := len(*m.LabelSelectorPath)
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}
// Size returns the wire-format size of m in bytes. The message has no
// fields, so both nil and non-nil receivers encode to zero bytes.
func (m *CustomResourceSubresourceStatus) Size() (n int) {
	return 0
}
// Size returns the wire-format size of m in bytes.
func (m *CustomResourceSubresources) Size() (n int) {
	if m == nil {
		return 0
	}
	// Tag byte + length prefix + payload for an embedded message.
	embedded := func(sz int) int { return 1 + sz + sovGenerated(uint64(sz)) }
	if m.Status != nil {
		n += embedded(m.Status.Size())
	}
	if m.Scale != nil {
		n += embedded(m.Scale.Size())
	}
	return n
}
// Size returns the wire-format size of m in bytes.
func (m *CustomResourceValidation) Size() (n int) {
	if m == nil {
		return 0
	}
	if s := m.OpenAPIV3Schema; s != nil {
		sz := s.Size()
		n = 1 + sz + sovGenerated(uint64(sz))
	}
	return n
}
// Size returns the wire-format size of m in bytes.
func (m *ExternalDocumentation) Size() (n int) {
	if m == nil {
		return 0
	}
	for _, s := range []string{m.Description, m.URL} {
		n += 1 + len(s) + sovGenerated(uint64(len(s)))
	}
	return n
}
// Size returns the wire-format size of m in bytes.
func (m *JSON) Size() (n int) {
	if m == nil {
		return 0
	}
	if m.Raw != nil {
		raw := len(m.Raw)
		n = 1 + raw + sovGenerated(uint64(raw))
	}
	return n
}
// Size returns the wire-format size of m in bytes. The per-field constants
// mirror the tag widths written by MarshalToSizedBuffer (1- or 2-byte tags,
// plus 1 byte for bools and 8 bytes for doubles).
func (m *JSONSchemaProps) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.ID)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Schema)
	n += 1 + l + sovGenerated(uint64(l))
	if m.Ref != nil {
		l = len(*m.Ref)
		n += 1 + l + sovGenerated(uint64(l))
	}
	l = len(m.Description)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Type)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Format)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Title)
	n += 1 + l + sovGenerated(uint64(l))
	if m.Default != nil {
		l = m.Default.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.Maximum != nil {
		n += 9 // tag + 8-byte double
	}
	n += 2 // ExclusiveMaximum: tag + bool byte (always written)
	if m.Minimum != nil {
		n += 9 // tag + 8-byte double
	}
	n += 2 // ExclusiveMinimum: tag + bool byte (always written)
	if m.MaxLength != nil {
		n += 1 + sovGenerated(uint64(*m.MaxLength))
	}
	if m.MinLength != nil {
		n += 1 + sovGenerated(uint64(*m.MinLength))
	}
	l = len(m.Pattern)
	n += 1 + l + sovGenerated(uint64(l))
	if m.MaxItems != nil {
		n += 2 + sovGenerated(uint64(*m.MaxItems))
	}
	if m.MinItems != nil {
		n += 2 + sovGenerated(uint64(*m.MinItems))
	}
	n += 3 // UniqueItems: 2-byte tag + bool byte (always written)
	if m.MultipleOf != nil {
		n += 10 // 2-byte tag + 8-byte double
	}
	if len(m.Enum) > 0 {
		for _, e := range m.Enum {
			l = e.Size()
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	if m.MaxProperties != nil {
		n += 2 + sovGenerated(uint64(*m.MaxProperties))
	}
	if m.MinProperties != nil {
		n += 2 + sovGenerated(uint64(*m.MinProperties))
	}
	if len(m.Required) > 0 {
		for _, s := range m.Required {
			l = len(s)
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	if m.Items != nil {
		l = m.Items.Size()
		n += 2 + l + sovGenerated(uint64(l))
	}
	if len(m.AllOf) > 0 {
		for _, e := range m.AllOf {
			l = e.Size()
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.OneOf) > 0 {
		for _, e := range m.OneOf {
			l = e.Size()
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	if len(m.AnyOf) > 0 {
		for _, e := range m.AnyOf {
			l = e.Size()
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	if m.Not != nil {
		l = m.Not.Size()
		n += 2 + l + sovGenerated(uint64(l))
	}
	// Map fields: each entry is a nested message of key + value.
	if len(m.Properties) > 0 {
		for k, v := range m.Properties {
			_ = k
			_ = v
			l = v.Size()
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
			n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize))
		}
	}
	if m.AdditionalProperties != nil {
		l = m.AdditionalProperties.Size()
		n += 2 + l + sovGenerated(uint64(l))
	}
	if len(m.PatternProperties) > 0 {
		for k, v := range m.PatternProperties {
			_ = k
			_ = v
			l = v.Size()
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
			n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize))
		}
	}
	if len(m.Dependencies) > 0 {
		for k, v := range m.Dependencies {
			_ = k
			_ = v
			l = v.Size()
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
			n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize))
		}
	}
	if m.AdditionalItems != nil {
		l = m.AdditionalItems.Size()
		n += 2 + l + sovGenerated(uint64(l))
	}
	if len(m.Definitions) > 0 {
		for k, v := range m.Definitions {
			_ = k
			_ = v
			l = v.Size()
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
			n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize))
		}
	}
	if m.ExternalDocs != nil {
		l = m.ExternalDocs.Size()
		n += 2 + l + sovGenerated(uint64(l))
	}
	if m.Example != nil {
		l = m.Example.Size()
		n += 2 + l + sovGenerated(uint64(l))
	}
	n += 3 // Nullable: 2-byte tag + bool byte (always written)
	if m.XPreserveUnknownFields != nil {
		n += 3
	}
	n += 3 // XEmbeddedResource: 2-byte tag + bool byte (always written)
	n += 3 // XIntOrString: 2-byte tag + bool byte (always written)
	if len(m.XListMapKeys) > 0 {
		for _, s := range m.XListMapKeys {
			l = len(s)
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	if m.XListType != nil {
		l = len(*m.XListType)
		n += 2 + l + sovGenerated(uint64(l))
	}
	if m.XMapType != nil {
		l = len(*m.XMapType)
		n += 2 + l + sovGenerated(uint64(l))
	}
	if len(m.XValidations) > 0 {
		for _, e := range m.XValidations {
			l = e.Size()
			n += 2 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
// Size returns the wire-format size of m in bytes.
func (m *JSONSchemaPropsOrArray) Size() (n int) {
	if m == nil {
		return 0
	}
	if m.Schema != nil {
		sz := m.Schema.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	for _, e := range m.JSONSchemas {
		sz := e.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	return n
}
// Size returns the wire-format size of m in bytes.
func (m *JSONSchemaPropsOrBool) Size() (n int) {
	if m == nil {
		return 0
	}
	n = 2 // Allows bool: tag byte + value byte
	if m.Schema != nil {
		sz := m.Schema.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	return n
}
// Size returns the wire-format size of m in bytes.
func (m *JSONSchemaPropsOrStringArray) Size() (n int) {
	if m == nil {
		return 0
	}
	if m.Schema != nil {
		sz := m.Schema.Size()
		n += 1 + sz + sovGenerated(uint64(sz))
	}
	for _, s := range m.Property {
		n += 1 + len(s) + sovGenerated(uint64(len(s)))
	}
	return n
}
// Size returns the wire-format size of m in bytes.
func (m *ValidationRule) Size() (n int) {
	if m == nil {
		return 0
	}
	for _, s := range []string{m.Rule, m.Message, m.MessageExpression, m.FieldPath} {
		n += 1 + len(s) + sovGenerated(uint64(len(s)))
	}
	if m.Reason != nil {
		l := len(*m.Reason)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.OptionalOldSelf != nil {
		n += 2 // tag byte + bool byte
	}
	return n
}
// sovGenerated returns the number of bytes needed to varint-encode x:
// one byte per started 7-bit group (x|1 makes zero still cost one byte).
func sovGenerated(x uint64) (n int) {
	bits := math_bits.Len64(x | 1)
	return (bits + 6) / 7
}
// sozGenerated returns the varint-encoded size of x after zigzag encoding
// (sint32/sint64 fields), which maps small negatives to small magnitudes.
func sozGenerated(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovGenerated(zigzag)
}
// String returns a compact single-line debug representation of this.
func (this *CustomResourceDefinition) String() string {
	if this == nil {
		return "nil"
	}
	meta := strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1)
	return `&CustomResourceDefinition{` + `ObjectMeta:` + meta + `,` + `}`
}
// String returns a compact single-line debug representation of this.
func (this *CustomResourceDefinitionList) String() string {
	if this == nil {
		return "nil"
	}
	items := "[]CustomResourceDefinition{"
	for _, f := range this.Items {
		// Strip the leading "&" from each element's own String output.
		items += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	items += "}"
	meta := strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1)
	return `&CustomResourceDefinitionList{` +
		`ListMeta:` + meta + `,` +
		`Items:` + items + `,` +
		`}`
}
// String returns a compact single-line debug representation of this.
func (this *CustomResourceSubresourceScale) String() string {
	if this == nil {
		return "nil"
	}
	var b strings.Builder
	b.WriteString(`&CustomResourceSubresourceScale{`)
	b.WriteString(`SpecReplicasPath:` + fmt.Sprintf("%v", this.SpecReplicasPath) + `,`)
	b.WriteString(`StatusReplicasPath:` + fmt.Sprintf("%v", this.StatusReplicasPath) + `,`)
	b.WriteString(`LabelSelectorPath:` + valueToStringGenerated(this.LabelSelectorPath) + `,`)
	b.WriteString(`}`)
	return b.String()
}
// String returns a compact single-line debug representation of this.
func (this *CustomResourceSubresourceStatus) String() string {
	if this == nil {
		return "nil"
	}
	// Message has no fields to render.
	return "&CustomResourceSubresourceStatus{}"
}
// String returns a compact single-line debug representation of this.
func (this *CustomResourceSubresources) String() string {
	if this == nil {
		return "nil"
	}
	return `&CustomResourceSubresources{` +
		`Status:` + this.Status.String() + `,` +
		`Scale:` + this.Scale.String() + `,` +
		`}`
}
// String returns a compact single-line debug representation of this.
func (this *CustomResourceValidation) String() string {
	if this == nil {
		return "nil"
	}
	return `&CustomResourceValidation{` +
		`OpenAPIV3Schema:` + this.OpenAPIV3Schema.String() + `,` +
		`}`
}
// String returns a compact single-line debug representation of this.
func (this *ExternalDocumentation) String() string {
	if this == nil {
		return "nil"
	}
	return `&ExternalDocumentation{` +
		`Description:` + fmt.Sprintf("%v", this.Description) + `,` +
		`URL:` + fmt.Sprintf("%v", this.URL) + `,` +
		`}`
}
// String returns a compact single-line debug representation of this.
func (this *JSON) String() string {
	if this == nil {
		return "nil"
	}
	return `&JSON{` + `Raw:` + valueToStringGenerated(this.Raw) + `,` + `}`
}
// String returns a compact single-line debug representation of this.
// Repeated fields are pre-rendered into "[]T{...}" strings and map fields
// into "map[...]{...}" strings with sorted keys for deterministic output.
func (this *JSONSchemaProps) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForEnum := "[]JSON{"
	for _, f := range this.Enum {
		repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + ","
	}
	repeatedStringForEnum += "}"
	repeatedStringForAllOf := "[]JSONSchemaProps{"
	for _, f := range this.AllOf {
		repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + ","
	}
	repeatedStringForAllOf += "}"
	repeatedStringForOneOf := "[]JSONSchemaProps{"
	for _, f := range this.OneOf {
		repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + ","
	}
	repeatedStringForOneOf += "}"
	repeatedStringForAnyOf := "[]JSONSchemaProps{"
	for _, f := range this.AnyOf {
		repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + ","
	}
	repeatedStringForAnyOf += "}"
	repeatedStringForXValidations := "[]ValidationRule{"
	for _, f := range this.XValidations {
		repeatedStringForXValidations += strings.Replace(strings.Replace(f.String(), "ValidationRule", "ValidationRule", 1), `&`, ``, 1) + ","
	}
	repeatedStringForXValidations += "}"
	// Maps are rendered with sorted keys so output is stable across runs.
	keysForProperties := make([]string, 0, len(this.Properties))
	for k := range this.Properties {
		keysForProperties = append(keysForProperties, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForProperties)
	mapStringForProperties := "map[string]JSONSchemaProps{"
	for _, k := range keysForProperties {
		mapStringForProperties += fmt.Sprintf("%v: %v,", k, this.Properties[k])
	}
	mapStringForProperties += "}"
	keysForPatternProperties := make([]string, 0, len(this.PatternProperties))
	for k := range this.PatternProperties {
		keysForPatternProperties = append(keysForPatternProperties, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties)
	mapStringForPatternProperties := "map[string]JSONSchemaProps{"
	for _, k := range keysForPatternProperties {
		mapStringForPatternProperties += fmt.Sprintf("%v: %v,", k, this.PatternProperties[k])
	}
	mapStringForPatternProperties += "}"
	keysForDependencies := make([]string, 0, len(this.Dependencies))
	for k := range this.Dependencies {
		keysForDependencies = append(keysForDependencies, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies)
	mapStringForDependencies := "JSONSchemaDependencies{"
	for _, k := range keysForDependencies {
		mapStringForDependencies += fmt.Sprintf("%v: %v,", k, this.Dependencies[k])
	}
	mapStringForDependencies += "}"
	keysForDefinitions := make([]string, 0, len(this.Definitions))
	for k := range this.Definitions {
		keysForDefinitions = append(keysForDefinitions, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions)
	mapStringForDefinitions := "JSONSchemaDefinitions{"
	for _, k := range keysForDefinitions {
		mapStringForDefinitions += fmt.Sprintf("%v: %v,", k, this.Definitions[k])
	}
	mapStringForDefinitions += "}"
	s := strings.Join([]string{`&JSONSchemaProps{`,
		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
		`Schema:` + fmt.Sprintf("%v", this.Schema) + `,`,
		`Ref:` + valueToStringGenerated(this.Ref) + `,`,
		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
		`Format:` + fmt.Sprintf("%v", this.Format) + `,`,
		`Title:` + fmt.Sprintf("%v", this.Title) + `,`,
		`Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`,
		`Maximum:` + valueToStringGenerated(this.Maximum) + `,`,
		`ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`,
		`Minimum:` + valueToStringGenerated(this.Minimum) + `,`,
		`ExclusiveMinimum:` + fmt.Sprintf("%v", this.ExclusiveMinimum) + `,`,
		`MaxLength:` + valueToStringGenerated(this.MaxLength) + `,`,
		`MinLength:` + valueToStringGenerated(this.MinLength) + `,`,
		`Pattern:` + fmt.Sprintf("%v", this.Pattern) + `,`,
		`MaxItems:` + valueToStringGenerated(this.MaxItems) + `,`,
		`MinItems:` + valueToStringGenerated(this.MinItems) + `,`,
		`UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`,
		`MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`,
		`Enum:` + repeatedStringForEnum + `,`,
		`MaxProperties:` + valueToStringGenerated(this.MaxProperties) + `,`,
		`MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`,
		`Required:` + fmt.Sprintf("%v", this.Required) + `,`,
		`Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`,
		`AllOf:` + repeatedStringForAllOf + `,`,
		`OneOf:` + repeatedStringForOneOf + `,`,
		`AnyOf:` + repeatedStringForAnyOf + `,`,
		`Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`,
		`Properties:` + mapStringForProperties + `,`,
		`AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`,
		`PatternProperties:` + mapStringForPatternProperties + `,`,
		`Dependencies:` + mapStringForDependencies + `,`,
		`AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`,
		`Definitions:` + mapStringForDefinitions + `,`,
		`ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`,
		`Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`,
		`Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`,
		`XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`,
		`XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`,
		`XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`,
		`XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`,
		`XListType:` + valueToStringGenerated(this.XListType) + `,`,
		`XMapType:` + valueToStringGenerated(this.XMapType) + `,`,
		`XValidations:` + repeatedStringForXValidations + `,`,
		`}`,
	}, "")
	return s
}
// String returns a compact single-line debug representation of this.
func (this *JSONSchemaPropsOrArray) String() string {
	if this == nil {
		return "nil"
	}
	schemas := "[]JSONSchemaProps{"
	for _, f := range this.JSONSchemas {
		// Strip the leading "&" from each element's own String output.
		schemas += strings.Replace(f.String(), `&`, ``, 1) + ","
	}
	schemas += "}"
	return `&JSONSchemaPropsOrArray{` +
		`Schema:` + this.Schema.String() + `,` +
		`JSONSchemas:` + schemas + `,` +
		`}`
}
// String returns a compact single-line debug representation of this.
func (this *JSONSchemaPropsOrBool) String() string {
	if this == nil {
		return "nil"
	}
	return `&JSONSchemaPropsOrBool{` +
		`Allows:` + fmt.Sprintf("%v", this.Allows) + `,` +
		`Schema:` + this.Schema.String() + `,` +
		`}`
}
// String returns a compact single-line debug representation of this.
func (this *JSONSchemaPropsOrStringArray) String() string {
	if this == nil {
		return "nil"
	}
	return `&JSONSchemaPropsOrStringArray{` +
		`Schema:` + this.Schema.String() + `,` +
		`Property:` + fmt.Sprintf("%v", this.Property) + `,` +
		`}`
}
// String returns a compact single-line debug representation of this.
func (this *ValidationRule) String() string {
	if this == nil {
		return "nil"
	}
	var b strings.Builder
	b.WriteString(`&ValidationRule{`)
	b.WriteString(`Rule:` + fmt.Sprintf("%v", this.Rule) + `,`)
	b.WriteString(`Message:` + fmt.Sprintf("%v", this.Message) + `,`)
	b.WriteString(`MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`)
	b.WriteString(`Reason:` + valueToStringGenerated(this.Reason) + `,`)
	b.WriteString(`FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`)
	b.WriteString(`OptionalOldSelf:` + valueToStringGenerated(this.OptionalOldSelf) + `,`)
	b.WriteString(`}`)
	return b.String()
}
// valueToStringGenerated renders a pointer-like value for String() output:
// "nil" when the value is nil, otherwise "*<element>" of the pointed-to value.
func valueToStringGenerated(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	return fmt.Sprintf("*%v", reflect.Indirect(rv).Interface())
}
// Unmarshal decodes protobuf wire-format bytes into m, skipping unknown
// fields. It returns io.ErrUnexpectedEOF on truncated input and
// ErrIntOverflowGenerated / ErrInvalidLengthGenerated on malformed varints
// or lengths.
func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// ObjectMeta: length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a CustomResourceDefinitionList from its protobuf wire
// representation in dAtA. Field 1 is the embedded ListMeta, field 2 the
// repeated Items. Unknown fields are skipped. Generated by gogo/protobuf —
// do not hand-edit.
func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the tag varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: ListMeta, an embedded length-delimited message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Guards against integer overflow of iNdEx + msglen.
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Items, a repeated message; each occurrence appends
			// one decoded CustomResourceDefinition.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Items = append(m.Items, CustomResourceDefinition{})
			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a CustomResourceSubresourceScale from its protobuf wire
// representation in dAtA. Fields: 1 SpecReplicasPath (string),
// 2 StatusReplicasPath (string), 3 LabelSelectorPath (*string, optional).
// Unknown fields are skipped. Generated by gogo/protobuf — do not hand-edit.
func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the tag varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: CustomResourceSubresourceScale: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CustomResourceSubresourceScale: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: SpecReplicasPath, a length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SpecReplicasPath", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// A negative int after conversion means the varint overflowed int.
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.SpecReplicasPath = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: StatusReplicasPath, a length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StatusReplicasPath", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StatusReplicasPath = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: LabelSelectorPath, an optional string stored as *string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LabelSelectorPath", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.LabelSelectorPath = &s
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a CustomResourceSubresourceStatus from its protobuf
// wire representation in dAtA. The message declares no fields, so every
// well-formed field encountered is validated and skipped. Generated by
// gogo/protobuf — do not hand-edit.
func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the tag varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: CustomResourceSubresourceStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CustomResourceSubresourceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		default:
			// No known fields: rewind to the tag and skip whatever follows.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a CustomResourceSubresources from its protobuf wire
// representation in dAtA. Fields: 1 Status (*CustomResourceSubresourceStatus),
// 2 Scale (*CustomResourceSubresourceScale); both are allocated lazily on
// first occurrence. Unknown fields are skipped. Generated by gogo/protobuf —
// do not hand-edit.
func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the tag varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: CustomResourceSubresources: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CustomResourceSubresources: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Status, an optional embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Allocate the target on first occurrence; repeated occurrences
			// merge into the same message per proto semantics.
			if m.Status == nil {
				m.Status = &CustomResourceSubresourceStatus{}
			}
			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Scale, an optional embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Scale == nil {
				m.Scale = &CustomResourceSubresourceScale{}
			}
			if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a CustomResourceValidation from its protobuf wire
// representation in dAtA. Field 1 is OpenAPIV3Schema (*JSONSchemaProps),
// allocated lazily on first occurrence. Unknown fields are skipped.
// Generated by gogo/protobuf — do not hand-edit.
func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the tag varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: CustomResourceValidation: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CustomResourceValidation: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: OpenAPIV3Schema, an optional embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OpenAPIV3Schema", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Guards against integer overflow of iNdEx + msglen.
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.OpenAPIV3Schema == nil {
				m.OpenAPIV3Schema = &JSONSchemaProps{}
			}
			if err := m.OpenAPIV3Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes an ExternalDocumentation from its protobuf wire
// representation in dAtA. Fields: 1 Description (string), 2 URL (string).
// Unknown fields are skipped. Generated by gogo/protobuf — do not hand-edit.
func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the tag varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: ExternalDocumentation: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ExternalDocumentation: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Description, a length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// A negative int after conversion means the varint overflowed int.
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Description = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: URL, a length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.URL = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes a JSON value from its protobuf wire representation in
// dAtA. Field 1 is Raw ([]byte); the payload is copied into m.Raw, and an
// empty payload is normalized to a non-nil empty slice. Unknown fields are
// skipped. Generated by gogo/protobuf — do not hand-edit.
func (m *JSON) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the tag varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: JSON: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: JSON: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Raw, length-delimited bytes.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				// Guards against integer overflow of iNdEx + byteLen.
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy the payload, reusing m.Raw's backing array when possible.
			m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...)
			if m.Raw == nil {
				// Field was present but empty: keep a non-nil empty slice.
				m.Raw = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: JSONSchemaProps: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: JSONSchemaProps: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Schema = JSONSchemaURL(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.Ref = &s
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Description = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Format = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Title = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Default == nil {
m.Default = &JSON{}
}
if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 9:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
v2 := float64(math.Float64frombits(v))
m.Maximum = &v2
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMaximum", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.ExclusiveMaximum = bool(v != 0)
case 11:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
v2 := float64(math.Float64frombits(v))
m.Minimum = &v2
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMinimum", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.ExclusiveMinimum = bool(v != 0)
case 13:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MaxLength", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.MaxLength = &v
case 14:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MinLength", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.MinLength = &v
case 15:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Pattern = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 16:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MaxItems", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.MaxItems = &v
case 17:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MinItems", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.MinItems = &v
case 18:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field UniqueItems", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.UniqueItems = bool(v != 0)
case 19:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field MultipleOf", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
v2 := float64(math.Float64frombits(v))
m.MultipleOf = &v2
case 20:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Enum = append(m.Enum, JSON{})
if err := m.Enum[len(m.Enum)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 21:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MaxProperties", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.MaxProperties = &v
case 22:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MinProperties", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.MinProperties = &v
case 23:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Required = append(m.Required, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 24:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Items == nil {
m.Items = &JSONSchemaPropsOrArray{}
}
if err := m.Items.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 25:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.AllOf = append(m.AllOf, JSONSchemaProps{})
if err := m.AllOf[len(m.AllOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 26:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OneOf", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.OneOf = append(m.OneOf, JSONSchemaProps{})
if err := m.OneOf[len(m.OneOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 27:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.AnyOf = append(m.AnyOf, JSONSchemaProps{})
if err := m.AnyOf[len(m.AnyOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 28:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Not", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Not == nil {
m.Not = &JSONSchemaProps{}
}
if err := m.Not.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 29:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Properties == nil {
m.Properties = make(map[string]JSONSchemaProps)
}
var mapkey string
mapvalue := &JSONSchemaProps{}
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var mapmsglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapmsglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if mapmsglen < 0 {
return ErrInvalidLengthGenerated
}
postmsgIndex := iNdEx + mapmsglen
if postmsgIndex < 0 {
return ErrInvalidLengthGenerated
}
if postmsgIndex > l {
return io.ErrUnexpectedEOF
}
mapvalue = &JSONSchemaProps{}
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
return err
}
iNdEx = postmsgIndex
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Properties[mapkey] = *mapvalue
iNdEx = postIndex
case 30:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AdditionalProperties", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.AdditionalProperties == nil {
m.AdditionalProperties = &JSONSchemaPropsOrBool{}
}
if err := m.AdditionalProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 31:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PatternProperties", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.PatternProperties == nil {
m.PatternProperties = make(map[string]JSONSchemaProps)
}
var mapkey string
mapvalue := &JSONSchemaProps{}
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var mapmsglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapmsglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if mapmsglen < 0 {
return ErrInvalidLengthGenerated
}
postmsgIndex := iNdEx + mapmsglen
if postmsgIndex < 0 {
return ErrInvalidLengthGenerated
}
if postmsgIndex > l {
return io.ErrUnexpectedEOF
}
mapvalue = &JSONSchemaProps{}
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
return err
}
iNdEx = postmsgIndex
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.PatternProperties[mapkey] = *mapvalue
iNdEx = postIndex
case 32:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Dependencies == nil {
m.Dependencies = make(JSONSchemaDependencies)
}
var mapkey string
mapvalue := &JSONSchemaPropsOrStringArray{}
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var mapmsglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapmsglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if mapmsglen < 0 {
return ErrInvalidLengthGenerated
}
postmsgIndex := iNdEx + mapmsglen
if postmsgIndex < 0 {
return ErrInvalidLengthGenerated
}
if postmsgIndex > l {
return io.ErrUnexpectedEOF
}
mapvalue = &JSONSchemaPropsOrStringArray{}
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
return err
}
iNdEx = postmsgIndex
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Dependencies[mapkey] = *mapvalue
iNdEx = postIndex
case 33:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.AdditionalItems == nil {
m.AdditionalItems = &JSONSchemaPropsOrBool{}
}
if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 34:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Definitions == nil {
m.Definitions = make(JSONSchemaDefinitions)
}
var mapkey string
mapvalue := &JSONSchemaProps{}
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var mapmsglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapmsglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if mapmsglen < 0 {
return ErrInvalidLengthGenerated
}
postmsgIndex := iNdEx + mapmsglen
if postmsgIndex < 0 {
return ErrInvalidLengthGenerated
}
if postmsgIndex > l {
return io.ErrUnexpectedEOF
}
mapvalue = &JSONSchemaProps{}
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
return err
}
iNdEx = postmsgIndex
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Definitions[mapkey] = *mapvalue
iNdEx = postIndex
case 35:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.ExternalDocs == nil {
m.ExternalDocs = &ExternalDocumentation{}
}
if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 36:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Example == nil {
m.Example = &JSON{}
}
if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 37:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Nullable = bool(v != 0)
case 38:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
b := bool(v != 0)
m.XPreserveUnknownFields = &b
case 39:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.XEmbeddedResource = bool(v != 0)
case 40:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.XIntOrString = bool(v != 0)
case 41:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 42:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.XListType = &s
iNdEx = postIndex
case 43:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field XMapType", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.XMapType = &s
iNdEx = postIndex
case 44:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field XValidations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.XValidations = append(m.XValidations, ValidationRule{})
if err := m.XValidations[len(m.XValidations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
//
// Code generated by protoc-gen-gogo; the logic must not be hand-edited
// (it is overwritten on regeneration) — comments here are for readers only.
func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag as a varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is only legal inside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Schema — embedded JSONSchemaProps message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType)
			}
			// Length-delimited payload: decode the byte length first.
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow of iNdEx+msglen.
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Schema == nil {
				m.Schema = &JSONSchemaProps{}
			}
			if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: JSONSchemas — repeated JSONSchemaProps; one element per occurrence.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Append a zero element, then decode into it in place.
			m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{})
			if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, rewinding to include the tag bytes.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
//
// Code generated by protoc-gen-gogo; do not hand-edit the logic — comments
// here are for readers only.
func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag as a varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is only legal inside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Allows — varint-encoded bool (any non-zero value is true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Allows = bool(v != 0)
		case 2:
			// Field 2: Schema — embedded JSONSchemaProps message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Schema == nil {
				m.Schema = &JSONSchemaProps{}
			}
			if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, rewinding to include the tag bytes.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
//
// Code generated by protoc-gen-gogo; do not hand-edit the logic — comments
// here are for readers only.
func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag as a varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is only legal inside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Schema — embedded JSONSchemaProps message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Schema == nil {
				m.Schema = &JSONSchemaProps{}
			}
			if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Property — repeated string; one element appended per occurrence.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Property = append(m.Property, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			// Unknown field: skip it, rewinding to include the tag bytes.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
//
// Code generated by protoc-gen-gogo; do not hand-edit the logic — comments
// here are for readers only.
func (m *ValidationRule) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag as a varint: (fieldNum << 3) | wireType.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end-group) is only legal inside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: ValidationRule: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ValidationRule: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Rule — string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Rule = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Message — string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Message = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: MessageExpression — string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.MessageExpression = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: Reason — optional FieldValueErrorReason (stored via pointer).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := FieldValueErrorReason(dAtA[iNdEx:postIndex])
			m.Reason = &s
			iNdEx = postIndex
		case 5:
			// Field 5: FieldPath — string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.FieldPath = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// Field 6: OptionalOldSelf — optional bool (varint, stored via pointer).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field OptionalOldSelf", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			b := bool(v != 0)
			m.OptionalOldSelf = &b
		default:
			// Unknown field: skip it, rewinding to include the tag bytes.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipGenerated returns the number of bytes occupied by the next complete
// field in dAtA (tag bytes included), without decoding its contents. Groups
// (wire types 3/4) are tracked with a nesting depth so an entire group is
// skipped as one unit.
//
// Code generated by protoc-gen-gogo; do not hand-edit the logic.
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		// Read the field tag as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint value: advance past continuation bytes.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed 64-bit value.
			iNdEx += 8
		case 2:
			// Length-delimited value: read the length, then skip the payload.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthGenerated
			}
			iNdEx += length
		case 3:
			// Start-group: nest one level deeper.
			depth++
		case 4:
			// End-group: must close a previously opened group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupGenerated
			}
			depth--
		case 5:
			// Fixed 32-bit value.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		// Guard against index overflow from the additions above.
		if iNdEx < 0 {
			return 0, ErrInvalidLengthGenerated
		}
		// Outside any group: a whole field has been consumed.
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
// Sentinel errors shared by the generated Unmarshal methods and skipGenerated.
var (
	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2017 The Kubernetes Authors.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the API group these types belong to.
const GroupName = "apiextensions.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Kind qualifies an unqualified kind with this package's group,
// returning the resulting GroupKind.
func Kind(kind string) schema.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}
// Resource qualifies an unqualified resource name with this package's group,
// returning the resulting GroupResource.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
var (
	// SchemeBuilder collects the functions that register this group's
	// types and defaulting functions into a runtime.Scheme.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs)
	// localSchemeBuilder lets generated code in this package register
	// additional functions onto the same builder.
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme applies all registered functions to a scheme.
	AddToScheme = localSchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
// addKnownTypes registers CustomResourceDefinition and its list type under
// SchemeGroupVersion, plus the shared meta v1 types for that group version.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&CustomResourceDefinition{},
		&CustomResourceDefinitionList{},
	)
	// Also register common meta types (ListOptions, Status, ...) for this GV.
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	// addDefaultingFuncs is defined elsewhere in this package.
	localSchemeBuilder.Register(addDefaultingFuncs)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2019 The Kubernetes Authors.
package v1
// FieldValueErrorReason is a machine-readable value providing more detail about why a field failed the validation.
// +enum
type FieldValueErrorReason string

// Known FieldValueErrorReason values, mirroring the upstream Kubernetes enum.
const (
	// FieldValueRequired is used to report required values that are not
	// provided (e.g. empty strings, null values, or empty arrays).
	FieldValueRequired FieldValueErrorReason = "FieldValueRequired"
	// FieldValueDuplicate is used to report collisions of values that must be
	// unique (e.g. unique IDs).
	FieldValueDuplicate FieldValueErrorReason = "FieldValueDuplicate"
	// FieldValueInvalid is used to report malformed values (e.g. failed regex
	// match, too long, out of bounds).
	FieldValueInvalid FieldValueErrorReason = "FieldValueInvalid"
	// FieldValueForbidden is used to report valid (as per formatting rules)
	// values which would be accepted under some conditions, but which are not
	// permitted by the current conditions (such as security policy).
	FieldValueForbidden FieldValueErrorReason = "FieldValueForbidden"
)
// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).
type JSONSchemaProps struct {
ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"`
Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"`
Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"`
Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"`
Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"`
// format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:
//
// - bsonobjectid: a bson object ID, i.e. a 24 characters hex string
// - uri: an URI as parsed by Golang net/url.ParseRequestURI
// - email: an email address as parsed by Golang net/mail.ParseAddress
// - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
// - ipv4: an IPv4 IP as parsed by Golang net.ParseIP
// - ipv6: an IPv6 IP as parsed by Golang net.ParseIP
// - cidr: a CIDR as parsed by Golang net.ParseCIDR
// - mac: a MAC address as parsed by Golang net.ParseMAC
// - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$
// - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$
// - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$
// - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$
// - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041"
// - isbn10: an ISBN10 number string like "0321751043"
// - isbn13: an ISBN13 number string like "978-0321751041"
// - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in
// - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$
// - hexcolor: a hexadecimal color code like "#FFFFFF" following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
// - rgbcolor: an RGB color code like "rgb(255,255,255)"
// - byte: base64 encoded binary data
// - password: any kind of string
// - date: a date string like "2006-01-02" as defined by full-date in RFC3339
// - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format
// - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.
Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"`
Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"`
// default is a default value for undefined object fields.
// Defaulting is a beta feature under the CustomResourceDefaulting feature gate.
// Defaulting requires spec.preserveUnknownFields to be false.
Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"`
Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"`
ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"`
Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"`
ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"`
MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"`
MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"`
Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"`
MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"`
MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"`
UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"`
MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"`
// +listType=atomic
Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"`
MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"`
MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"`
// +listType=atomic
Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"`
Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"`
// +listType=atomic
AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"`
// +listType=atomic
OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"`
// +listType=atomic
AnyOf []JSONSchemaProps `json:"anyOf,omitempty" protobuf:"bytes,27,rep,name=anyOf"`
Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"`
Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"`
AdditionalProperties *JSONSchemaPropsOrBool `json:"additionalProperties,omitempty" protobuf:"bytes,30,opt,name=additionalProperties"`
PatternProperties map[string]JSONSchemaProps `json:"patternProperties,omitempty" protobuf:"bytes,31,rep,name=patternProperties"`
Dependencies JSONSchemaDependencies `json:"dependencies,omitempty" protobuf:"bytes,32,opt,name=dependencies"`
AdditionalItems *JSONSchemaPropsOrBool `json:"additionalItems,omitempty" protobuf:"bytes,33,opt,name=additionalItems"`
Definitions JSONSchemaDefinitions `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"`
ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"`
Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"`
Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"`
// x-kubernetes-preserve-unknown-fields stops the API server
// decoding step from pruning fields which are not specified
// in the validation schema. This affects fields recursively,
// but switches back to normal pruning behaviour if nested
// properties or additionalProperties are specified in the schema.
// This can either be true or undefined. False is forbidden.
XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"`
// x-kubernetes-embedded-resource defines that the value is an
// embedded Kubernetes runtime.Object, with TypeMeta and
// ObjectMeta. The type must be object. It is allowed to further
// restrict the embedded object. kind, apiVersion and metadata
// are validated automatically. x-kubernetes-preserve-unknown-fields
// is allowed to be true, but does not have to be if the object
// is fully specified (up to kind, apiVersion, metadata).
XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"`
// x-kubernetes-int-or-string specifies that this value is
// either an integer or a string. If this is true, an empty
// type is allowed and type as child of anyOf is permitted
// if following one of the following patterns:
//
// 1) anyOf:
// - type: integer
// - type: string
// 2) allOf:
// - anyOf:
// - type: integer
// - type: string
// - ... zero or more
XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"`
// x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used
// as the index of the map.
//
// This tag MUST only be used on lists that have the "x-kubernetes-list-type"
// extension set to "map". Also, the values specified for this attribute must
// be a scalar typed field of the child structure (no nesting is supported).
//
// The properties specified must either be required or have a default value,
// to ensure those properties are present for all list items.
//
// +optional
// +listType=atomic
XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"`
// x-kubernetes-list-type annotates an array to further describe its topology.
// This extension must only be used on lists and may have 3 possible values:
//
// 1) `atomic`: the list is treated as a single entity, like a scalar.
// Atomic lists will be entirely replaced when updated. This extension
// may be used on any type of list (struct, scalar, ...).
// 2) `set`:
// Sets are lists that must not have multiple items with the same value. Each
// value must be a scalar, an object with x-kubernetes-map-type `atomic` or an
// array with x-kubernetes-list-type `atomic`.
// 3) `map`:
// These lists are like maps in that their elements have a non-index key
// used to identify them. Order is preserved upon merge. The map tag
// must only be used on a list with elements of type object.
// Defaults to atomic for arrays.
// +optional
XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"`
// x-kubernetes-map-type annotates an object to further describe its topology.
// This extension must only be used when type is object and may have 2 possible values:
//
// 1) `granular`:
// These maps are actual maps (key-value pairs) and each fields are independent
// from each other (they can each be manipulated by separate actors). This is
// the default behaviour for all maps.
// 2) `atomic`: the list is treated as a single entity, like a scalar.
// Atomic maps will be entirely replaced when updated.
// +optional
XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"`
// x-kubernetes-validations describes a list of validation rules written in the CEL expression language.
// This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.
// +patchMergeKey=rule
// +patchStrategy=merge
// +listType=map
// +listMapKey=rule
XValidations ValidationRules `json:"x-kubernetes-validations,omitempty" patchStrategy:"merge" patchMergeKey:"rule" protobuf:"bytes,44,rep,name=xKubernetesValidations"`
}
// ValidationRules describes a list of validation rules written in the CEL expression language.
// Rules are attached to a schema via the x-kubernetes-validations extension.
type ValidationRules []ValidationRule
// ValidationRule describes a validation rule written in the CEL expression language.
type ValidationRule struct {
	// Rule represents the expression which will be evaluated by CEL.
	// ref: https://github.com/google/cel-spec
	// The Rule is scoped to the location of the x-kubernetes-validations extension in the schema.
	// The `self` variable in the CEL expression is bound to the scoped value.
	// Example:
	// - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"}
	//
	// If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable
	// via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as
	// absent fields in CEL expressions.
	// If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map
	// are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map
	// are accessible via CEL macros and functions such as `self.all(...)`.
	// If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and
	// functions.
	// If the Rule is scoped to a scalar, `self` is bound to the scalar value.
	// Examples:
	// - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"}
	// - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"}
	// - Rule scoped to a string value: {"rule": "self.startsWith('kube')"}
	//
	// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
	// object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.
	//
	// Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL
	// expressions. This includes:
	// - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields.
	// - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as:
	//   - A schema with no type and x-kubernetes-preserve-unknown-fields set to true
	//   - An array where the items schema is of an "unknown type"
	//   - An object where the additionalProperties schema is of an "unknown type"
	//
	// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
	// Accessible property names are escaped according to the following rules when accessed in the expression:
	// - '__' escapes to '__underscores__'
	// - '.' escapes to '__dot__'
	// - '-' escapes to '__dash__'
	// - '/' escapes to '__slash__'
	// - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
	//   "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
	//   "import", "let", "loop", "package", "namespace", "return".
	// Examples:
	// - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"}
	// - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"}
	// - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"}
	//
	// Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
	// Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
	// - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
	//   non-intersecting elements in `Y` are appended, retaining their partial order.
	// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
	//   are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
	//   non-intersecting keys are appended, retaining their partial order.
	//
	// If `rule` makes use of the `oldSelf` variable it is implicitly a
	// `transition rule`.
	//
	// By default, the `oldSelf` variable is the same type as `self`.
	// When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional
	// variable whose value() is the same type as `self`.
	// See the documentation for the `optionalOldSelf` field for details.
	//
	// Transition rules by default are applied only on UPDATE requests and are
	// skipped if an old value could not be found. You can opt a transition
	// rule into unconditional evaluation by setting `optionalOldSelf` to true.
	//
	Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"`
	// Message represents the message displayed when validation fails. The message is required if the Rule contains
	// line breaks. The message must not contain line breaks.
	// If unset, the message is "failed rule: {Rule}".
	// e.g. "must be a URL with the host matching spec.host"
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
	// MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
	// Since messageExpression is used as a failure message, it must evaluate to a string.
	// If both message and messageExpression are present on a rule, then messageExpression will be used if validation
	// fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
	// as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
	// that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
	// the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
	// messageExpression has access to all the same variables as the rule; the only difference is the return type.
	// Example:
	// "x must be less than max ("+string(self.max)+")"
	// +optional
	MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,3,opt,name=messageExpression"`
	// reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule.
	// The HTTP status code returned to the caller will match the reason of the first failed validation rule.
	// The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate".
	// If not set, default to use "FieldValueInvalid".
	// All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.
	// +optional
	Reason *FieldValueErrorReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// fieldPath represents the field path returned when the validation fails.
	// It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field.
	// e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`
	// If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList`
	// It does not support list numeric index.
	// It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info.
	// Numeric index of array is not supported.
	// For field name which contains special characters, use `['specialName']` to refer the field name.
	// e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
	// +optional
	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,5,opt,name=fieldPath"`
	// optionalOldSelf is used to opt a transition rule into evaluation
	// even when the object is first created, or if the old object is
	// missing the value.
	//
	// When enabled `oldSelf` will be a CEL optional whose value will be
	// `None` if there is no old value, or when the object is initially created.
	//
	// You may check for presence of oldSelf using `oldSelf.hasValue()` and
	// unwrap it after checking using `oldSelf.value()`. Check the CEL
	// documentation for Optional types for more information:
	// https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes
	//
	// May not be set unless `oldSelf` is used in `rule`.
	//
	// +featureGate=CRDValidationRatcheting
	// +optional
	OptionalOldSelf *bool `json:"optionalOldSelf,omitempty" protobuf:"bytes,6,opt,name=optionalOldSelf"`
}
// JSON represents any valid JSON value.
// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.
type JSON struct {
	// Raw holds the undecoded JSON bytes. The json:"-" tag hides it when
	// this struct itself is marshalled; custom (un)marshalling is expected
	// to read and write Raw directly.
	Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"`
}

// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (JSON) OpenAPISchemaType() []string {
	// TODO: return actual types when anyOf is supported
	return nil
}

// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (JSON) OpenAPISchemaFormat() string { return "" }
// JSONSchemaURL represents a schema URL.
type JSONSchemaURL string
// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps
// or an array of JSONSchemaProps. Mainly here for serialization purposes.
type JSONSchemaPropsOrArray struct {
	// Schema holds the single-schema form of the value.
	Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"`
	// JSONSchemas holds the array-of-schemas form of the value.
	// +listType=atomic
	JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"`
}

// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (JSONSchemaPropsOrArray) OpenAPISchemaType() []string {
	// TODO: return actual types when anyOf is supported
	return nil
}

// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (JSONSchemaPropsOrArray) OpenAPISchemaFormat() string { return "" }
// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value.
// Defaults to true for the boolean property.
type JSONSchemaPropsOrBool struct {
	// Allows is the boolean form of the value.
	Allows bool `protobuf:"varint,1,opt,name=allows"`
	// Schema is the schema form of the value, if present.
	Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema"`
}

// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (JSONSchemaPropsOrBool) OpenAPISchemaType() []string {
	// TODO: return actual types when anyOf is supported
	return nil
}

// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (JSONSchemaPropsOrBool) OpenAPISchemaFormat() string { return "" }
// JSONSchemaDependencies represents a dependencies property: each key maps to
// either a schema or a list of property names (JSONSchemaPropsOrStringArray).
type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray
// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array.
type JSONSchemaPropsOrStringArray struct {
	// Schema holds the schema form of the value.
	Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"`
	// Property holds the string-array form of the value.
	// +listType=atomic
	Property []string `protobuf:"bytes,2,rep,name=property"`
}

// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string {
	// TODO: return actual types when anyOf is supported
	return nil
}

// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (JSONSchemaPropsOrStringArray) OpenAPISchemaFormat() string { return "" }
// JSONSchemaDefinitions contains the models explicitly defined in this spec,
// keyed by definition name.
type JSONSchemaDefinitions map[string]JSONSchemaProps
// ExternalDocumentation allows referencing an external resource for extended documentation.
type ExternalDocumentation struct {
	// Description is optional free text accompanying the link.
	Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"`
	// URL points at the external documentation resource.
	URL string `json:"url,omitempty" protobuf:"bytes,2,opt,name=url"`
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
func init() {
	// Register RegisterConversions with the package scheme builder so the
	// conversions are installed whenever this package's scheme is built.
	localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
	// No conversion functions are currently generated for this package.
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition.
func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition {
	if in == nil {
		return nil
	}
	out := new(CustomResourceDefinition)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Return a literal nil so callers see a nil interface rather than a
	// typed-nil pointer boxed in runtime.Object.
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Shadow in/out with pointers to the Items fields for the element loop.
		in, out := &in.Items, &out.Items
		*out = make([]CustomResourceDefinition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList.
func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList {
	if in == nil {
		return nil
	}
	out := new(CustomResourceDefinitionList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Literal nil avoids the typed-nil-in-interface trap.
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) {
	*out = *in
	if in.LabelSelectorPath != nil {
		// Allocate a fresh string so out does not alias in's pointer.
		in, out := &in.LabelSelectorPath, &out.LabelSelectorPath
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale.
func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale {
	if in == nil {
		return nil
	}
	out := new(CustomResourceSubresourceScale)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain value copy suffices: the type has no reference fields to deep-copy.
func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus.
func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus {
	if in == nil {
		return nil
	}
	out := new(CustomResourceSubresourceStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) {
	*out = *in
	if in.Status != nil {
		// Status has no reference fields, so a value copy is a deep copy.
		in, out := &in.Status, &out.Status
		*out = new(CustomResourceSubresourceStatus)
		**out = **in
	}
	if in.Scale != nil {
		in, out := &in.Scale, &out.Scale
		*out = new(CustomResourceSubresourceScale)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources.
func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources {
	if in == nil {
		return nil
	}
	out := new(CustomResourceSubresources)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) {
	*out = *in
	if in.OpenAPIV3Schema != nil {
		in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema
		*out = new(JSONSchemaProps)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation.
func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation {
	if in == nil {
		return nil
	}
	out := new(CustomResourceValidation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain value copy suffices: the type has no reference fields to deep-copy.
func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation.
func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation {
	if in == nil {
		return nil
	}
	out := new(ExternalDocumentation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSON) DeepCopyInto(out *JSON) {
	*out = *in
	if in.Raw != nil {
		// Clone the byte slice so out does not share in's backing array.
		in, out := &in.Raw, &out.Raw
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSON.
func (in *JSON) DeepCopy() *JSON {
	if in == nil {
		return nil
	}
	out := new(JSON)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) {
	{
		// deepcopy-gen scoping artifact: re-bind in as a pointer to the map.
		in := &in
		*out = make(JSONSchemaDefinitions, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions.
func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions {
	if in == nil {
		return nil
	}
	out := new(JSONSchemaDefinitions)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) {
	{
		// deepcopy-gen scoping artifact: re-bind in as a pointer to the map.
		in := &in
		*out = make(JSONSchemaDependencies, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies.
func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies {
	if in == nil {
		return nil
	}
	out := new(JSONSchemaDependencies)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
//
// Every pointer, slice and map field is cloned; the repeated
// `in, out := &in.X, &out.X` statements shadow the parameters with pointers
// to the current field pair so the generated copy code stays uniform.
func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) {
	*out = *in
	if in.Ref != nil {
		in, out := &in.Ref, &out.Ref
		*out = new(string)
		**out = **in
	}
	if in.Default != nil {
		in, out := &in.Default, &out.Default
		*out = new(JSON)
		(*in).DeepCopyInto(*out)
	}
	if in.Maximum != nil {
		in, out := &in.Maximum, &out.Maximum
		*out = new(float64)
		**out = **in
	}
	if in.Minimum != nil {
		in, out := &in.Minimum, &out.Minimum
		*out = new(float64)
		**out = **in
	}
	if in.MaxLength != nil {
		in, out := &in.MaxLength, &out.MaxLength
		*out = new(int64)
		**out = **in
	}
	if in.MinLength != nil {
		in, out := &in.MinLength, &out.MinLength
		*out = new(int64)
		**out = **in
	}
	if in.MaxItems != nil {
		in, out := &in.MaxItems, &out.MaxItems
		*out = new(int64)
		**out = **in
	}
	if in.MinItems != nil {
		in, out := &in.MinItems, &out.MinItems
		*out = new(int64)
		**out = **in
	}
	if in.MultipleOf != nil {
		in, out := &in.MultipleOf, &out.MultipleOf
		*out = new(float64)
		**out = **in
	}
	if in.Enum != nil {
		in, out := &in.Enum, &out.Enum
		*out = make([]JSON, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.MaxProperties != nil {
		in, out := &in.MaxProperties, &out.MaxProperties
		*out = new(int64)
		**out = **in
	}
	if in.MinProperties != nil {
		in, out := &in.MinProperties, &out.MinProperties
		*out = new(int64)
		**out = **in
	}
	if in.Required != nil {
		in, out := &in.Required, &out.Required
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = new(JSONSchemaPropsOrArray)
		(*in).DeepCopyInto(*out)
	}
	if in.AllOf != nil {
		in, out := &in.AllOf, &out.AllOf
		*out = make([]JSONSchemaProps, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.OneOf != nil {
		in, out := &in.OneOf, &out.OneOf
		*out = make([]JSONSchemaProps, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.AnyOf != nil {
		in, out := &in.AnyOf, &out.AnyOf
		*out = make([]JSONSchemaProps, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Not != nil {
		in, out := &in.Not, &out.Not
		*out = new(JSONSchemaProps)
		(*in).DeepCopyInto(*out)
	}
	if in.Properties != nil {
		in, out := &in.Properties, &out.Properties
		*out = make(map[string]JSONSchemaProps, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.AdditionalProperties != nil {
		in, out := &in.AdditionalProperties, &out.AdditionalProperties
		*out = new(JSONSchemaPropsOrBool)
		(*in).DeepCopyInto(*out)
	}
	if in.PatternProperties != nil {
		in, out := &in.PatternProperties, &out.PatternProperties
		*out = make(map[string]JSONSchemaProps, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.Dependencies != nil {
		in, out := &in.Dependencies, &out.Dependencies
		*out = make(JSONSchemaDependencies, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.AdditionalItems != nil {
		in, out := &in.AdditionalItems, &out.AdditionalItems
		*out = new(JSONSchemaPropsOrBool)
		(*in).DeepCopyInto(*out)
	}
	if in.Definitions != nil {
		in, out := &in.Definitions, &out.Definitions
		*out = make(JSONSchemaDefinitions, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.ExternalDocs != nil {
		// ExternalDocumentation holds only strings, so a value copy is deep.
		in, out := &in.ExternalDocs, &out.ExternalDocs
		*out = new(ExternalDocumentation)
		**out = **in
	}
	if in.Example != nil {
		in, out := &in.Example, &out.Example
		*out = new(JSON)
		(*in).DeepCopyInto(*out)
	}
	if in.XPreserveUnknownFields != nil {
		in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields
		*out = new(bool)
		**out = **in
	}
	if in.XListMapKeys != nil {
		in, out := &in.XListMapKeys, &out.XListMapKeys
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.XListType != nil {
		in, out := &in.XListType, &out.XListType
		*out = new(string)
		**out = **in
	}
	if in.XMapType != nil {
		in, out := &in.XMapType, &out.XMapType
		*out = new(string)
		**out = **in
	}
	if in.XValidations != nil {
		in, out := &in.XValidations, &out.XValidations
		*out = make(ValidationRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaProps.
func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps {
	if in == nil {
		return nil
	}
	out := new(JSONSchemaProps)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) {
	// Shallow-copy everything first, then replace the pointer/slice fields
	// with deep copies so out shares no memory with in.
	*out = *in
	if in.Schema != nil {
		out.Schema = new(JSONSchemaProps)
		in.Schema.DeepCopyInto(out.Schema)
	}
	if in.JSONSchemas != nil {
		out.JSONSchemas = make([]JSONSchemaProps, len(in.JSONSchemas))
		for i := range in.JSONSchemas {
			in.JSONSchemas[i].DeepCopyInto(&out.JSONSchemas[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray.
func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray {
	if in == nil {
		return nil
	}
	clone := new(JSONSchemaPropsOrArray)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) {
	// Copy value fields, then deep-copy the optional schema pointer.
	*out = *in
	if in.Schema != nil {
		out.Schema = new(JSONSchemaProps)
		in.Schema.DeepCopyInto(out.Schema)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool.
func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool {
	if in == nil {
		return nil
	}
	clone := new(JSONSchemaPropsOrBool)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) {
	// Copy value fields, then deep-copy the pointer and slice fields.
	*out = *in
	if in.Schema != nil {
		out.Schema = new(JSONSchemaProps)
		in.Schema.DeepCopyInto(out.Schema)
	}
	if in.Property != nil {
		out.Property = make([]string, len(in.Property))
		copy(out.Property, in.Property)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray.
func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray {
	if in == nil {
		return nil
	}
	clone := new(JSONSchemaPropsOrStringArray)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidationRule) DeepCopyInto(out *ValidationRule) {
	// Copy value fields, then re-point the optional fields at fresh storage.
	*out = *in
	if in.Reason != nil {
		out.Reason = new(FieldValueErrorReason)
		*out.Reason = *in.Reason
	}
	if in.OptionalOldSelf != nil {
		out.OptionalOldSelf = new(bool)
		*out.OptionalOldSelf = *in.OptionalOldSelf
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRule.
func (in *ValidationRule) DeepCopy() *ValidationRule {
	if in == nil {
		return nil
	}
	clone := new(ValidationRule)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ValidationRules) DeepCopyInto(out *ValidationRules) {
	// Allocate a fresh slice and deep-copy each rule element-wise.
	*out = make(ValidationRules, len(in))
	for i := range in {
		in[i].DeepCopyInto(&(*out)[i])
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRules.
func (in ValidationRules) DeepCopy() ValidationRules {
	if in == nil {
		return nil
	}
	clone := new(ValidationRules)
	in.DeepCopyInto(clone)
	return *clone
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	// No defaulters were generated for this API group, so there is
	// nothing to register on the scheme.
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package labels
import (
"fmt"
"maps"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// Labels allows you to present labels independently from their storage.
type Labels interface {
	// Has returns whether the provided label exists.
	Has(label string) (exists bool)
	// Get returns the value for the provided label.
	Get(label string) (value string)
}

// Set is a map of label:value. It implements Labels.
// A nil Set is safe for reads: Has reports false and Get returns "".
type Set map[string]string
// String returns all labels listed as a human readable string.
// Conveniently, exactly the format that ParseSelector takes.
func (ls Set) String() string {
	pairs := make([]string, 0, len(ls))
	for k, v := range ls {
		pairs = append(pairs, k+"="+v)
	}
	// Map iteration order is random; sort for deterministic output.
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}
// Has returns whether the provided label exists in the map.
func (ls Set) Has(label string) bool {
	_, ok := ls[label]
	return ok
}
// Get returns the value in the map for the provided label.
// A missing label (or a nil Set) yields the empty string.
func (ls Set) Get(label string) string {
	return ls[label]
}
// AsSelector converts labels into a selectors. It does not
// perform any validation, which means the server will reject
// the request if the Set contains invalid values.
func (ls Set) AsSelector() Selector {
	return SelectorFromSet(ls)
}

// AsValidatedSelector converts labels into a selectors.
// The Set is validated client-side, which allows to catch errors early.
func (ls Set) AsValidatedSelector() (Selector, error) {
	return ValidatedSelectorFromSet(ls)
}

// AsSelectorPreValidated converts labels into a selector, but
// assumes that labels are already validated and thus doesn't
// perform any validation.
// According to our measurements this is significantly faster
// in codepaths that matter at high scale.
// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector
// instead, which does not copy.
func (ls Set) AsSelectorPreValidated() Selector {
	// Skips per-key/value validation entirely; caller guarantees validity.
	return SelectorFromValidatedSet(ls)
}
// FormatLabels converts label map into plain string
func FormatLabels(labelMap map[string]string) string {
	// An empty map is rendered as the placeholder "<none>".
	if formatted := Set(labelMap).String(); formatted != "" {
		return formatted
	}
	return "<none>"
}
// Conflicts takes 2 maps and returns true if there a key match between
// the maps but the value doesn't match, and returns false in other cases
func Conflicts(labels1, labels2 Set) bool {
	// Iterate over the smaller map and probe the larger one.
	small, big := labels1, labels2
	if len(big) < len(small) {
		small, big = big, small
	}
	for key, val := range small {
		if other, ok := big[key]; ok && other != val {
			return true
		}
	}
	return false
}
// Merge combines given maps, and does not check for any conflicts
// between the maps. In case of conflicts, second map (labels2) wins.
// The result is always a freshly allocated map; neither input is mutated.
func Merge(labels1, labels2 Set) Set {
	// Bug fix: maps.Clone(nil) returns nil, so the previous
	// maps.Clone(labels1) + maps.Copy(...) pattern panicked with
	// "assignment to entry in nil map" when labels1 was nil and
	// labels2 was non-empty. Allocate the destination explicitly.
	merged := make(Set, len(labels1)+len(labels2))
	maps.Copy(merged, labels1)
	maps.Copy(merged, labels2)
	return merged
}
// Equals returns true if the given maps are equal
func Equals(labels1, labels2 Set) bool {
	// maps.Equal performs the identical length check and per-key
	// value comparison that the previous hand-rolled loop did.
	return maps.Equal(labels1, labels2)
}
// ConvertSelectorToLabelsMap converts selector string to labels map
// and validates keys and values
// Only plain "k=v" pairs separated by commas are accepted; set-based
// expressions ("in", "notin", "!") are not supported here.
func ConvertSelectorToLabelsMap(selector string, opts ...field.PathOption) (Set, error) {
	labelsMap := Set{}
	if len(selector) == 0 {
		return labelsMap, nil
	}
	for label := range strings.SplitSeq(selector, ",") {
		// Each segment must be exactly "key=value"; a value containing
		// '=' produces more than two parts and is rejected.
		l := strings.Split(label, "=")
		if len(l) != 2 {
			return labelsMap, fmt.Errorf("invalid selector: %s", l)
		}
		key := strings.TrimSpace(l[0])
		if err := validateLabelKey(key, field.ToPath(opts...)); err != nil {
			return labelsMap, err
		}
		value := strings.TrimSpace(l[1])
		if err := validateLabelValue(key, value, field.ToPath(opts...)); err != nil {
			return labelsMap, err
		}
		labelsMap[key] = value
	}
	return labelsMap, nil
}
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package labels
// FuzzLabelsParse is a fuzzing entry point: it feeds arbitrary bytes to
// Parse and discards the result, exercising the selector parser for
// panics. The return value of 1 marks the input as interesting to the
// fuzzer harness.
func FuzzLabelsParse(data []byte) int {
	_, _ = Parse(string(data))
	return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package labels
import (
"fmt"
"maps"
"slices"
"sort"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection"
)
var (
	// unaryOperators take no values: the key alone is tested.
	unaryOperators = []string{
		string(selection.Exists), string(selection.DoesNotExist),
	}
	// binaryOperators relate a key to one or more values.
	binaryOperators = []string{
		string(selection.In), string(selection.NotIn),
		string(selection.Equals), string(selection.DoubleEquals), string(selection.NotEquals),
		string(selection.GreaterThan), string(selection.LessThan),
	}
	// validRequirementOperators is the union of the two sets above,
	// used for "unsupported operator" error messages.
	validRequirementOperators = append(binaryOperators, unaryOperators...)
)
// Requirements is AND of all requirements.
type Requirements []Requirement

// String returns the requirements' human-readable forms joined by ", ".
func (r Requirements) String() string {
	parts := make([]string, 0, len(r))
	for _, req := range r {
		parts = append(parts, req.String())
	}
	return strings.Join(parts, ", ")
}
// Selector represents a label selector.
type Selector interface {
	// Matches returns true if this selector matches the given set of labels.
	Matches(Labels) bool
	// Empty returns true if this selector does not restrict the selection space.
	Empty() bool
	// String returns a human readable string that represents this selector.
	String() string
	// Add adds requirements to the Selector
	Add(r ...Requirement) Selector
	// Requirements converts this interface into Requirements to expose
	// more detailed selection information.
	// If there are querying parameters, it will return converted requirements and selectable=true.
	// If this selector doesn't want to select anything, it will return selectable=false.
	Requirements() (requirements Requirements, selectable bool)
	// Make a deep copy of the selector.
	DeepCopySelector() Selector
	// RequiresExactMatch allows a caller to introspect whether a given selector
	// requires a single specific label to be set, and if so returns the value it
	// requires.
	RequiresExactMatch(label string) (value string, found bool)
}
// Sharing this saves 1 alloc per use; this is safe because it's immutable.
// An empty internalSelector has no requirements and therefore matches everything.
var sharedEverythingSelector Selector = internalSelector{}

// Everything returns a selector that matches all labels.
func Everything() Selector {
	return sharedEverythingSelector
}
// nothingSelector is a selector that matches no label set at all.
type nothingSelector struct{}

func (n nothingSelector) Matches(_ Labels) bool { return false }
// Empty returns false: Nothing fully restricts the selection space.
func (n nothingSelector) Empty() bool { return false }
func (n nothingSelector) String() string { return "" }
// Add is a no-op: adding requirements to Nothing still selects nothing.
func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
// Requirements returns selectable=false: this selector selects nothing.
func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
func (n nothingSelector) DeepCopySelector() Selector { return n }
func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) {
	return "", false
}

// Sharing this saves 1 alloc per use; this is safe because it's immutable.
var sharedNothingSelector Selector = nothingSelector{}

// Nothing returns a selector that matches no labels
func Nothing() Selector {
	return sharedNothingSelector
}
// NewSelector returns a nil selector
// (an empty internalSelector, which matches everything and can be grown with Add).
func NewSelector() Selector {
	return internalSelector(nil)
}
// internalSelector is the default Selector implementation: an AND-ed
// list of requirements.
type internalSelector []Requirement

// DeepCopy returns an element-wise deep copy of the selector; nil stays nil.
func (s internalSelector) DeepCopy() internalSelector {
	if s == nil {
		return nil
	}
	dup := make([]Requirement, len(s))
	for i := range s {
		s[i].DeepCopyInto(&dup[i])
	}
	return dup
}

// DeepCopySelector implements the Selector interface.
func (s internalSelector) DeepCopySelector() Selector {
	return s.DeepCopy()
}
// ByKey sorts requirements by key to obtain deterministic parser
// (implements sort.Interface over []Requirement).
type ByKey []Requirement

func (a ByKey) Len() int { return len(a) }
func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key }
// Requirement contains values, a key, and an operator that relates the key and values.
// The zero value of Requirement is invalid.
// Requirement implements both set based match and exact match
// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement.
// +k8s:deepcopy-gen=true
type Requirement struct {
	// key is the label key this requirement applies to.
	key string
	// operator relates key to strValues (e.g. In, Exists, GreaterThan).
	operator selection.Operator
	// In huge majority of cases we have at most one value here.
	// It is generally faster to operate on a single-element slice
	// than on a single-element map, so we have a slice here.
	strValues []string
}
// NewRequirement is the constructor for a Requirement.
// If any of these rules is violated, an error is returned:
// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist.
// 2. If the operator is In or NotIn, the values set must be non-empty.
// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
// 4. If the operator is Exists or DoesNotExist, the value set must be empty.
// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer.
// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details.
//
// The empty string is a valid value in the input values set.
// Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList
func NewRequirement(key string, op selection.Operator, vals []string, opts ...field.PathOption) (*Requirement, error) {
	// Errors are accumulated (not returned eagerly) so the caller sees
	// every violation at once in the aggregated error.
	var allErrs field.ErrorList
	path := field.ToPath(opts...)
	if err := validateLabelKey(key, path.Child("key")); err != nil {
		allErrs = append(allErrs, err)
	}
	valuePath := path.Child("values")
	// Operator-specific arity (and, for Gt/Lt, integer-ness) checks.
	switch op {
	case selection.In, selection.NotIn:
		if len(vals) == 0 {
			allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'in', 'notin' operators, values set can't be empty"))
		}
	case selection.Equals, selection.DoubleEquals, selection.NotEquals:
		if len(vals) != 1 {
			allErrs = append(allErrs, field.Invalid(valuePath, vals, "exact-match compatibility requires one single value"))
		}
	case selection.Exists, selection.DoesNotExist:
		if len(vals) != 0 {
			allErrs = append(allErrs, field.Invalid(valuePath, vals, "values set must be empty for exists and does not exist"))
		}
	case selection.GreaterThan, selection.LessThan:
		if len(vals) != 1 {
			allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'Gt', 'Lt' operators, exactly one value is required"))
		}
		for i := range vals {
			if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil {
				allErrs = append(allErrs, field.Invalid(valuePath.Index(i), vals[i], "for 'Gt', 'Lt' operators, the value must be an integer"))
			}
		}
	default:
		allErrs = append(allErrs, field.NotSupported(path.Child("operator"), op, validRequirementOperators))
	}
	// Every value must be a syntactically valid label value, regardless of operator.
	for i := range vals {
		if err := validateLabelValue(key, vals[i], valuePath.Index(i)); err != nil {
			allErrs = append(allErrs, err)
		}
	}
	// Note: a Requirement is returned even when validation failed; callers
	// must check the error before using it.
	return &Requirement{key: key, operator: op, strValues: vals}, allErrs.ToAggregate()
}
// hasValue reports whether value is one of the requirement's values.
func (r *Requirement) hasValue(value string) bool {
	return slices.Contains(r.strValues, value)
}
// Matches returns true if the Requirement matches the input Labels.
// There is a match in the following cases:
// 1. The operator is Exists and Labels has the Requirement's key.
// 2. The operator is In, Labels has the Requirement's key and Labels'
// value for that key is in Requirement's value set.
// 3. The operator is NotIn, Labels has the Requirement's key and
// Labels' value for that key is not in Requirement's value set.
// 4. The operator is DoesNotExist or NotIn and Labels does not have the
// Requirement's key.
// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has
// the Requirement's key and the corresponding value satisfies mathematical inequality.
func (r *Requirement) Matches(ls Labels) bool {
	switch r.operator {
	case selection.In, selection.Equals, selection.DoubleEquals:
		if !ls.Has(r.key) {
			return false
		}
		return r.hasValue(ls.Get(r.key))
	case selection.NotIn, selection.NotEquals:
		// A missing key satisfies negative operators (case 4 above).
		if !ls.Has(r.key) {
			return true
		}
		return !r.hasValue(ls.Get(r.key))
	case selection.Exists:
		return ls.Has(r.key)
	case selection.DoesNotExist:
		return !ls.Has(r.key)
	case selection.GreaterThan, selection.LessThan:
		if !ls.Has(r.key) {
			return false
		}
		// Non-integer label values never match Gt/Lt; the failure is only
		// logged at high verbosity, not surfaced as an error.
		lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
		if err != nil {
			klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
			return false
		}
		// There should be only one strValue in r.strValues, and can be converted to an integer.
		if len(r.strValues) != 1 {
			klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
			return false
		}
		var rValue int64
		for i := range r.strValues {
			rValue, err = strconv.ParseInt(r.strValues[i], 10, 64)
			if err != nil {
				klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
				return false
			}
		}
		return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue)
	default:
		// Unknown operator: never matches. NewRequirement rejects these up front.
		return false
	}
}
// Key returns requirement key
func (r *Requirement) Key() string {
	return r.key
}

// Operator returns requirement operator
func (r *Requirement) Operator() selection.Operator {
	return r.operator
}

// Values returns requirement values as a set.
func (r *Requirement) Values() sets.Set[string] {
	return sets.New[string](r.strValues...)
}

// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting.
func (r *Requirement) ValuesUnsorted() []string {
	// Always returns a non-nil slice, even when strValues is empty.
	out := make([]string, 0, len(r.strValues))
	out = append(out, r.strValues...)
	return out
}
// Equal checks the equality of requirement.
func (r Requirement) Equal(x Requirement) bool {
	return r.key == x.key &&
		r.operator == x.operator &&
		slices.Equal(r.strValues, x.strValues)
}
// Empty returns true if the internalSelector doesn't restrict selection space
func (s internalSelector) Empty() bool {
	// len of a nil slice is 0, so the previous explicit nil check
	// was redundant; behavior is unchanged.
	return len(s) == 0
}
// String returns a human-readable string that represents this
// Requirement. If called on an invalid Requirement, an error is
// returned. See NewRequirement for creating a valid Requirement.
func (r *Requirement) String() string {
	var sb strings.Builder
	// Pre-size the builder with a heuristic upper bound to avoid growth.
	sb.Grow(
		// length of r.key
		len(r.key) +
			// length of 'r.operator' + 2 spaces for the worst case ('in' and 'notin')
			len(r.operator) + 2 +
			// length of 'r.strValues' slice times. Heuristically 5 chars per word
			+5*len(r.strValues))
	if r.operator == selection.DoesNotExist {
		sb.WriteString("!")
	}
	sb.WriteString(r.key)
	switch r.operator {
	case selection.Equals:
		sb.WriteString("=")
	case selection.DoubleEquals:
		sb.WriteString("==")
	case selection.NotEquals:
		sb.WriteString("!=")
	case selection.In:
		sb.WriteString(" in ")
	case selection.NotIn:
		sb.WriteString(" notin ")
	case selection.GreaterThan:
		sb.WriteString(">")
	case selection.LessThan:
		sb.WriteString("<")
	case selection.Exists, selection.DoesNotExist:
		// Unary operators carry no values; the key (with optional '!') is all.
		return sb.String()
	}
	// Set-based operators wrap their values in parentheses.
	switch r.operator {
	case selection.In, selection.NotIn:
		sb.WriteString("(")
	}
	if len(r.strValues) == 1 {
		sb.WriteString(r.strValues[0])
	} else { // only > 1 since == 0 prohibited by NewRequirement
		// normalizes value order on output, without mutating the in-memory selector representation
		// also avoids normalization when it is not required, and ensures we do not mutate shared data
		sb.WriteString(strings.Join(safeSort(r.strValues), ","))
	}
	switch r.operator {
	case selection.In, selection.NotIn:
		sb.WriteString(")")
	}
	return sb.String()
}
// safeSort returns the input in sorted order without mutating it.
// If the input is already sorted it is returned as-is (no copy).
func safeSort(in []string) []string {
	if slices.IsSorted(in) {
		return in
	}
	sorted := slices.Clone(in)
	slices.Sort(sorted)
	return sorted
}
// Add adds requirements to the selector. It copies the current selector returning a new one
func (s internalSelector) Add(reqs ...Requirement) Selector {
	merged := make(internalSelector, 0, len(s)+len(reqs))
	merged = append(merged, s...)
	merged = append(merged, reqs...)
	// Keep requirements sorted by key for deterministic String() output.
	sort.Sort(ByKey(merged))
	return merged
}
// Matches for a internalSelector returns true if all
// its Requirements match the input Labels. If any
// Requirement does not match, false is returned.
func (s internalSelector) Matches(l Labels) bool {
	for i := range s {
		if !s[i].Matches(l) {
			return false
		}
	}
	return true
}
// Requirements exposes the selector's requirements; an internalSelector is always selectable.
func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true }

// String returns a comma-separated string of all
// the internalSelector Requirements' human-readable strings.
func (s internalSelector) String() string {
	var b strings.Builder
	for i := range s {
		if i > 0 {
			b.WriteByte(',')
		}
		b.WriteString(s[i].String())
	}
	return b.String()
}
// RequiresExactMatch introspects whether a given selector requires a single specific field
// to be set, and if so returns the value it requires.
func (s internalSelector) RequiresExactMatch(label string) (value string, found bool) {
	for i := range s {
		req := &s[i]
		if req.key != label {
			continue
		}
		// Only the first requirement with a matching key is inspected.
		switch req.operator {
		case selection.Equals, selection.DoubleEquals, selection.In:
			if len(req.strValues) == 1 {
				return req.strValues[0], true
			}
		}
		return "", false
	}
	return "", false
}
// Token represents constant definition for lexer token
// (produced by Lexer.Lex and consumed by Parser).
type Token int

const (
	// ErrorToken represents scan error
	ErrorToken Token = iota
	// EndOfStringToken represents end of string
	EndOfStringToken
	// ClosedParToken represents close parenthesis
	ClosedParToken
	// CommaToken represents the comma
	CommaToken
	// DoesNotExistToken represents logic not
	DoesNotExistToken
	// DoubleEqualsToken represents double equals
	DoubleEqualsToken
	// EqualsToken represents equal
	EqualsToken
	// GreaterThanToken represents greater than
	GreaterThanToken
	// IdentifierToken represents identifier, e.g. keys and values
	IdentifierToken
	// InToken represents in
	InToken
	// LessThanToken represents less than
	LessThanToken
	// NotEqualsToken represents not equal
	NotEqualsToken
	// NotInToken represents not in
	NotInToken
	// OpenParToken represents open parenthesis
	OpenParToken
)
// string2token contains the mapping between lexer Token and token literal
// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense)
// Note: ErrorToken is the zero Token, which scanSpecialSymbol relies on
// to detect "no token recognized yet".
var string2token = map[string]Token{
	")": ClosedParToken,
	",": CommaToken,
	"!": DoesNotExistToken,
	"==": DoubleEqualsToken,
	"=": EqualsToken,
	">": GreaterThanToken,
	"in": InToken,
	"<": LessThanToken,
	"!=": NotEqualsToken,
	"notin": NotInToken,
	"(": OpenParToken,
}
// ScannedItem contains the Token and the literal produced by the lexer.
type ScannedItem struct {
	// tok is the token kind recognized by the lexer.
	tok Token
	// literal is the raw text the token was scanned from.
	literal string
}
// isWhitespace reports whether ch is a space, tab, carriage return, or newline.
func isWhitespace(ch byte) bool {
	switch ch {
	case ' ', '\t', '\r', '\n':
		return true
	default:
		return false
	}
}
// isSpecialSymbol detects if the character ch can start an operator token.
func isSpecialSymbol(ch byte) bool {
	return ch == '=' || ch == '!' || ch == '(' || ch == ')' ||
		ch == ',' || ch == '>' || ch == '<'
}
// Lexer represents the Lexer struct for label selector.
// It contains necessary informationt to tokenize the input string
type Lexer struct {
	// s stores the string to be tokenized
	s string
	// pos is the position currently tokenized
	// (index of the next byte read; 0 <= pos <= len(s))
	pos int
}
// read returns the next byte and advances the position;
// it returns 0 once the end of the input is reached.
func (l *Lexer) read() (b byte) {
	if l.pos >= len(l.s) {
		return 0
	}
	b = l.s[l.pos]
	l.pos++
	return b
}

// unread 'undoes' the last read character
func (l *Lexer) unread() {
	l.pos--
}
// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier.
func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) {
	var buf []byte
	for {
		ch := l.read()
		if ch == 0 {
			// End of input terminates the identifier.
			break
		}
		if isSpecialSymbol(ch) || isWhitespace(ch) {
			// Operators and whitespace end the word; push the byte back.
			l.unread()
			break
		}
		buf = append(buf, ch)
	}
	word := string(buf)
	// A word that matches a known literal ('in', 'notin', ...) is that
	// keyword token; anything else is an identifier.
	if keyword, ok := string2token[word]; ok {
		return keyword, word
	}
	return IdentifierToken, word
}
// scanSpecialSymbol scans string starting with special symbol.
// special symbol identify non literal operators. "!=", "==", "="
// It performs longest-match: '!' followed by '=' yields NotEqualsToken.
func (l *Lexer) scanSpecialSymbol() (Token, string) {
	// lastScannedItem holds the longest operator recognized so far;
	// its zero tok (ErrorToken == 0) doubles as "nothing recognized yet".
	lastScannedItem := ScannedItem{}
	var buffer []byte
	SpecialSymbolLoop:
	for {
		switch ch := l.read(); {
		case ch == 0:
			break SpecialSymbolLoop
		case isSpecialSymbol(ch):
			buffer = append(buffer, ch)
			if token, ok := string2token[string(buffer)]; ok {
				// The extended buffer is still a valid operator; remember it.
				lastScannedItem = ScannedItem{tok: token, literal: string(buffer)}
			} else if lastScannedItem.tok != 0 {
				// Extending broke the match: back off one byte and keep
				// the previously recognized operator.
				l.unread()
				break SpecialSymbolLoop
			}
		default:
			l.unread()
			break SpecialSymbolLoop
		}
	}
	if lastScannedItem.tok == 0 {
		return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer)
	}
	return lastScannedItem.tok, lastScannedItem.literal
}
// skipWhiteSpaces consumes all blank characters
// returning the first non blank character
func (l *Lexer) skipWhiteSpaces(ch byte) byte {
	for isWhitespace(ch) {
		ch = l.read()
	}
	return ch
}
// Lex returns a pair of Token and the literal
// literal is meaningfull only for IdentifierToken token
func (l *Lexer) Lex() (tok Token, lit string) {
	ch := l.skipWhiteSpaces(l.read())
	if ch == 0 {
		return EndOfStringToken, ""
	}
	// Push the byte back so the scanner sees it again.
	l.unread()
	if isSpecialSymbol(ch) {
		return l.scanSpecialSymbol()
	}
	return l.scanIDOrKeyword()
}
// Parser data structure contains the label selector parser data structure
type Parser struct {
	// l is the lexer that tokenizes the input.
	l *Lexer
	// scannedItems is the full token stream produced by scan().
	scannedItems []ScannedItem
	// position is the index of the next token to consume.
	position int
}
// ParserContext represents context during parsing:
// some literal for example 'in' and 'notin' can be
// recognized as operator for example 'x in (a)' but
// it can be recognized as value for example 'value in (in)'
type ParserContext int

const (
	// KeyAndOperator represents key and operator
	KeyAndOperator ParserContext = iota
	// Values represents values
	Values
)
// lookahead func returns the current token and string. No increment of current position
func (p *Parser) lookahead(context ParserContext) (Token, string) {
	item := p.scannedItems[p.position]
	tok, lit := item.tok, item.literal
	// In a value position, 'in'/'notin' are ordinary identifiers.
	if context == Values && (tok == InToken || tok == NotInToken) {
		tok = IdentifierToken
	}
	return tok, lit
}
// consume returns current token and string. Increments the position
func (p *Parser) consume(context ParserContext) (Token, string) {
	item := p.scannedItems[p.position]
	p.position++
	tok, lit := item.tok, item.literal
	// In a value position, 'in'/'notin' are ordinary identifiers.
	if context == Values && (tok == InToken || tok == NotInToken) {
		tok = IdentifierToken
	}
	return tok, lit
}
// scan runs through the input string and stores the ScannedItem in an array
// Parser can now lookahead and consume the tokens
func (p *Parser) scan() {
	for {
		tok, lit := p.l.Lex()
		p.scannedItems = append(p.scannedItems, ScannedItem{tok, lit})
		// The stream always ends with EndOfStringToken, so consume/lookahead
		// never run off the end as long as the parser stops there.
		if tok == EndOfStringToken {
			return
		}
	}
}
// parse runs the left recursive descending algorithm
// on input string. It returns a list of Requirement objects.
func (p *Parser) parse() (internalSelector, error) {
	p.scan() // init scannedItems
	var requirements internalSelector
	for {
		tok, lit := p.lookahead(Values)
		switch tok {
		case IdentifierToken, DoesNotExistToken:
			r, err := p.parseRequirement()
			if err != nil {
				return nil, fmt.Errorf("unable to parse requirement: %w", err)
			}
			requirements = append(requirements, *r)
			// After a requirement, only ',' (another requirement follows)
			// or end of input are valid.
			t, l := p.consume(Values)
			switch t {
			case EndOfStringToken:
				return requirements, nil
			case CommaToken:
				// A trailing or doubled comma must be followed by a new requirement.
				t2, l2 := p.lookahead(Values)
				if t2 != IdentifierToken && t2 != DoesNotExistToken {
					return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2)
				}
			default:
				return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l)
			}
		case EndOfStringToken:
			// Empty selector (or nothing after the last comma check) is valid.
			return requirements, nil
		default:
			return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit)
		}
	}
}
// parseRequirement parses a single requirement: a key, an operator, and
// (for binary operators) its values.
func (p *Parser) parseRequirement() (*Requirement, error) {
	key, operator, err := p.parseKeyAndInferOperator()
	if err != nil {
		return nil, err
	}
	if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked
		return NewRequirement(key, operator, []string{})
	}
	operator, err = p.parseOperator()
	if err != nil {
		return nil, err
	}
	var values sets.Set[string]
	switch operator {
	case selection.In, selection.NotIn:
		// Set-based operators take a parenthesized value list.
		values, err = p.parseValues()
	case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan:
		// Exact-match operators take a single value.
		values, err = p.parseExactValue()
	}
	if err != nil {
		return nil, err
	}
	// sets.List returns the values sorted, deduplicated.
	return NewRequirement(key, operator, sets.List(values))
}
// parseKeyAndInferOperator parses literals.
// in case of no operator '!, in, notin, ==, =, !=' are found
// the 'exists' operator is inferred
func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) {
	var operator selection.Operator
	tok, literal := p.consume(Values)
	// A leading '!' marks a DoesNotExist requirement; the key follows it.
	if tok == DoesNotExistToken {
		operator = selection.DoesNotExist
		tok, literal = p.consume(Values)
	}
	if tok != IdentifierToken {
		err := fmt.Errorf("found '%s', expected: identifier", literal)
		return "", "", err
	}
	if err := validateLabelKey(literal, nil); err != nil {
		return "", "", err
	}
	// A bare key followed by ',' or end of input means "key exists"
	// (unless '!' already selected DoesNotExist).
	if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken {
		if operator != selection.DoesNotExist {
			operator = selection.Exists
		}
	}
	return literal, operator, nil
}
// parseOperator returns operator and eventually matchType
// matchType can be exact
func (p *Parser) parseOperator() (op selection.Operator, err error) {
	tok, lit := p.consume(KeyAndOperator)
	// DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator
	switch tok {
	case EqualsToken:
		return selection.Equals, nil
	case DoubleEqualsToken:
		return selection.DoubleEquals, nil
	case NotEqualsToken:
		return selection.NotEquals, nil
	case InToken:
		return selection.In, nil
	case NotInToken:
		return selection.NotIn, nil
	case GreaterThanToken:
		return selection.GreaterThan, nil
	case LessThanToken:
		return selection.LessThan, nil
	}
	return "", fmt.Errorf("found '%s', expected: %v", lit, strings.Join(binaryOperators, ", "))
}
// parseValues parses the values for set based matching (x,y,z)
func (p *Parser) parseValues() (sets.Set[string], error) {
	tok, lit := p.consume(Values)
	if tok != OpenParToken {
		return nil, fmt.Errorf("found '%s' expected: '('", lit)
	}
	tok, lit = p.lookahead(Values)
	switch tok {
	case IdentifierToken, CommaToken:
		s, err := p.parseIdentifiersList() // handles general cases
		if err != nil {
			return s, err
		}
		// Bug fix: report the literal of the token actually consumed here;
		// previously the stale literal from the earlier lookahead was used,
		// so the error named the wrong token.
		if tok, lit = p.consume(Values); tok != ClosedParToken {
			return nil, fmt.Errorf("found '%s', expected: ')'", lit)
		}
		return s, nil
	case ClosedParToken: // handles "()": an empty value set containing only "".
		p.consume(Values)
		return sets.New[string](""), nil
	default:
		return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
	}
}
// parseIdentifiersList parses a (possibly empty) list of
// of comma separated (possibly empty) identifiers
// Stops at (and does not consume) the closing parenthesis.
func (p *Parser) parseIdentifiersList() (sets.Set[string], error) {
	s := sets.New[string]()
	for {
		tok, lit := p.consume(Values)
		switch tok {
		case IdentifierToken:
			s.Insert(lit)
			tok2, lit2 := p.lookahead(Values)
			switch tok2 {
			case CommaToken:
				continue
			case ClosedParToken:
				return s, nil
			default:
				return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
			}
		case CommaToken: // handled here since we can have "(,"
			if s.Len() == 0 {
				s.Insert("") // to handle (,
			}
			tok2, _ := p.lookahead(Values)
			if tok2 == ClosedParToken {
				s.Insert("") // to handle ,) Double "" removed by StringSet
				return s, nil
			}
			if tok2 == CommaToken {
				p.consume(Values)
				s.Insert("") // to handle ,, Double "" removed by StringSet
			}
		default: // it can be operator
			return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
		}
	}
}
// parseExactValue parses the only value for exact match style
// requirements (=, ==, !=). An empty value is implied when the input
// ends here ("k=") or the next requirement follows immediately ("k=,y").
func (p *Parser) parseExactValue() (sets.Set[string], error) {
	result := sets.New[string]()
	if tok, _ := p.lookahead(Values); tok == EndOfStringToken || tok == CommaToken {
		result.Insert("")
		return result, nil
	}
	tok, lit := p.consume(Values)
	if tok != IdentifierToken {
		return nil, fmt.Errorf("found '%s', expected: identifier", lit)
	}
	result.Insert(lit)
	return result, nil
}
// Parse takes a string representing a selector and returns a selector
// object, or an error. This parsing function differs from ParseSelector
// as they parse different selectors with different syntaxes.
// The input will cause an error if it does not follow this form:
//
//	<selector-syntax>         ::= <requirement> | <requirement> "," <selector-syntax>
//	<requirement>             ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
//	<set-based-restriction>   ::= "" | <inclusion-exclusion> <value-set>
//	<inclusion-exclusion>     ::= <inclusion> | <exclusion>
//	<exclusion>               ::= "notin"
//	<inclusion>               ::= "in"
//	<value-set>               ::= "(" <values> ")"
//	<values>                  ::= VALUE | VALUE "," <values>
//	<exact-match-restriction> ::= ["="|"=="|"!="] VALUE
//
// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
// Delimiter is white space: (' ', '\t')
// Example of valid syntax:
//
//	"x in (foo,,baz),y,z notin ()"
//
// Note:
//  1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the
//     VALUEs in its requirement
//  2. Exclusion - " notin " - denotes that the KEY is not equal to any
//     of the VALUEs in its requirement or does not exist
//  3. The empty string is a valid VALUE
//  4. A requirement with just a KEY - as in "y" above - denotes that
//     the KEY exists and can be any VALUE.
//  5. A requirement with just !KEY requires that the KEY not exist.
func Parse(selector string, opts ...field.PathOption) (Selector, error) {
	parsedSelector, err := parse(selector, field.ToPath(opts...))
	if err != nil {
		return nil, err
	}
	return parsedSelector, nil
}
// parse parses the string representation of the selector and returns the internalSelector struct.
// The callers of this method can then decide how to return the internalSelector struct to their
// callers. This function has two callers now, one returns a Selector interface and the other
// returns a list of requirements.
func parse(selector string, _ *field.Path) (internalSelector, error) {
	p := &Parser{l: &Lexer{s: selector, pos: 0}}
	items, err := p.parse()
	if err != nil {
		return nil, err
	}
	sort.Sort(ByKey(items)) // sort to grant deterministic parsing
	return internalSelector(items), err
}
// validateLabelKey returns a field.Error when k is not a valid qualified
// label key, nil otherwise.
func validateLabelKey(k string, path *field.Path) *field.Error {
	errs := validation.IsQualifiedName(k)
	if len(errs) == 0 {
		return nil
	}
	return field.Invalid(path, k, strings.Join(errs, "; "))
}
// validateLabelValue returns a field.Error when v is not a valid label
// value, nil otherwise. k is used only to build the error's field path.
func validateLabelValue(k, v string, path *field.Path) *field.Error {
	errs := validation.IsValidLabelValue(v)
	if len(errs) == 0 {
		return nil
	}
	return field.Invalid(path.Key(k), v, strings.Join(errs, "; "))
}
// SelectorFromSet returns a Selector which will match exactly the given Set. A
// nil and empty Sets are considered equivalent to Everything().
// It does not perform any validation, which means the server will reject
// the request if the Set contains invalid values.
func SelectorFromSet(ls Set) Selector {
	// Delegates to the validated variant; no client-side checks happen here.
	return SelectorFromValidatedSet(ls)
}
// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set. A
// nil and empty Sets are considered equivalent to Everything().
// The Set is validated client-side, which allows to catch errors early.
func ValidatedSelectorFromSet(ls Set) (Selector, error) {
	if len(ls) == 0 {
		return internalSelector{}, nil
	}
	reqs := make([]Requirement, 0, len(ls))
	for k, v := range ls {
		// NewRequirement validates the key/value pair before accepting it.
		req, err := NewRequirement(k, selection.Equals, []string{v})
		if err != nil {
			return nil, err
		}
		reqs = append(reqs, *req)
	}
	// Sort to have a deterministic string representation.
	sort.Sort(ByKey(reqs))
	return internalSelector(reqs), nil
}
// SelectorFromValidatedSet returns a Selector which will match exactly the given Set.
// A nil and empty Sets are considered equivalent to Everything().
// It assumes that Set is already validated and doesn't do any validation.
// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector
// instead, which does not copy.
func SelectorFromValidatedSet(ls Set) Selector {
	if len(ls) == 0 {
		return internalSelector{}
	}
	reqs := make([]Requirement, 0, len(ls))
	for k, v := range ls {
		reqs = append(reqs, Requirement{key: k, operator: selection.Equals, strValues: []string{v}})
	}
	// Sort to have a deterministic string representation.
	sort.Sort(ByKey(reqs))
	return internalSelector(reqs)
}
// ParseToRequirements takes a string representing a selector and returns a list of
// requirements. This function is suitable for those callers that perform additional
// processing on selector requirements.
// See the documentation for Parse() function for more details.
// TODO: Consider exporting the internalSelector type instead.
func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) {
	// The requirements come back sorted by key; see parse.
	return parse(selector, field.ToPath(opts...))
}
// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike
// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying
// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered
// equivalent to Everything().
//
// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these
// constraints are not met, Set.AsValidatedSelector should be preferred.
//
// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to
// the less optimized version.
type ValidatedSetSelector Set
// Matches reports whether every key/value pair in s is present with the
// same value in the given labels.
func (s ValidatedSetSelector) Matches(labels Labels) bool {
	for key, want := range s {
		if !labels.Has(key) || labels.Get(key) != want {
			return false
		}
	}
	return true
}
// Empty reports whether the selector places no constraints (matches everything).
func (s ValidatedSetSelector) Empty() bool {
	return len(s) == 0
}
// String renders the selector as comma-separated "key=value" pairs in
// sorted key order.
func (s ValidatedSetSelector) String() string {
	var sb strings.Builder
	first := true
	// Sort keys so output is deterministic despite random map iteration order.
	for _, k := range slices.Sorted(maps.Keys(s)) {
		val := s[k]
		sb.Grow(len(k) + 2 + len(val))
		if !first {
			sb.WriteString(",")
		}
		first = false
		sb.WriteString(k)
		sb.WriteString("=")
		sb.WriteString(val)
	}
	return sb.String()
}
// Add converts to the full internalSelector form and appends the given
// requirements; the receiver itself is not mutated.
func (s ValidatedSetSelector) Add(r ...Requirement) Selector {
	return s.toFullSelector().Add(r...)
}
// Requirements converts to the full internalSelector form and returns its
// requirement list.
func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) {
	return s.toFullSelector().Requirements()
}
// DeepCopySelector returns a copy of the selector backed by a cloned map.
func (s ValidatedSetSelector) DeepCopySelector() Selector {
	return maps.Clone(s)
}
// RequiresExactMatch reports whether the selector pins label to a single
// value, returning that value when it does.
func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) {
	value, found = s[label]
	return value, found
}
// toFullSelector converts the map-backed selector into the general
// internalSelector form used by Add and Requirements.
func (s ValidatedSetSelector) toFullSelector() Selector {
	return SelectorFromValidatedSet(Set(s))
}

// Compile-time assertion that ValidatedSetSelector implements Selector.
var _ Selector = ValidatedSetSelector{}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package labels
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Requirement) DeepCopyInto(out *Requirement) {
	*out = *in
	if in.strValues != nil {
		// Copy the backing array so out does not alias in's values.
		in, out := &in.strValues, &out.strValues
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement.
func (in *Requirement) DeepCopy() *Requirement {
	if in == nil {
		return nil
	}
	out := new(Requirement)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package labels
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ByKey) DeepEqual(other *ByKey) bool {
	if other == nil {
		return false
	}
	// Slices are equal when lengths match and elements compare DeepEqual in order.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Lexer) DeepEqual(other *Lexer) bool {
	if other == nil {
		return false
	}
	// Field-by-field comparison of the lexer's input string and cursor.
	if in.s != other.s {
		return false
	}
	if in.pos != other.pos {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Parser) DeepEqual(other *Parser) bool {
	if other == nil {
		return false
	}
	// Both lexers must be nil or both non-nil and DeepEqual.
	if (in.l == nil) != (other.l == nil) {
		return false
	} else if in.l != nil {
		if !in.l.DeepEqual(other.l) {
			return false
		}
	}
	if ((in.scannedItems != nil) && (other.scannedItems != nil)) || ((in.scannedItems == nil) != (other.scannedItems == nil)) {
		// NOTE: in/other are shadowed here to point at the slices for the
		// remainder of this scope.
		in, other := &in.scannedItems, &other.scannedItems
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if in.position != other.position {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Requirement) DeepEqual(other *Requirement) bool {
	if other == nil {
		return false
	}
	if in.key != other.key {
		return false
	}
	if in.operator != other.operator {
		return false
	}
	if ((in.strValues != nil) && (other.strValues != nil)) || ((in.strValues == nil) != (other.strValues == nil)) {
		// NOTE: in/other are shadowed here to point at the strValues slices.
		in, other := &in.strValues, &other.strValues
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Requirements) DeepEqual(other *Requirements) bool {
	if other == nil {
		return false
	}
	// Slices are equal when lengths match and elements compare DeepEqual in order.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ScannedItem) DeepEqual(other *ScannedItem) bool {
	if other == nil {
		return false
	}
	// Field-by-field comparison of the scanned token and its literal text.
	if in.tok != other.tok {
		return false
	}
	if in.literal != other.literal {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Set) DeepEqual(other *Set) bool {
	if other == nil {
		return false
	}
	// Maps are equal when lengths match and every key maps to the same value.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if inValue != otherValue {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ValidatedSetSelector) DeepEqual(other *ValidatedSetSelector) bool {
	if other == nil {
		return false
	}
	// Maps are equal when lengths match and every key maps to the same value.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if inValue != otherValue {
					return false
				}
			}
		}
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto
package v1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
// Reference imports to suppress errors if they are not otherwise used
// (kept so the import block above always compiles).
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Protobuf plumbing for Condition, generated by protoc-gen-gogo.
func (m *Condition) Reset()      { *m = Condition{} }
func (*Condition) ProtoMessage() {}
func (*Condition) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{0}
}
func (m *Condition) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *Condition) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Condition.Merge(m, src)
}
func (m *Condition) XXX_Size() int {
	return m.Size()
}
func (m *Condition) XXX_DiscardUnknown() {
	xxx_messageInfo_Condition.DiscardUnknown(m)
}

var xxx_messageInfo_Condition proto.InternalMessageInfo
// Protobuf plumbing for LabelSelector, generated by protoc-gen-gogo.
func (m *LabelSelector) Reset()      { *m = LabelSelector{} }
func (*LabelSelector) ProtoMessage() {}
func (*LabelSelector) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{1}
}
func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *LabelSelector) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LabelSelector.Merge(m, src)
}
func (m *LabelSelector) XXX_Size() int {
	return m.Size()
}
func (m *LabelSelector) XXX_DiscardUnknown() {
	xxx_messageInfo_LabelSelector.DiscardUnknown(m)
}

var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
// Protobuf plumbing for LabelSelectorRequirement, generated by protoc-gen-gogo.
func (m *LabelSelectorRequirement) Reset()      { *m = LabelSelectorRequirement{} }
func (*LabelSelectorRequirement) ProtoMessage() {}
func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{2}
}
func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LabelSelectorRequirement.Merge(m, src)
}
func (m *LabelSelectorRequirement) XXX_Size() int {
	return m.Size()
}
func (m *LabelSelectorRequirement) XXX_DiscardUnknown() {
	xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m)
}

var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
// Protobuf plumbing for ListMeta, generated by protoc-gen-gogo.
func (m *ListMeta) Reset()      { *m = ListMeta{} }
func (*ListMeta) ProtoMessage() {}
func (*ListMeta) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{3}
}
func (m *ListMeta) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ListMeta) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListMeta.Merge(m, src)
}
func (m *ListMeta) XXX_Size() int {
	return m.Size()
}
func (m *ListMeta) XXX_DiscardUnknown() {
	xxx_messageInfo_ListMeta.DiscardUnknown(m)
}

var xxx_messageInfo_ListMeta proto.InternalMessageInfo
// Protobuf plumbing for ObjectMeta, generated by protoc-gen-gogo.
func (m *ObjectMeta) Reset()      { *m = ObjectMeta{} }
func (*ObjectMeta) ProtoMessage() {}
func (*ObjectMeta) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{4}
}
func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *ObjectMeta) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ObjectMeta.Merge(m, src)
}
func (m *ObjectMeta) XXX_Size() int {
	return m.Size()
}
func (m *ObjectMeta) XXX_DiscardUnknown() {
	xxx_messageInfo_ObjectMeta.DiscardUnknown(m)
}

var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
// Protobuf plumbing for OwnerReference, generated by protoc-gen-gogo.
func (m *OwnerReference) Reset()      { *m = OwnerReference{} }
func (*OwnerReference) ProtoMessage() {}
func (*OwnerReference) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{5}
}
func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *OwnerReference) XXX_Merge(src proto.Message) {
	xxx_messageInfo_OwnerReference.Merge(m, src)
}
func (m *OwnerReference) XXX_Size() int {
	return m.Size()
}
func (m *OwnerReference) XXX_DiscardUnknown() {
	xxx_messageInfo_OwnerReference.DiscardUnknown(m)
}

var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
// Protobuf plumbing for PartialObjectMetadata, generated by protoc-gen-gogo.
func (m *PartialObjectMetadata) Reset()      { *m = PartialObjectMetadata{} }
func (*PartialObjectMetadata) ProtoMessage() {}
func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{6}
}
func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PartialObjectMetadata.Merge(m, src)
}
func (m *PartialObjectMetadata) XXX_Size() int {
	return m.Size()
}
func (m *PartialObjectMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m)
}

var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
// Protobuf plumbing for PartialObjectMetadataList, generated by protoc-gen-gogo.
func (m *PartialObjectMetadataList) Reset()      { *m = PartialObjectMetadataList{} }
func (*PartialObjectMetadataList) ProtoMessage() {}
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{7}
}
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
}
func (m *PartialObjectMetadataList) XXX_Size() int {
	return m.Size()
}
func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
	xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
}

var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
// Protobuf plumbing for Time, generated by protoc-gen-gogo. Note that Time
// delegates to xxx_messageInfo_Time (reflection-based) rather than to
// generated Unmarshal/Marshal methods, unlike the other messages here.
func (m *Time) Reset()      { *m = Time{} }
func (*Time) ProtoMessage() {}
func (*Time) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{8}
}
func (m *Time) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Time.Unmarshal(m, b)
}
func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Time.Marshal(b, m, deterministic)
}
func (m *Time) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Time.Merge(m, src)
}
func (m *Time) XXX_Size() int {
	return xxx_messageInfo_Time.Size(m)
}
func (m *Time) XXX_DiscardUnknown() {
	xxx_messageInfo_Time.DiscardUnknown(m)
}

var xxx_messageInfo_Time proto.InternalMessageInfo
// Protobuf plumbing for Timestamp, generated by protoc-gen-gogo.
func (m *Timestamp) Reset()      { *m = Timestamp{} }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{9}
}
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *Timestamp) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Timestamp.Merge(m, src)
}
func (m *Timestamp) XXX_Size() int {
	return m.Size()
}
func (m *Timestamp) XXX_DiscardUnknown() {
	xxx_messageInfo_Timestamp.DiscardUnknown(m)
}

var xxx_messageInfo_Timestamp proto.InternalMessageInfo
// Protobuf plumbing for TypeMeta, generated by protoc-gen-gogo.
func (m *TypeMeta) Reset()      { *m = TypeMeta{} }
func (*TypeMeta) ProtoMessage() {}
func (*TypeMeta) Descriptor() ([]byte, []int) {
	return fileDescriptor_e0f89ca41f751b36, []int{10}
}
func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}
func (m *TypeMeta) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TypeMeta.Merge(m, src)
}
func (m *TypeMeta) XXX_Size() int {
	return m.Size()
}
func (m *TypeMeta) XXX_DiscardUnknown() {
	xxx_messageInfo_TypeMeta.DiscardUnknown(m)
}

var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
// init registers every generated message (and its map entry types) with the
// gogo/protobuf type registry under its fully qualified proto name.
func init() {
	proto.RegisterType((*Condition)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Condition")
	proto.RegisterType((*LabelSelector)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelector")
	proto.RegisterMapType((map[string]string)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelector.MatchLabelsEntry")
	proto.RegisterType((*LabelSelectorRequirement)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelectorRequirement")
	proto.RegisterType((*ListMeta)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta")
	proto.RegisterType((*ObjectMeta)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta")
	proto.RegisterMapType((map[string]string)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta.AnnotationsEntry")
	proto.RegisterMapType((map[string]string)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta.LabelsEntry")
	proto.RegisterType((*OwnerReference)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.OwnerReference")
	proto.RegisterType((*PartialObjectMetadata)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.PartialObjectMetadata")
	proto.RegisterType((*PartialObjectMetadataList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.PartialObjectMetadataList")
	proto.RegisterType((*Time)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time")
	proto.RegisterType((*Timestamp)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Timestamp")
	proto.RegisterType((*TypeMeta)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.TypeMeta")
}
// init registers the compressed file descriptor under the proto file's
// canonical import path.
func init() {
	proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto", fileDescriptor_e0f89ca41f751b36)
}
var fileDescriptor_e0f89ca41f751b36 = []byte{
// 1205 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6f, 0x1b, 0xc5,
0x17, 0xf7, 0x66, 0x13, 0xc7, 0x7e, 0x4e, 0x9b, 0x74, 0xbe, 0xed, 0x97, 0x25, 0x12, 0xb6, 0x6b,
0x24, 0x94, 0x4a, 0xb0, 0xa6, 0x39, 0x54, 0x69, 0x85, 0x10, 0xdd, 0xf4, 0x87, 0x42, 0x9b, 0xb4,
0x9a, 0xb4, 0x3d, 0x00, 0x07, 0x26, 0xeb, 0xa9, 0x33, 0x64, 0x77, 0xd6, 0xec, 0x8c, 0x03, 0xbe,
0x95, 0x5b, 0x01, 0x21, 0x55, 0x1c, 0x10, 0xc7, 0x56, 0xe2, 0x3f, 0xe1, 0xd2, 0x63, 0x8f, 0x3d,
0x20, 0x8b, 0x1a, 0xfe, 0x08, 0x54, 0x21, 0x81, 0x66, 0x76, 0xd6, 0xbb, 0xfe, 0x51, 0x95, 0xba,
0x39, 0x79, 0xf7, 0xbd, 0x37, 0x9f, 0xcf, 0x67, 0x76, 0xdf, 0xfb, 0xcc, 0x1a, 0xae, 0xb4, 0x99,
0xdc, 0xef, 0xee, 0xb9, 0x7e, 0x14, 0x36, 0x7d, 0x16, 0xb0, 0xee, 0xf0, 0xa7, 0x73, 0xd0, 0x6e,
0x1e, 0x6c, 0x88, 0xa6, 0x08, 0x58, 0xa8, 0x2f, 0x48, 0x87, 0x89, 0x66, 0x48, 0x25, 0x69, 0x1e,
0x9e, 0x6d, 0xb6, 0x29, 0xa7, 0x31, 0x91, 0xb4, 0xe5, 0x76, 0xe2, 0x48, 0x46, 0xe8, 0x5c, 0x86,
0xe3, 0x26, 0x00, 0xe9, 0x4f, 0xe7, 0xa0, 0xed, 0x1e, 0x6c, 0x08, 0x57, 0xe1, 0xe8, 0x0b, 0x85,
0xe3, 0x2a, 0x1c, 0xf7, 0xf0, 0xec, 0xea, 0x7b, 0x39, 0xfe, 0x76, 0xd4, 0x8e, 0x9a, 0x1a, 0x6e,
0xaf, 0x7b, 0x57, 0xdf, 0xe9, 0x1b, 0x7d, 0x95, 0xd0, 0xac, 0x2a, 0x21, 0x2e, 0x8b, 0x94, 0x96,
0x90, 0xf8, 0xfb, 0x8c, 0xd3, 0xb8, 0xa7, 0x95, 0xc6, 0x5d, 0x2e, 0x59, 0x48, 0xc7, 0x75, 0xad,
0x9e, 0x7b, 0xd9, 0x02, 0xe1, 0xef, 0xd3, 0x90, 0x8c, 0xaf, 0x6b, 0xfc, 0x68, 0x43, 0x79, 0x33,
0xe2, 0x2d, 0x26, 0x59, 0xc4, 0x51, 0x1d, 0xe6, 0x65, 0xaf, 0x43, 0x1d, 0xab, 0x6e, 0xad, 0x95,
0xbd, 0xa5, 0xc7, 0xfd, 0x5a, 0x61, 0xd0, 0xaf, 0xcd, 0xdf, 0xea, 0x75, 0x28, 0xd6, 0x19, 0x74,
0x1e, 0x8a, 0x42, 0x12, 0xd9, 0x15, 0xce, 0x9c, 0xae, 0x39, 0x6d, 0x6a, 0x8a, 0xbb, 0x3a, 0xfa,
0xbc, 0x5f, 0x5b, 0x1e, 0xc2, 0x25, 0x21, 0x6c, 0x16, 0xa0, 0x8f, 0x01, 0x45, 0x7b, 0x82, 0xc6,
0x87, 0xb4, 0x75, 0x35, 0x51, 0xc1, 0x22, 0xee, 0xd8, 0x75, 0x6b, 0xcd, 0xf6, 0x56, 0x0d, 0x0c,
0xba, 0x31, 0x51, 0x81, 0xa7, 0xac, 0x42, 0xf7, 0x2d, 0x40, 0x01, 0x11, 0xf2, 0x56, 0x4c, 0xb8,
0xd0, 0x64, 0xb7, 0x58, 0x48, 0x9d, 0xf9, 0xba, 0xb5, 0x56, 0x59, 0xff, 0xc0, 0x9d, 0xed, 0x25,
0xb9, 0x0a, 0x23, 0x93, 0x72, 0x7d, 0x02, 0x1f, 0x4f, 0xe1, 0x44, 0xef, 0x40, 0x31, 0xa6, 0x44,
0x44, 0xdc, 0x59, 0xd0, 0x4f, 0xe4, 0x78, 0xfa, 0x44, 0xb0, 0x8e, 0x62, 0x93, 0x45, 0x67, 0x60,
0x31, 0xa4, 0x42, 0x90, 0x36, 0x75, 0x8a, 0xba, 0x70, 0xd9, 0x14, 0x2e, 0x6e, 0x27, 0x61, 0x9c,
0xe6, 0x1b, 0x7f, 0xcd, 0xc1, 0xb1, 0xeb, 0x64, 0x8f, 0x06, 0xbb, 0x34, 0xa0, 0xbe, 0x8c, 0x62,
0xf4, 0x83, 0x05, 0x95, 0x90, 0x48, 0x7f, 0x5f, 0x87, 0x85, 0x63, 0xd5, 0xed, 0xb5, 0xca, 0xfa,
0x9d, 0x59, 0x37, 0x3a, 0x02, 0xee, 0x6e, 0x67, 0xc0, 0x97, 0xb9, 0x8c, 0x7b, 0xde, 0xff, 0x8c,
0xb2, 0x4a, 0x2e, 0x83, 0xf3, 0xfc, 0xe8, 0x27, 0x0b, 0x56, 0xf4, 0xfd, 0xe5, 0xaf, 0x3b, 0x31,
0x15, 0x82, 0x45, 0x5c, 0x75, 0x84, 0x12, 0x75, 0xf3, 0x48, 0x44, 0x61, 0xfa, 0x65, 0x97, 0xc5,
0x34, 0xa4, 0x5c, 0x7a, 0x8e, 0x91, 0xb3, 0xb2, 0x3d, 0xc6, 0x88, 0x27, 0x34, 0xac, 0x7e, 0x08,
0x2b, 0xe3, 0xdb, 0x41, 0x2b, 0x60, 0x1f, 0xd0, 0x5e, 0xd2, 0xd4, 0x58, 0x5d, 0xa2, 0x93, 0xb0,
0x70, 0x48, 0x82, 0x2e, 0x4d, 0x9a, 0x18, 0x27, 0x37, 0x17, 0xe6, 0x36, 0xac, 0xc6, 0x2f, 0x16,
0x38, 0x2f, 0x12, 0x82, 0xde, 0xca, 0x01, 0x79, 0x15, 0xa3, 0xca, 0xbe, 0x46, 0x7b, 0x09, 0xea,
0x65, 0x28, 0x45, 0x1d, 0xd5, 0xa1, 0x51, 0x6c, 0xa6, 0xe3, 0x8c, 0xa9, 0x29, 0xdd, 0x30, 0xf1,
0xe7, 0xfd, 0xda, 0xa9, 0x11, 0xf8, 0x34, 0x81, 0x87, 0x4b, 0x51, 0x03, 0x8a, 0x5a, 0x8f, 0x70,
0xec, 0xba, 0xbd, 0x56, 0xf6, 0x40, 0x35, 0xd3, 0x1d, 0x1d, 0xc1, 0x26, 0xd3, 0xf8, 0xd5, 0x82,
0xd2, 0x75, 0x26, 0xe4, 0x36, 0x95, 0x04, 0x5d, 0x84, 0xe5, 0x98, 0x8a, 0xa8, 0x1b, 0xfb, 0xf4,
0x0e, 0x8d, 0xd5, 0x73, 0x30, 0xf4, 0x6f, 0x18, 0xfa, 0x65, 0x3c, 0x9a, 0xc6, 0xe3, 0xf5, 0xe8,
0x5d, 0x28, 0xf9, 0x11, 0x97, 0x8c, 0x77, 0xa9, 0x9e, 0xc8, 0xb2, 0xb7, 0x92, 0x4a, 0xdf, 0x34,
0x71, 0x3c, 0xac, 0x40, 0x57, 0x00, 0xc5, 0x34, 0x24, 0x8c, 0x33, 0xde, 0xde, 0x92, 0x34, 0xdc,
0x8c, 0xba, 0x5c, 0xea, 0xe1, 0xb3, 0xbd, 0xff, 0xab, 0xd1, 0xc1, 0x13, 0x59, 0x3c, 0x65, 0x45,
0xe3, 0xcf, 0x45, 0x80, 0x1b, 0x7b, 0x5f, 0x50, 0x3f, 0xd9, 0x47, 0x1d, 0xe6, 0x39, 0x09, 0x27,
0xdc, 0x67, 0x87, 0x84, 0x14, 0xeb, 0x0c, 0xda, 0x80, 0xa5, 0xd4, 0xc0, 0x54, 0xd4, 0x6c, 0xf3,
0xa4, 0xa9, 0x5c, 0xba, 0x9a, 0xcb, 0xe1, 0x91, 0x4a, 0xd4, 0x84, 0xb2, 0x42, 0x10, 0x1d, 0xe2,
0xa7, 0x3b, 0x3c, 0x61, 0x96, 0x95, 0x77, 0xd2, 0x04, 0xce, 0x6a, 0x90, 0x07, 0x76, 0x97, 0xb5,
0xcc, 0x4c, 0xbf, 0x9f, 0xbe, 0xeb, 0xdb, 0x5b, 0x97, 0x9e, 0xf7, 0x6b, 0xa7, 0x5f, 0x64, 0xb6,
0xca, 0x22, 0x85, 0x7b, 0x7b, 0xeb, 0x12, 0x56, 0x8b, 0xa7, 0xbd, 0x98, 0xe2, 0x2b, 0xbe, 0x98,
0x75, 0x80, 0x76, 0x66, 0x96, 0x8b, 0xfa, 0x11, 0x23, 0xb3, 0x1a, 0x72, 0x26, 0x99, 0xab, 0x42,
0xdf, 0x58, 0x70, 0xa2, 0x45, 0x03, 0x9a, 0x5a, 0x94, 0x90, 0x24, 0xec, 0x38, 0xe5, 0x23, 0xf0,
0xc6, 0x53, 0x83, 0x7e, 0xed, 0xc4, 0xa5, 0x71, 0x68, 0x3c, 0xc9, 0x86, 0x0e, 0xa1, 0x18, 0x24,
0x56, 0x55, 0xd1, 0xae, 0xb0, 0x33, 0x2b, 0x6f, 0xd6, 0x1f, 0x6e, 0xde, 0xa2, 0x86, 0x2e, 0x6b,
0xdc, 0xc9, 0xb0, 0xa1, 0xef, 0x2c, 0xa8, 0x10, 0xce, 0x23, 0xa9, 0x1f, 0x85, 0x70, 0x96, 0x34,
0xfb, 0xee, 0x11, 0xb0, 0x5f, 0xcc, 0x50, 0xc7, 0x5c, 0x32, 0x97, 0xc1, 0x79, 0x72, 0xf4, 0xad,
0x05, 0xcb, 0xd1, 0x57, 0x9c, 0xc6, 0x98, 0xde, 0xa5, 0x31, 0xe5, 0x3e, 0x15, 0xce, 0x31, 0x2d,
0xe8, 0xca, 0xcc, 0x82, 0x46, 0xe0, 0xb2, 0x46, 0x1a, 0x8d, 0x0b, 0x3c, 0xce, 0xbb, 0x7a, 0x1e,
0x2a, 0x33, 0x7a, 0xa2, 0xf2, 0xd4, 0xf1, 0xcd, 0xbf, 0x92, 0xa7, 0xfe, 0x6d, 0xc1, 0xf1, 0x51,
0x7d, 0xaa, 0xad, 0x49, 0x87, 0xa5, 0x43, 0x91, 0x0c, 0xd9, 0xb0, 0xad, 0x2f, 0xde, 0xdc, 0x4a,
0xe7, 0x21, 0x57, 0xa5, 0xec, 0xe1, 0x80, 0xf1, 0xd6, 0xb8, 0x3d, 0x5c, 0x63, 0xbc, 0x85, 0x75,
0x66, 0x68, 0x20, 0xf6, 0x0b, 0x0d, 0xc4, 0x4c, 0xf5, 0xfc, 0xeb, 0x4c, 0xb5, 0x0b, 0xa0, 0x9c,
0x30, 0x8e, 0x82, 0x80, 0xc6, 0x7a, 0xa0, 0x4b, 0xde, 0x71, 0xa5, 0x7b, 0x73, 0x18, 0xc5, 0xb9,
0x8a, 0xc6, 0xf7, 0x16, 0x9c, 0xba, 0x49, 0x62, 0xc9, 0x48, 0x90, 0xb5, 0x53, 0x8b, 0x48, 0x82,
0x62, 0x28, 0x85, 0xe6, 0x5a, 0xef, 0xaa, 0xb2, 0xee, 0xbd, 0x7e, 0xa3, 0x66, 0xcf, 0x31, 0x8b,
0xe1, 0x21, 0x4f, 0xe3, 0x1f, 0x0b, 0xde, 0x9c, 0xaa, 0x46, 0x1d, 0x27, 0x88, 0x4f, 0x28, 0xfa,
0x68, 0xe6, 0xe3, 0xdc, 0x1c, 0x4f, 0xd9, 0x49, 0x92, 0x46, 0x32, 0x35, 0x28, 0x86, 0x05, 0x26,
0x69, 0x98, 0x7e, 0x3b, 0x6c, 0xcf, 0x4a, 0x36, 0x75, 0x47, 0xde, 0x31, 0xc3, 0xbc, 0xa0, 0x8e,
0x1c, 0x81, 0x13, 0xaa, 0x46, 0x00, 0xf3, 0xfa, 0xc3, 0xed, 0x0c, 0x2c, 0x0a, 0xea, 0x47, 0xbc,
0x25, 0xf4, 0x56, 0xed, 0xec, 0x83, 0x6c, 0x37, 0x09, 0xe3, 0x34, 0x8f, 0xde, 0x86, 0x05, 0x4e,
0x78, 0x94, 0x7c, 0xf4, 0x2e, 0x64, 0xb8, 0x3b, 0x2a, 0x88, 0x93, 0xdc, 0x85, 0x93, 0x3f, 0x3f,
0xac, 0x15, 0xee, 0x3f, 0xaa, 0x15, 0x1e, 0x3c, 0xaa, 0x15, 0x1e, 0x3e, 0xaa, 0x15, 0xee, 0xfd,
0x56, 0x2f, 0x34, 0x3e, 0x85, 0x72, 0xe6, 0x8a, 0x47, 0x4c, 0xd9, 0xf8, 0x1c, 0x4a, 0xea, 0xdb,
0x3c, 0x3d, 0x3d, 0x5f, 0x32, 0x1e, 0xa3, 0x43, 0x37, 0xf7, 0x5f, 0x86, 0xce, 0xfb, 0xec, 0xf1,
0xb3, 0x6a, 0xe1, 0xc9, 0xb3, 0x6a, 0xe1, 0xe9, 0xb3, 0x6a, 0xe1, 0xde, 0xa0, 0x6a, 0x3d, 0x1e,
0x54, 0xad, 0x27, 0x83, 0xaa, 0xf5, 0x74, 0x50, 0xb5, 0x7e, 0x1f, 0x54, 0xad, 0x07, 0x7f, 0x54,
0x0b, 0x9f, 0x9c, 0x9b, 0xed, 0xdf, 0xd5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x8c, 0x32,
0xb1, 0x96, 0x0d, 0x00, 0x00,
}
// Marshal serializes m into a freshly allocated buffer.
func (m *Condition) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into the beginning of dAtA.
func (m *Condition) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer serializes m into the tail of dAtA, writing fields
// back-to-front (highest field number first, as gogo-generated marshalers
// do) and returns the number of bytes written.
func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	i -= len(m.Message)
	copy(dAtA[i:], m.Message)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
	i--
	dAtA[i] = 0x32
	i -= len(m.Reason)
	copy(dAtA[i:], m.Reason)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
	i--
	dAtA[i] = 0x2a
	{
		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintGenerated(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x22
	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
	i--
	dAtA[i] = 0x18
	i -= len(m.Status)
	copy(dAtA[i:], m.Status)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
	i--
	dAtA[i] = 0x12
	i -= len(m.Type)
	copy(dAtA[i:], m.Type)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil
}
func (m *LabelSelector) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count. MatchExpressions (field 2) is written before MatchLabels
// (field 1) because the buffer is filled end-to-front; both are emitted only
// when non-empty.
func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.MatchExpressions) > 0 {
// reverse iteration so entries appear in forward order on the wire
for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.MatchLabels) > 0 {
// sort the map keys so the encoding is deterministic
keysForMatchLabels := make([]string, 0, len(m.MatchLabels))
for k := range m.MatchLabels {
keysForMatchLabels = append(keysForMatchLabels, string(k))
}
github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels)
for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- {
v := m.MatchLabels[string(keysForMatchLabels[iNdEx])]
// baseI marks the end of this map entry; each entry is encoded as a
// nested message: value (tag 0x12), key (tag 0xa), then entry length
baseI := i
i -= len(v)
copy(dAtA[i:], v)
i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
i--
dAtA[i] = 0x12
i -= len(keysForMatchLabels[iNdEx])
copy(dAtA[i:], keysForMatchLabels[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx])))
i--
dAtA[i] = 0xa
i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer
// sized exactly by m.Size().
func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, marshalErr := m.MarshalToSizedBuffer(buf)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return buf[:n], nil
}
// MarshalTo encodes m into the first m.Size() bytes of dAtA and returns
// the number of bytes written.
func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count. Values (field 3) is optional; Operator (field 2) and Key
// (field 1) are always written.
func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Values) > 0 {
// reverse iteration keeps the repeated strings in forward wire order
for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Values[iNdEx])
copy(dAtA[i:], m.Values[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
i--
dAtA[i] = 0x1a
}
}
// field 2: Operator (tag 0x12)
i -= len(m.Operator)
copy(dAtA[i:], m.Operator)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
i--
dAtA[i] = 0x12
// field 1: Key (tag 0xa)
i -= len(m.Key)
copy(dAtA[i:], m.Key)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer
// sized exactly by m.Size().
func (m *ListMeta) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, marshalErr := m.MarshalToSizedBuffer(buf)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return buf[:n], nil
}
// MarshalTo encodes m into the first m.Size() bytes of dAtA and returns
// the number of bytes written.
func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count. RemainingItemCount (field 4) is written only when non-nil;
// note no field 1 is emitted for this type.
func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.RemainingItemCount != nil {
// field 4: varint (tag 0x20)
i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount))
i--
dAtA[i] = 0x20
}
// field 3: Continue (tag 0x1a)
i -= len(m.Continue)
copy(dAtA[i:], m.Continue)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue)))
i--
dAtA[i] = 0x1a
// field 2: ResourceVersion (tag 0x12)
i -= len(m.ResourceVersion)
copy(dAtA[i:], m.ResourceVersion)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
i--
dAtA[i] = 0x12
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer
// sized exactly by m.Size().
func (m *ObjectMeta) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, marshalErr := m.MarshalToSizedBuffer(buf)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return buf[:n], nil
}
// MarshalTo encodes m into the first m.Size() bytes of dAtA and returns
// the number of bytes written.
func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count. Field order on the wire (written in reverse here):
// Name(1), GenerateName(2), Namespace(3), UID(5), ResourceVersion(6),
// Generation(7), DeletionTimestamp(9), Labels(11), Annotations(12),
// OwnerReferences(13). Gaps in the numbering are fields this slim type
// does not carry.
func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.OwnerReferences) > 0 {
// field 13 (tag 0x6a); reverse iteration keeps forward wire order
for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6a
}
}
if len(m.Annotations) > 0 {
// field 12 (tag 0x62); keys sorted for deterministic output
keysForAnnotations := make([]string, 0, len(m.Annotations))
for k := range m.Annotations {
keysForAnnotations = append(keysForAnnotations, string(k))
}
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.Annotations[string(keysForAnnotations[iNdEx])]
// map entry = nested message: value(0x12), key(0xa), entry length
baseI := i
i -= len(v)
copy(dAtA[i:], v)
i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
i--
dAtA[i] = 0x12
i -= len(keysForAnnotations[iNdEx])
copy(dAtA[i:], keysForAnnotations[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
i--
dAtA[i] = 0xa
i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0x62
}
}
if len(m.Labels) > 0 {
// field 11 (tag 0x5a); same map-entry layout as Annotations
keysForLabels := make([]string, 0, len(m.Labels))
for k := range m.Labels {
keysForLabels = append(keysForLabels, string(k))
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
v := m.Labels[string(keysForLabels[iNdEx])]
baseI := i
i -= len(v)
copy(dAtA[i:], v)
i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
i--
dAtA[i] = 0x12
i -= len(keysForLabels[iNdEx])
copy(dAtA[i:], keysForLabels[iNdEx])
i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
i--
dAtA[i] = 0xa
i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0x5a
}
}
if m.DeletionTimestamp != nil {
// field 9: nested message (tag 0x4a), length-prefixed
{
size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
}
// field 7: Generation, varint (tag 0x38)
i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
i--
dAtA[i] = 0x38
// field 6: ResourceVersion (tag 0x32)
i -= len(m.ResourceVersion)
copy(dAtA[i:], m.ResourceVersion)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
i--
dAtA[i] = 0x32
// field 5: UID (tag 0x2a)
i -= len(m.UID)
copy(dAtA[i:], m.UID)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
i--
dAtA[i] = 0x2a
// field 3: Namespace (tag 0x1a)
i -= len(m.Namespace)
copy(dAtA[i:], m.Namespace)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
i--
dAtA[i] = 0x1a
// field 2: GenerateName (tag 0x12)
i -= len(m.GenerateName)
copy(dAtA[i:], m.GenerateName)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName)))
i--
dAtA[i] = 0x12
// field 1: Name (tag 0xa)
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer
// sized exactly by m.Size().
func (m *OwnerReference) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, marshalErr := m.MarshalToSizedBuffer(buf)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return buf[:n], nil
}
// MarshalTo encodes m into the first m.Size() bytes of dAtA and returns
// the number of bytes written.
func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count. Controller (field 6, bool) is optional; Kind(1), Name(3),
// UID(4) and APIVersion(5) are always written. No field 2 is emitted by this
// slim type.
func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Controller != nil {
// field 6: bool encoded as a single varint byte (tag 0x30)
i--
if *m.Controller {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x30
}
// field 5: APIVersion (tag 0x2a)
i -= len(m.APIVersion)
copy(dAtA[i:], m.APIVersion)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
i--
dAtA[i] = 0x2a
// field 4: UID (tag 0x22)
i -= len(m.UID)
copy(dAtA[i:], m.UID)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
i--
dAtA[i] = 0x22
// field 3: Name (tag 0x1a)
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x1a
// field 1: Kind (tag 0xa)
i -= len(m.Kind)
copy(dAtA[i:], m.Kind)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer
// sized exactly by m.Size().
func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, marshalErr := m.MarshalToSizedBuffer(buf)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return buf[:n], nil
}
// MarshalTo encodes m into the first m.Size() bytes of dAtA and returns
// the number of bytes written.
func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count. The single field is the embedded ObjectMeta (field 1),
// written as a length-prefixed nested message.
func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer
// sized exactly by m.Size().
func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, marshalErr := m.MarshalToSizedBuffer(buf)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return buf[:n], nil
}
// MarshalTo encodes m into the first m.Size() bytes of dAtA and returns
// the number of bytes written.
func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count. Items (field 2) are written before the embedded ListMeta
// (field 1) because the buffer fills end-to-front.
func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Items) > 0 {
// reverse iteration keeps items in forward wire order
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
// field 1: ListMeta, length-prefixed nested message (tag 0xa)
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer
// sized exactly by m.Size().
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, marshalErr := m.MarshalToSizedBuffer(buf)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return buf[:n], nil
}
// MarshalTo encodes m into the first m.Size() bytes of dAtA and returns
// the number of bytes written.
func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count: Nanos (field 2, varint tag 0x10) then Seconds (field 1,
// varint tag 0x8).
func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos))
i--
dAtA[i] = 0x10
i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds))
i--
dAtA[i] = 0x8
return len(dAtA) - i, nil
}
// Marshal encodes m into a freshly allocated protobuf wire-format buffer
// sized exactly by m.Size().
func (m *TypeMeta) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, marshalErr := m.MarshalToSizedBuffer(buf)
	if marshalErr != nil {
		return nil, marshalErr
	}
	return buf[:n], nil
}
// MarshalTo encodes m into the first m.Size() bytes of dAtA and returns
// the number of bytes written.
func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the byte count: APIVersion (field 2, tag 0x12) then Kind (field 1, tag 0xa).
func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
i -= len(m.APIVersion)
copy(dAtA[i:], m.APIVersion)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
i--
dAtA[i] = 0x12
i -= len(m.Kind)
copy(dAtA[i:], m.Kind)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// encodeVarintGenerated writes v as a base-128 varint whose last byte sits
// just before offset, and returns the index of the varint's first byte
// (the caller's new write cursor).
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	start := offset - sovGenerated(v)
	for pos := start; ; pos++ {
		if v < 0x80 {
			// final byte: continuation bit clear
			dAtA[pos] = uint8(v)
			break
		}
		// 7 payload bits plus continuation bit
		dAtA[pos] = uint8(v&0x7f | 0x80)
		v >>= 7
	}
	return start
}
// Size returns the number of bytes the wire-format encoding of m occupies;
// each field contributes 1 tag byte plus its varint-length-prefixed payload.
// A nil receiver encodes to 0 bytes. Must agree exactly with
// MarshalToSizedBuffer.
func (m *Condition) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Type)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Status)
n += 1 + l + sovGenerated(uint64(l))
n += 1 + sovGenerated(uint64(m.ObservedGeneration))
l = m.LastTransitionTime.Size()
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Reason)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Message)
n += 1 + l + sovGenerated(uint64(l))
return n
}
// Size returns the wire-format byte count of m (0 for nil). Each map entry
// is costed as a nested message: tag + entry-length varint + key/value
// sub-fields. Must agree exactly with MarshalToSizedBuffer.
func (m *LabelSelector) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.MatchLabels) > 0 {
for k, v := range m.MatchLabels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
if len(m.MatchExpressions) > 0 {
for _, e := range m.MatchExpressions {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
// Size returns the wire-format byte count of m (0 for nil). Must agree
// exactly with MarshalToSizedBuffer.
func (m *LabelSelectorRequirement) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Key)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Operator)
n += 1 + l + sovGenerated(uint64(l))
if len(m.Values) > 0 {
for _, s := range m.Values {
l = len(s)
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
// Size returns the wire-format byte count of m (0 for nil);
// RemainingItemCount is counted only when set. Must agree exactly with
// MarshalToSizedBuffer.
func (m *ListMeta) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ResourceVersion)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Continue)
n += 1 + l + sovGenerated(uint64(l))
if m.RemainingItemCount != nil {
n += 1 + sovGenerated(uint64(*m.RemainingItemCount))
}
return n
}
// Size returns the wire-format byte count of m (0 for nil). Optional fields
// (DeletionTimestamp, Labels, Annotations, OwnerReferences) are counted only
// when present. Must agree exactly with MarshalToSizedBuffer.
func (m *ObjectMeta) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.GenerateName)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Namespace)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.UID)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.ResourceVersion)
n += 1 + l + sovGenerated(uint64(l))
n += 1 + sovGenerated(uint64(m.Generation))
if m.DeletionTimestamp != nil {
l = m.DeletionTimestamp.Size()
n += 1 + l + sovGenerated(uint64(l))
}
if len(m.Labels) > 0 {
// each map entry is costed as a nested message (tag + length + k/v)
for k, v := range m.Labels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
if len(m.Annotations) > 0 {
for k, v := range m.Annotations {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
if len(m.OwnerReferences) > 0 {
for _, e := range m.OwnerReferences {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
// Size returns the wire-format byte count of m (0 for nil). A set Controller
// costs 2 bytes (tag + one varint byte for the bool). Must agree exactly
// with MarshalToSizedBuffer.
func (m *OwnerReference) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Kind)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Name)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.UID)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.APIVersion)
n += 1 + l + sovGenerated(uint64(l))
if m.Controller != nil {
n += 2
}
return n
}
// Size returns the wire-format byte count of m (0 for nil): the embedded
// ObjectMeta as one length-prefixed nested message.
func (m *PartialObjectMetadata) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
// Size returns the wire-format byte count of m (0 for nil): the embedded
// ListMeta plus each item as a length-prefixed nested message.
func (m *PartialObjectMetadataList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
// Size returns the wire-format byte count of m (0 for nil): two varint
// fields, Seconds and Nanos, each with a 1-byte tag.
func (m *Timestamp) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 1 + sovGenerated(uint64(m.Seconds))
n += 1 + sovGenerated(uint64(m.Nanos))
return n
}
// Size returns the wire-format byte count of m (0 for nil): Kind and
// APIVersion as length-prefixed strings.
func (m *TypeMeta) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Kind)
n += 1 + l + sovGenerated(uint64(l))
l = len(m.APIVersion)
n += 1 + l + sovGenerated(uint64(l))
return n
}
// sovGenerated returns how many bytes the base-128 varint encoding of x
// occupies (1 for 0..127, up to 10 for the full uint64 range).
func sovGenerated(x uint64) (n int) {
	// OR with 1 so x == 0 still reports one significant bit (one byte);
	// each varint byte carries 7 payload bits.
	bits := math_bits.Len64(x | 1)
	return (bits + 6) / 7
}
// sozGenerated returns the varint byte count of x after zig-zag mapping
// (sint32/sint64 encoding): small negative values stay small on the wire.
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// String returns a single-line debug representation of this Condition;
// a nil receiver renders as "nil".
func (this *Condition) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Condition{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "Time", 1), `&`, ``, 1) + `,`,
`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this LabelSelector;
// map keys are sorted so the output is deterministic. Nil renders as "nil".
func (this *LabelSelector) String() string {
if this == nil {
return "nil"
}
repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{"
for _, f := range this.MatchExpressions {
repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + ","
}
repeatedStringForMatchExpressions += "}"
keysForMatchLabels := make([]string, 0, len(this.MatchLabels))
for k := range this.MatchLabels {
keysForMatchLabels = append(keysForMatchLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels)
mapStringForMatchLabels := "map[string]string{"
for _, k := range keysForMatchLabels {
mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k])
}
mapStringForMatchLabels += "}"
s := strings.Join([]string{`&LabelSelector{`,
`MatchLabels:` + mapStringForMatchLabels + `,`,
`MatchExpressions:` + repeatedStringForMatchExpressions + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this
// LabelSelectorRequirement; nil renders as "nil".
func (this *LabelSelectorRequirement) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&LabelSelectorRequirement{`,
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this ListMeta;
// RemainingItemCount is dereferenced via valueToStringGenerated so a nil
// pointer prints "nil". A nil receiver renders as "nil".
func (this *ListMeta) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ListMeta{`,
`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
`Continue:` + fmt.Sprintf("%v", this.Continue) + `,`,
`RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this ObjectMeta;
// Labels and Annotations keys are sorted for deterministic output. A nil
// receiver renders as "nil".
func (this *ObjectMeta) String() string {
if this == nil {
return "nil"
}
repeatedStringForOwnerReferences := "[]OwnerReference{"
for _, f := range this.OwnerReferences {
repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + ","
}
repeatedStringForOwnerReferences += "}"
keysForLabels := make([]string, 0, len(this.Labels))
for k := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
}
mapStringForLabels += "}"
keysForAnnotations := make([]string, 0, len(this.Annotations))
for k := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
}
mapStringForAnnotations += "}"
s := strings.Join([]string{`&ObjectMeta{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
`Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
`DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`,
`Labels:` + mapStringForLabels + `,`,
`Annotations:` + mapStringForAnnotations + `,`,
`OwnerReferences:` + repeatedStringForOwnerReferences + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this OwnerReference;
// a nil receiver renders as "nil".
func (this *OwnerReference) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&OwnerReference{`,
`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
`Controller:` + valueToStringGenerated(this.Controller) + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this
// PartialObjectMetadata; a nil receiver renders as "nil".
func (this *PartialObjectMetadata) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PartialObjectMetadata{`,
`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this
// PartialObjectMetadataList; a nil receiver renders as "nil".
func (this *PartialObjectMetadataList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]PartialObjectMetadata{"
for _, f := range this.Items {
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&PartialObjectMetadataList{`,
`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this Timestamp;
// a nil receiver renders as "nil".
func (this *Timestamp) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Timestamp{`,
`Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`,
`Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`,
`}`,
}, "")
return s
}
// String returns a single-line debug representation of this TypeMeta;
// a nil receiver renders as "nil".
func (this *TypeMeta) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&TypeMeta{`,
`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
`}`,
}, "")
return s
}
// valueToStringGenerated renders a pointer value for debug output: "nil"
// for a nil pointer, otherwise "*<elem>" with the pointee formatted by %v.
func valueToStringGenerated(v interface{}) string {
	val := reflect.ValueOf(v)
	if val.IsNil() {
		return "nil"
	}
	return fmt.Sprintf("*%v", val.Elem().Interface())
}
// Unmarshal decodes protobuf wire-format bytes into m, one tag/value pair
// per outer-loop iteration. Unknown fields are skipped via skipGenerated;
// truncated input surfaces as io.ErrUnexpectedEOF and varints longer than
// 64 bits as ErrIntOverflowGenerated.
func (m *Condition) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// read the field tag (fieldNum<<3 | wireType) as a varint
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Condition: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Type: length-delimited string
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
// Status: length-delimited string stored as ConditionStatus
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
// ObservedGeneration: varint int64
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
}
m.ObservedGeneration = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ObservedGeneration |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
// LastTransitionTime: length-delimited nested message
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
// Reason: length-delimited string
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Reason = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
// Message: length-delimited string
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Message = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
// unknown field: rewind to the tag and skip the whole field
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes into m. MatchLabels entries
// arrive as nested key/value messages and are inserted into a lazily
// allocated map; MatchExpressions are appended in wire order. Unknown
// fields are skipped via skipGenerated.
func (m *LabelSelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// read the field tag (fieldNum<<3 | wireType) as a varint
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// MatchLabels: one map entry (nested message) per occurrence
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.MatchLabels == nil {
m.MatchLabels = make(map[string]string)
}
// decode the entry's sub-fields: key (field 1) and value (field 2);
// either may be absent, leaving the zero string
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue < 0 {
return ErrInvalidLengthGenerated
}
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
// unknown sub-field inside the map entry: skip it
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.MatchLabels[mapkey] = mapvalue
iNdEx = postIndex
case 2:
// MatchExpressions: repeated nested message, appended in wire order
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{})
if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
// unknown field: rewind to the tag and skip the whole field
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes into m: Key (field 1),
// Operator (field 2) and repeated Values (field 3). Unknown fields are
// skipped via skipGenerated.
func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// read the field tag (fieldNum<<3 | wireType) as a varint
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
// Key: length-delimited string
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
// Operator: length-delimited string stored as LabelSelectorOperator
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Operator = LabelSelectorOperator(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
// Values: repeated string, appended in wire order
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
// unknown field: rewind to the tag and skip the whole field
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes into m: ResourceVersion
// (field 2), Continue (field 3) and optional RemainingItemCount (field 4).
// There is no case for field 1 in this slim type, so such a tag falls
// through to the unknown-field skip path.
func (m *ListMeta) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
// read the field tag (fieldNum<<3 | wireType) as a varint
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ListMeta: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2:
// ResourceVersion: length-delimited string
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceVersion = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
// Continue: length-delimited string
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Continue = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
// RemainingItemCount: varint int64, stored via pointer to mark presence
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.RemainingItemCount = &v
default:
// unknown field: rewind to the tag and skip the whole field
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// Unmarshal decodes protobuf wire-format bytes into m. Only the fields kept
// by the slim ObjectMeta are handled; all other field numbers fall through
// to the default case and are skipped.
func (m *ObjectMeta) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint (fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Name: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// GenerateName: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.GenerateName = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Namespace: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Namespace = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			// UID: length-delimited string converted to the apimachinery UID type.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// ResourceVersion: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ResourceVersion = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 7:
			// Generation: varint int64, reset then accumulated in place.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
			}
			m.Generation = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Generation |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 9:
			// DeletionTimestamp: embedded Time message, allocated lazily.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.DeletionTimestamp == nil {
				m.DeletionTimestamp = &Time{}
			}
			if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 11:
			// Labels: map<string,string>, encoded as repeated key/value entry messages.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Labels == nil {
				m.Labels = make(map[string]string)
			}
			var mapkey string
			var mapvalue string
			// Decode the entry message: field 1 = key, field 2 = value.
			for iNdEx < postIndex {
				entryPreIndex := iNdEx
				var wire uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowGenerated
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					wire |= uint64(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				fieldNum := int32(wire >> 3)
				if fieldNum == 1 {
					var stringLenmapkey uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowGenerated
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapkey |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapkey := int(stringLenmapkey)
					if intStringLenmapkey < 0 {
						return ErrInvalidLengthGenerated
					}
					postStringIndexmapkey := iNdEx + intStringLenmapkey
					if postStringIndexmapkey < 0 {
						return ErrInvalidLengthGenerated
					}
					if postStringIndexmapkey > l {
						return io.ErrUnexpectedEOF
					}
					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
					iNdEx = postStringIndexmapkey
				} else if fieldNum == 2 {
					var stringLenmapvalue uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowGenerated
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapvalue |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapvalue := int(stringLenmapvalue)
					if intStringLenmapvalue < 0 {
						return ErrInvalidLengthGenerated
					}
					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
					if postStringIndexmapvalue < 0 {
						return ErrInvalidLengthGenerated
					}
					if postStringIndexmapvalue > l {
						return io.ErrUnexpectedEOF
					}
					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
					iNdEx = postStringIndexmapvalue
				} else {
					// Unknown entry sub-field: skip within the entry bounds.
					iNdEx = entryPreIndex
					skippy, err := skipGenerated(dAtA[iNdEx:])
					if err != nil {
						return err
					}
					if (skippy < 0) || (iNdEx+skippy) < 0 {
						return ErrInvalidLengthGenerated
					}
					if (iNdEx + skippy) > postIndex {
						return io.ErrUnexpectedEOF
					}
					iNdEx += skippy
				}
			}
			m.Labels[mapkey] = mapvalue
			iNdEx = postIndex
		case 12:
			// Annotations: map<string,string>, same entry encoding as Labels.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Annotations == nil {
				m.Annotations = make(map[string]string)
			}
			var mapkey string
			var mapvalue string
			for iNdEx < postIndex {
				entryPreIndex := iNdEx
				var wire uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowGenerated
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					wire |= uint64(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				fieldNum := int32(wire >> 3)
				if fieldNum == 1 {
					var stringLenmapkey uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowGenerated
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapkey |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapkey := int(stringLenmapkey)
					if intStringLenmapkey < 0 {
						return ErrInvalidLengthGenerated
					}
					postStringIndexmapkey := iNdEx + intStringLenmapkey
					if postStringIndexmapkey < 0 {
						return ErrInvalidLengthGenerated
					}
					if postStringIndexmapkey > l {
						return io.ErrUnexpectedEOF
					}
					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
					iNdEx = postStringIndexmapkey
				} else if fieldNum == 2 {
					var stringLenmapvalue uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowGenerated
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapvalue |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapvalue := int(stringLenmapvalue)
					if intStringLenmapvalue < 0 {
						return ErrInvalidLengthGenerated
					}
					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
					if postStringIndexmapvalue < 0 {
						return ErrInvalidLengthGenerated
					}
					if postStringIndexmapvalue > l {
						return io.ErrUnexpectedEOF
					}
					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
					iNdEx = postStringIndexmapvalue
				} else {
					iNdEx = entryPreIndex
					skippy, err := skipGenerated(dAtA[iNdEx:])
					if err != nil {
						return err
					}
					if (skippy < 0) || (iNdEx+skippy) < 0 {
						return ErrInvalidLengthGenerated
					}
					if (iNdEx + skippy) > postIndex {
						return io.ErrUnexpectedEOF
					}
					iNdEx += skippy
				}
			}
			m.Annotations[mapkey] = mapvalue
			iNdEx = postIndex
		case 13:
			// OwnerReferences: repeated embedded OwnerReference messages.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.OwnerReferences = append(m.OwnerReferences, OwnerReference{})
			if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip it, bounds-checked.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes into m. Fields 1, 3, 4, 5
// are strings, field 6 is a varint-encoded bool; everything else is skipped.
func (m *OwnerReference) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint (fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Kind: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Kind = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Name: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// UID: length-delimited string converted to the apimachinery UID type.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			// APIVersion: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.APIVersion = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// Controller: optional bool, wire-encoded as a varint (non-zero = true).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			b := bool(v != 0)
			m.Controller = &b
		default:
			// Unknown field: rewind to the tag and skip it, bounds-checked.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes into m. The only known
// field is the embedded ObjectMeta (field 1); others are skipped.
func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint (fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// ObjectMeta: embedded message, decoded in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip it, bounds-checked.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes into m: field 1 is the
// embedded ListMeta, field 2 the repeated Items; others are skipped.
func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint (fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// ListMeta: embedded message, decoded in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Items: repeated embedded PartialObjectMetadata messages.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Items = append(m.Items, PartialObjectMetadata{})
			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip it, bounds-checked.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes into m: field 1 is the
// varint Seconds (int64), field 2 the varint Nanos (int32).
func (m *Timestamp) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint (fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Seconds: varint int64, reset then accumulated in place.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
			}
			m.Seconds = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Seconds |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Nanos: varint int32, reset then accumulated in place.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
			}
			m.Nanos = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Nanos |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: rewind to the tag and skip it, bounds-checked.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes protobuf wire-format bytes into m: field 1 is the
// Kind string, field 2 the APIVersion string; others are skipped.
func (m *TypeMeta) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint (fieldNum<<3 | wireType).
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Kind: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Kind = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// APIVersion: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.APIVersion = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip it, bounds-checked.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipGenerated returns the number of bytes occupied by the field whose tag
// starts at dAtA[0], so unknown fields can be stepped over. It tracks group
// nesting via depth and validates lengths and offsets as it goes.
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		// Decode the tag varint for the field being skipped.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint payload: consume bytes until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed64 payload.
			iNdEx += 8
		case 2:
			// Length-delimited payload: read the length varint, then jump over it.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthGenerated
			}
			iNdEx += length
		case 3:
			// Group start: recurse logically by increasing depth.
			depth++
		case 4:
			// Group end must match a prior group start.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupGenerated
			}
			depth--
		case 5:
			// Fixed32 payload.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthGenerated
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
var (
	// ErrInvalidLengthGenerated reports a negative or out-of-range length prefix.
	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowGenerated reports a varint longer than 64 bits.
	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
	// ErrUnexpectedEndOfGroupGenerated reports a group-end tag with no matching group-start.
	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2016 The Kubernetes Authors.
package v1
import (
"fmt"
"maps"
"slices"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection"
)
// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
// labels.Selector
// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go
func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {
	// A nil selector matches nothing; an empty one matches everything.
	if ps == nil {
		return labels.Nothing(), nil
	}
	total := len(ps.MatchLabels) + len(ps.MatchExpressions)
	if total == 0 {
		return labels.Everything(), nil
	}
	reqs := make([]labels.Requirement, 0, total)
	// Plain MatchLabels entries become equality requirements.
	for key, value := range ps.MatchLabels {
		req, err := labels.NewRequirement(key, selection.Equals, []string{value})
		if err != nil {
			return nil, err
		}
		reqs = append(reqs, *req)
	}
	// MatchExpressions map 1:1 onto selection operators.
	for _, expr := range ps.MatchExpressions {
		var op selection.Operator
		switch expr.Operator {
		case LabelSelectorOpIn:
			op = selection.In
		case LabelSelectorOpNotIn:
			op = selection.NotIn
		case LabelSelectorOpExists:
			op = selection.Exists
		case LabelSelectorOpDoesNotExist:
			op = selection.DoesNotExist
		default:
			return nil, fmt.Errorf("%q is not a valid label selector operator", expr.Operator)
		}
		// Clone the values so the requirement does not alias the caller's slice.
		req, err := labels.NewRequirement(expr.Key, op, slices.Clone(expr.Values))
		if err != nil {
			return nil, err
		}
		reqs = append(reqs, *req)
	}
	return labels.NewSelector().Add(reqs...), nil
}
// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, ie. the
// original structure of a label selector. Operators that cannot be converted into plain
// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in
// an error.
func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) {
	if ps == nil {
		return nil, nil
	}
	// Copy into a freshly-made map instead of maps.Clone: Clone(nil) returns a
	// nil map, and writing single-value In expressions into it below would
	// panic when MatchLabels is unset.
	selector := make(map[string]string, len(ps.MatchLabels)+len(ps.MatchExpressions))
	maps.Copy(selector, ps.MatchLabels)
	for _, expr := range ps.MatchExpressions {
		switch expr.Operator {
		case LabelSelectorOpIn:
			if len(expr.Values) != 1 {
				return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator)
			}
			// Should we do anything in case this will override a previous key-value pair?
			selector[expr.Key] = expr.Values[0]
		case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist:
			return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator)
		default:
			return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator)
		}
	}
	return selector, nil
}
// ParseToLabelSelector parses a string representing a selector into a LabelSelector object.
// Note: This function should be kept in sync with the parser in pkg/labels/selector.go
func ParseToLabelSelector(selector string) (*LabelSelector, error) {
	reqs, err := labels.ParseToRequirements(selector)
	if err != nil {
		return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %w", selector, err)
	}
	out := &LabelSelector{
		MatchLabels:      map[string]string{},
		MatchExpressions: []LabelSelectorRequirement{},
	}
	for _, req := range reqs {
		op := req.Operator()
		switch op {
		case selection.Equals, selection.DoubleEquals:
			// Equality requirements collapse into the MatchLabels map.
			vals := req.Values()
			if vals.Len() != 1 {
				return nil, fmt.Errorf("equals operator must have exactly one value")
			}
			val, ok := vals.PopAny()
			if !ok {
				return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved")
			}
			out.MatchLabels[req.Key()] = val
		case selection.In, selection.NotIn, selection.Exists, selection.DoesNotExist:
			// Set-based requirements become MatchExpressions entries.
			var slimOp LabelSelectorOperator
			switch op {
			case selection.In:
				slimOp = LabelSelectorOpIn
			case selection.NotIn:
				slimOp = LabelSelectorOpNotIn
			case selection.Exists:
				slimOp = LabelSelectorOpExists
			default:
				slimOp = LabelSelectorOpDoesNotExist
			}
			out.MatchExpressions = append(out.MatchExpressions, LabelSelectorRequirement{
				Key:      req.Key(),
				Operator: slimOp,
				Values:   sets.List(req.Values()),
			})
		case selection.GreaterThan, selection.LessThan:
			// Adding a separate case for these operators to indicate that this is deliberate
			return nil, fmt.Errorf("%q isn't supported in label selectors", op)
		default:
			return nil, fmt.Errorf("%q is not a valid label selector operator", op)
		}
	}
	return out, nil
}
// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object.
func SetAsLabelSelector(ls labels.Set) *LabelSelector {
	if ls == nil {
		return nil
	}
	// Copy every label pair into a fresh MatchLabels map.
	matchLabels := make(map[string]string, len(ls))
	for key, value := range ls {
		matchLabels[key] = value
	}
	return &LabelSelector{MatchLabels: matchLabels}
}
// FormatLabelSelector convert labelSelector into plain string
func FormatLabelSelector(labelSelector *LabelSelector) string {
	sel, err := LabelSelectorAsSelector(labelSelector)
	if err != nil {
		// Conversion failures are rendered as a sentinel rather than propagated.
		return "<error>"
	}
	if s := sel.String(); s != "" {
		return s
	}
	return "<none>"
}
// FullOwnerReferences converts slim OwnerReferences to original OwnerReferences
func FullOwnerReferences(references []OwnerReference) []metav1.OwnerReference {
	if len(references) == 0 {
		// Preserve the prior append-based behavior of returning nil for empty input.
		return nil
	}
	// Pre-size the result: the output has exactly one entry per input reference.
	fullRefs := make([]metav1.OwnerReference, 0, len(references))
	for _, ref := range references {
		fullRefs = append(fullRefs, metav1.OwnerReference{
			APIVersion: ref.APIVersion,
			UID:        ref.UID,
			Name:       ref.Name,
			Kind:       ref.Kind,
			Controller: ref.Controller,
		})
	}
	return fullRefs
}
// SlimOwnerReferences converts original OwnerReferences to slim OwnerReferences
func SlimOwnerReferences(references []metav1.OwnerReference) []OwnerReference {
	if len(references) == 0 {
		// Preserve the prior append-based behavior of returning nil for empty input.
		return nil
	}
	// Pre-size the result: the output has exactly one entry per input reference.
	slimRefs := make([]OwnerReference, 0, len(references))
	for _, ref := range references {
		slimRefs = append(slimRefs, OwnerReference{
			APIVersion: ref.APIVersion,
			Name:       ref.Name,
			UID:        ref.UID,
			Kind:       ref.Kind,
			Controller: ref.Controller,
		})
	}
	return slimRefs
}
// HasAnnotation returns a bool if passed in annotation exists
func HasAnnotation(obj ObjectMeta, ann string) bool {
	_, exists := obj.Annotations[ann]
	return exists
}
// SetMetaDataAnnotation sets the annotation and value
func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) {
	// Lazily allocate the map so a zero-valued ObjectMeta is safe to update.
	anns := obj.Annotations
	if anns == nil {
		anns = make(map[string]string)
		obj.Annotations = anns
	}
	anns[ann] = value
}
// HasLabel returns a bool if passed in label exists
func HasLabel(obj ObjectMeta, label string) bool {
	_, exists := obj.Labels[label]
	return exists
}
// SetMetaDataLabel sets the label and value, lazily allocating the labels map
// on first use.
func SetMetaDataLabel(obj *ObjectMeta, label string, value string) {
	lbls := obj.Labels
	if lbls == nil {
		lbls = make(map[string]string)
		obj.Labels = lbls
	}
	lbls[label] = value
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2016 The Kubernetes Authors.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
)
// ListMeta accessors (metav1.ListInterface). SelfLink accessors intentionally
// panic: the field is not carried by this slim ListMeta, so reaching them
// indicates a caller bug.
func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion }
func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
func (meta *ListMeta) GetSelfLink() string { panic("ListMeta - GetSelfLink() not implemented") }
func (meta *ListMeta) SetSelfLink(_ string) { panic("ListMeta - SetSelfLink() not implemented") }
func (meta *ListMeta) GetContinue() string { return meta.Continue }
func (meta *ListMeta) SetContinue(c string) { meta.Continue = c }
func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount }
func (meta *ListMeta) SetRemainingItemCount(c *int64) { meta.RemainingItemCount = c }
// GetObjectKind returns the TypeMeta itself, which implements schema.ObjectKind.
func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) {
	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind {
	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}
// GetListMeta returns the ListMeta itself as a metav1.ListInterface.
func (obj *ListMeta) GetListMeta() metav1.ListInterface { return obj }
// GetObjectMeta returns the ObjectMeta itself as a metav1.Object.
func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj }
// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows
// fast, direct access to metadata fields for API objects.
//
// Accessors whose body panics are intentionally unimplemented in this slim
// variant; calling them indicates a caller bug.
func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace }
func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace }
func (meta *ObjectMeta) GetName() string { return meta.Name }
func (meta *ObjectMeta) SetName(name string) { meta.Name = name }
func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName }
func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName }
func (meta *ObjectMeta) GetUID() types.UID { return meta.UID }
func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid }
func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion }
func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
func (meta *ObjectMeta) GetGeneration() int64 { panic("ObjectMeta - GetGeneration() not implemented") }
func (meta *ObjectMeta) SetGeneration(_ int64) { panic("ObjectMeta - SetGeneration() not implemented") }
func (meta *ObjectMeta) GetSelfLink() string { panic("ObjectMeta - GetSelfLink() not implemented") }
func (meta *ObjectMeta) SetSelfLink(_ string) {
	panic("ObjectMeta - SetSelfLink() not implemented")
}
func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time {
	panic("ObjectMeta - GetCreationTimestamp() not implemented")
}
func (meta *ObjectMeta) SetCreationTimestamp(_ metav1.Time) {
	panic("ObjectMeta - SetCreationTimestamp() not implemented")
}
// GetDeletionTimestamp returns a copy of the deletion timestamp converted to
// a metav1.Time, or nil when no deletion has been requested.
func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time {
	if meta.DeletionTimestamp == nil {
		return nil
	}
	return &metav1.Time{
		Time: meta.DeletionTimestamp.Time,
	}
}
func (meta *ObjectMeta) SetDeletionTimestamp(_ *metav1.Time) {
	panic("ObjectMeta - SetDeletionTimestamp() not implemented")
}
func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 {
	panic("ObjectMeta - GetDeletionGracePeriodSeconds() not implemented")
}
func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(_ *int64) {
	panic("ObjectMeta - SetDeletionGracePeriodSeconds() not implemented")
}
func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels }
func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels }
func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations }
func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations }
func (meta *ObjectMeta) GetFinalizers() []string {
	panic("ObjectMeta - GetFinalizers() not implemented")
}
func (meta *ObjectMeta) SetFinalizers(_ []string) {
	panic("ObjectMeta - SetFinalizers() not implemented")
}
// GetOwnerReferences converts the stored slim references to full
// metav1.OwnerReferences.
func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference {
	return FullOwnerReferences(meta.OwnerReferences)
}
// SetOwnerReferences stores the given full references in their slim form.
func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) {
	meta.OwnerReferences = SlimOwnerReferences(references)
}
func (meta *ObjectMeta) GetManagedFields() []metav1.ManagedFieldsEntry {
	panic("ObjectMeta - GetManagedFields() not implemented")
}
func (meta *ObjectMeta) SetManagedFields(_ []metav1.ManagedFieldsEntry) {
	panic("ObjectMeta - SetManagedFields() not implemented")
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package v1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// GroupName is the group name for this API.
const GroupName = "meta.k8s.io"
var (
	// localSchemeBuilder is used to make compiler happy for autogenerated
	// conversions. However, it's not used.
	schemeBuilder runtime.SchemeBuilder
	localSchemeBuilder = &schemeBuilder
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Unversioned is group version for unversioned API objects
// TODO: this should be v1 probably
var Unversioned = schema.GroupVersion{Group: "", Version: "v1"}
// WatchEventKind is name reserved for serializing watch events.
const WatchEventKind = "WatchEvent"
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// scheme is the registry for the common types that adhere to the meta v1 API spec.
var scheme = runtime.NewScheme()
// ParameterCodec knows about query parameters used with the meta v1 API spec.
var ParameterCodec = runtime.NewParameterCodec(scheme)
// optionsTypes lists option objects registered with every group version;
// currently empty for this slim package.
var optionsTypes = []runtime.Object{}
// AddToGroupVersion registers common meta types into schemas.
// Registration order matters: known types first, then unversioned types,
// then the manually invoked conversion/default registrations.
func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) {
	// Supports legacy code paths, most callers should use metav1.ParameterCodec for now
	scheme.AddKnownTypes(groupVersion, optionsTypes...)
	// Register Unversioned types under their own special group
	scheme.AddUnversionedTypes(Unversioned)
	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
	utilruntime.Must(RegisterConversions(scheme))
	utilruntime.Must(RegisterDefaults(scheme))
}
// AddMetaToScheme registers base meta types into schemas.
// Always returns nil; the error return exists for scheme-builder compatibility.
func AddMetaToScheme(scheme *runtime.Scheme) error {
	metaTypes := []runtime.Object{
		&PartialObjectMetadata{},
		&PartialObjectMetadataList{},
	}
	scheme.AddKnownTypes(SchemeGroupVersion, metaTypes...)
	return nil
}
// init wires the package-level scheme: unversioned option types, the base
// meta types, and the generated defaulting functions.
func init() {
	scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...)
	utilruntime.Must(AddMetaToScheme(scheme))
	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
	utilruntime.Must(RegisterDefaults(scheme))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package v1
import (
"encoding/json"
"time"
)
// Time is a wrapper around time.Time which supports correct
// marshaling to YAML and JSON. Wrappers are provided for many
// of the factory methods that the time package offers.
//
// +protobuf.options.marshal=false
// +protobuf.as=Timestamp
// +protobuf.options.(gogoproto.goproto_stringer)=false
//
// +kubebuilder:validation:Format=date-time
// +kubebuilder:validation:Type=string
type Time struct {
	// The embedded time.Time is excluded from both protobuf and JSON tags;
	// serialization is handled by the custom (Un)Marshal* methods on Time.
	time.Time `protobuf:"-" json:"-"`
}
// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time
// type is effectively immutable in the time API, so it is safe to
// copy-by-assign, despite the presence of (unexported) Pointer fields.
func (t *Time) DeepCopyInto(out *Time) {
	// Plain struct assignment is sufficient; see note above.
	*out = *t
}
// DeepEqual reports whether the two Time pointers denote the same instant.
// Two nil pointers are equal; a nil and a non-nil pointer are not.
func (in *Time) DeepEqual(other *Time) bool {
	if in == nil || other == nil {
		return in == nil && other == nil
	}
	return in.Time.Equal(other.Time)
}
// NewTime returns a wrapped instance of the provided time.
func NewTime(t time.Time) Time {
	// Parameter renamed from "time" to "t": the original shadowed the time
	// package inside the function body.
	return Time{Time: t}
}
// Date returns the Time corresponding to the supplied parameters
// by wrapping time.Date.
func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time {
	wrapped := time.Date(year, month, day, hour, min, sec, nsec, loc)
	return Time{Time: wrapped}
}
// Now returns the current local time.
func Now() Time {
	return Time{Time: time.Now()}
}
// IsZero returns true if the value is nil or time is zero.
func (t *Time) IsZero() bool {
	return t == nil || t.Time.IsZero()
}
// Before reports whether the time instant t is before u.
// If either pointer is nil the answer is false.
func (t *Time) Before(u *Time) bool {
	if t == nil || u == nil {
		return false
	}
	return t.Time.Before(u.Time)
}
// Equal reports whether the time instant t is equal to u.
// Two nil pointers compare equal; a nil and a non-nil pointer do not.
func (t *Time) Equal(u *Time) bool {
	switch {
	case t == nil && u == nil:
		return true
	case t == nil || u == nil:
		return false
	default:
		return t.Time.Equal(u.Time)
	}
}
// Unix returns the local time corresponding to the given Unix time
// by wrapping time.Unix.
func Unix(sec int64, nsec int64) Time {
	return Time{Time: time.Unix(sec, nsec)}
}
// Rfc3339Copy returns a copy of the Time at second-level precision.
// The round-trip through Format/Parse truncates sub-second components. The
// parse error is deliberately ignored; if parsing were ever to fail the
// method would yield the zero Time.
func (t Time) Rfc3339Copy() Time {
	copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339))
	return Time{copied}
}
// UnmarshalJSON implements the json.Unmarshaller interface.
// A JSON null resets the receiver to the zero time; any other input must be
// a quoted RFC 3339 string, which is stored in the local time zone.
func (t *Time) UnmarshalJSON(b []byte) error {
	if string(b) == "null" {
		t.Time = time.Time{}
		return nil
	}
	var raw string
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	parsed, err := time.Parse(time.RFC3339, raw)
	if err != nil {
		return err
	}
	t.Time = parsed.Local()
	return nil
}
// UnmarshalQueryParameter converts from a URL query parameter value to an object.
// An empty value — or the literal "null" produced by older clients that built
// query params via JSON serialization — resets the receiver to the zero time.
func (t *Time) UnmarshalQueryParameter(str string) error {
	if str == "" || str == "null" {
		t.Time = time.Time{}
		return nil
	}
	parsed, err := time.Parse(time.RFC3339, str)
	if err != nil {
		return err
	}
	t.Time = parsed.Local()
	return nil
}
// MarshalJSON implements the json.Marshaler interface.
// Zero times serialize as JSON null; anything else becomes a quoted
// RFC 3339 string rendered in UTC.
func (t Time) MarshalJSON() ([]byte, error) {
	if t.IsZero() {
		// Encode unset/nil objects as JSON's "null".
		return []byte("null"), nil
	}
	// RFC 3339 output contains no characters that require JSON escaping, so
	// the value can be quoted by hand.
	out := make([]byte, 0, len(time.RFC3339)+2)
	out = append(out, '"')
	out = t.UTC().AppendFormat(out, time.RFC3339)
	out = append(out, '"')
	return out, nil
}
// ToUnstructured implements the value.UnstructuredConverter interface.
// Zero times convert to nil; anything else to an RFC 3339 string in UTC.
func (t Time) ToUnstructured() any {
	if t.IsZero() {
		return nil
	}
	return t.UTC().Format(time.RFC3339)
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (_ Time) OpenAPISchemaType() []string { return []string{"string"} }
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
// Pairs with OpenAPISchemaType above to declare the "string"/"date-time" schema.
func (_ Time) OpenAPISchemaFormat() string { return "date-time" }
// MarshalQueryParameter converts to a URL query parameter value.
// Unset/zero times encode as the empty string; never returns an error.
func (t Time) MarshalQueryParameter() (string, error) {
	var encoded string
	if !t.IsZero() {
		encoded = t.UTC().Format(time.RFC3339)
	}
	return encoded, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2015 The Kubernetes Authors.
package v1
import (
"time"
)
// Timestamp is a struct that is equivalent to Time, but intended for
// protobuf marshalling/unmarshalling. It is generated into a serialization
// that matches Time. Do not use in Go structs.
type Timestamp struct {
	// Represents seconds of UTC time since Unix epoch
	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
	// 9999-12-31T23:59:59Z inclusive.
	Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"`
	// Non-negative fractions of a second at nanosecond resolution. Negative
	// second values with fractions must still have non-negative nanos values
	// that count forward in time. Must be from 0 to 999,999,999
	// inclusive. This field may be limited in precision depending on context.
	Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"`
}
// ProtoTime returns the Time as a new Timestamp value.
// A nil receiver yields an empty Timestamp. Nanoseconds are deliberately
// dropped; see the comment in the body.
func (m *Time) ProtoTime() *Timestamp {
	if m == nil {
		return &Timestamp{}
	}
	return &Timestamp{
		Seconds: m.Time.Unix(),
		// leaving this here for the record. our JSON only handled seconds, so this results in writes by
		// protobuf clients storing values that aren't read by json clients, which results in unexpected
		// field mutation, which fails various validation and equality code.
		// Nanos: int32(m.Time.Nanosecond()),
	}
}
// Size implements the protobuf marshalling interface.
// A nil or zero Time occupies zero bytes on the wire.
func (m *Time) Size() (n int) {
	if m == nil || m.Time.IsZero() {
		return 0
	}
	return m.ProtoTime().Size()
}
// Unmarshal implements the protobuf unmarshalling interface.
// (The original comment incorrectly said "Reset".) Empty input resets the
// receiver to the zero time; otherwise the payload is decoded as a Timestamp
// and only its seconds are kept (see the comment in the body).
func (m *Time) Unmarshal(data []byte) error {
	if len(data) == 0 {
		m.Time = time.Time{}
		return nil
	}
	p := Timestamp{}
	if err := p.Unmarshal(data); err != nil {
		return err
	}
	// leaving this here for the record. our JSON only handled seconds, so this results in writes by
	// protobuf clients storing values that aren't read by json clients, which results in unexpected
	// field mutation, which fails various validation and equality code.
	// m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local()
	m.Time = time.Unix(p.Seconds, int64(0)).Local()
	return nil
}
// Marshal implements the protobuf marshaling interface.
// A nil or zero Time marshals to nil with no error.
func (m *Time) Marshal() (data []byte, err error) {
	if m == nil || m.Time.IsZero() {
		return nil, nil
	}
	return m.ProtoTime().Marshal()
}
// MarshalTo implements the protobuf marshaling interface.
// A nil or zero Time writes nothing and reports zero bytes.
func (m *Time) MarshalTo(data []byte) (int, error) {
	if m == nil || m.Time.IsZero() {
		return 0, nil
	}
	return m.ProtoTime().MarshalTo(data)
}
// MarshalToSizedBuffer implements the protobuf reverse marshaling interface.
// A nil or zero Time writes nothing and reports zero bytes.
func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) {
	if m == nil || m.Time.IsZero() {
		return 0, nil
	}
	return m.ProtoTime().MarshalToSizedBuffer(data)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2015 The Kubernetes Authors.
// Package v1 contains API types that are common to all versions.
//
// The package contains two categories of types:
// - external (serialized) types that lack their own version (e.g TypeMeta)
// - internal (never-serialized) types that are needed by several different
// api groups, and so live here, to avoid duplication and/or import loops
// (e.g. LabelSelector).
//
// In the future, we will probably move these categories of objects into
// separate packages.
package v1
import (
"k8s.io/apimachinery/pkg/types"
)
// TypeMeta describes an individual object in an API response or request
// with strings representing the type of the object and its API schema version.
// Structures that are versioned or persisted should inline TypeMeta.
//
// +k8s:deepcopy-gen=false
type TypeMeta struct {
	// Kind is a string value representing the REST resource this object represents.
	// Servers may infer this from the endpoint the client submits requests to.
	// Cannot be updated.
	// In CamelCase.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
	// APIVersion defines the versioned schema of this representation of an object.
	// Servers should convert recognized schemas to the latest internal value, and
	// may reject unrecognized values.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
}
// ListMeta describes metadata that synthetic resources must have, including lists and
// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
// NOTE(review): this appears to be a slimmed-down variant of metav1.ListMeta
// (no SelfLink field) — confirm against upstream when regenerating.
type ListMeta struct {
	// String that identifies the server's internal version of this object that
	// can be used by clients to determine when objects have changed.
	// Value must be treated as opaque by clients and passed unmodified back to the server.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"`
	// continue may be set if the user set a limit on the number of items returned, and indicates that
	// the server has more data available. The value is opaque and may be used to issue another request
	// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
	// consistent list may not be possible if the server configuration has changed or more than a few
	// minutes have passed. The resourceVersion field returned when using this continue value will be
	// identical to the value in the first response, unless you have received this token from an error
	// message.
	Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"`
	// remainingItemCount is the number of subsequent items in the list which are not included in this
	// list response. If the list request contained label or field selectors, then the number of
	// remaining items is unknown and the field will be left unset and omitted during serialization.
	// If the list is complete (either because it is not chunking or because this is the last chunk),
	// then there are no more remaining items and this field will be left unset and omitted during
	// serialization.
	// Servers older than v1.15 do not set this field.
	// The intended use of the remainingItemCount is *estimating* the size of a collection. Clients
	// should not rely on the remainingItemCount to be set or to be exact.
	// +optional
	RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"`
}
// Field path constants that are specific to the internal API
// representation.
const (
	// ObjectNameField is the field selector path for an object's metadata.name.
	ObjectNameField = "metadata.name"
)
// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here
const (
	// FinalizerOrphanDependents orphans dependents instead of deleting them.
	FinalizerOrphanDependents = "orphan"
	// FinalizerDeleteDependents requests foreground cascading deletion.
	FinalizerDeleteDependents = "foregroundDeletion"
)
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create.
type ObjectMeta struct {
	// Name must be unique within a namespace. Is required when creating resources, although
	// some resources may allow a client to request the generation of an appropriate name
	// automatically. Name is primarily intended for creation idempotence and configuration
	// definition.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// GenerateName is an optional prefix, used by the server, to generate a unique
	// name ONLY IF the Name field has not been provided.
	// If this field is used, the name returned to the client will be different
	// than the name passed. This value will also be combined with a unique suffix.
	// The provided value has the same validation rules as the Name field,
	// and may be truncated by the length of the suffix required to make the value
	// unique on the server.
	//
	// If this field is specified and the generated name exists, the server will return a 409.
	//
	// Applied only if Name is not specified.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
	// +optional
	GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
	// Namespace defines the space within which each name must be unique. An empty namespace is
	// equivalent to the "default" namespace, but "default" is the canonical representation.
	// Not all objects are required to be scoped to a namespace - the value of this field for
	// those objects will be empty.
	//
	// Must be a DNS_LABEL.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
	// UID is the unique in time and space value for this object. It is typically generated by
	// the server on successful creation of a resource and is not allowed to change on PUT
	// operations.
	//
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
	// An opaque value that represents the internal version of this object that can
	// be used by clients to determine when objects have changed. May be used for optimistic
	// concurrency, change detection, and the watch operation on a resource or set of resources.
	// Clients must treat these values as opaque and passed unmodified back to the server.
	// They may only be valid for a particular resource or set of resources.
	//
	// Populated by the system.
	// Read-only.
	// Value must be treated as opaque by clients.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
	// +optional
	// +deepequal-gen=false
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
	// A sequence number representing a specific generation of the desired state.
	// Populated by the system. Read-only.
	// +optional
	Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
	// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
	// field is set by the server when a graceful deletion is requested by the user, and is not
	// directly settable by a client. The resource is expected to be deleted (no longer visible
	// from resource lists, and not reachable by name) after the time in this field, once the
	// finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
	// Once the deletionTimestamp is set, this value may not be unset or be set further into the
	// future, although it may be shortened or the resource may be deleted prior to this time.
	// For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
	// by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
	// the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
	// remove the pod from the API. In the presence of network partitions, this object may still
	// exist after this timestamp, until an administrator or automated process can determine the
	// resource is fully terminated.
	// If not set, graceful deletion of the object has not been requested.
	//
	// Populated by the system when a graceful deletion is requested.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
	// Map of string keys and values that can be used to organize and categorize
	// (scope and select) objects. May match selectors of replication controllers
	// and services.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
	// +optional
	Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
	// Annotations is an unstructured key value map stored with a resource that may be
	// set by external tools to store and retrieve arbitrary metadata. They are not
	// queryable and should be preserved when modifying objects.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations
	// +optional
	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
	// List of objects depended by this object. If ALL objects in the list have
	// been deleted, this object will be garbage collected. If this object is managed by a controller,
	// then an entry in this list will point to this controller, with the controller field set to true.
	// There cannot be more than one managing controller.
	// +optional
	// +patchMergeKey=uid
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=uid
	OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
}
// Well-known namespace name constants.
const (
	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
	NamespaceDefault = "default"
	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
	NamespaceAll = ""
	// NamespaceNone is the argument for a context when there is no namespace.
	NamespaceNone = ""
	// NamespaceSystem is the system namespace where we place system components.
	NamespaceSystem = "kube-system"
	// NamespacePublic is the namespace where we place public info (ConfigMaps)
	NamespacePublic = "kube-public"
)
// OwnerReference contains enough information to let you identify an owning
// object. An owning object must be in the same namespace as the dependent, or
// be cluster-scoped, so there is no namespace field.
// +structType=atomic
type OwnerReference struct {
	// API version of the referent.
	APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"`
	// Kind of the referent.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names
	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
	// UID of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids
	UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
	// If true, this reference points to the managing controller.
	// +optional
	Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"`
}
// Field validation directive values. NOTE(review): presumably these are the
// accepted values for an API request's fieldValidation option — confirm
// against upstream metav1 when regenerating.
const (
	// FieldValidationIgnore ignores unknown/duplicate fields
	FieldValidationIgnore = "Ignore"
	// FieldValidationWarn responds with a warning, but successfully serve the request
	FieldValidationWarn = "Warn"
	// FieldValidationStrict fails the request on unknown/duplicate fields
	FieldValidationStrict = "Strict"
)
// LabelSelectorQueryParam returns the query parameter name used to carry a
// label selector. The API version argument is currently ignored.
// TODO: remove me when watch is refactored
func LabelSelectorQueryParam(version string) string {
	const param = "labelSelector"
	return param
}
// FieldSelectorQueryParam returns the query parameter name used to carry a
// field selector. The API version argument is currently ignored.
// TODO: remove me when watch is refactored
func FieldSelectorQueryParam(version string) string {
	const param = "fieldSelector"
	return param
}
// Note:
// There are two different styles of label selectors used in versioned types:
// an older style which is represented as just a string in versioned types, and a
// newer style that is structured. LabelSelector is an internal representation for the
// latter style.
// A label selector is a label query over a set of resources. The result of matchLabels and
// matchExpressions are ANDed. An empty label selector matches all objects. A null
// label selector matches no objects.
// +structType=atomic
type LabelSelector struct {
	// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
	// map is equivalent to an element of matchExpressions, whose key field is "key", the
	// operator is "In", and the values array contains only "value". The requirements are ANDed.
	//
	// +kubebuilder:validation:Optional
	MatchLabels map[string]MatchLabelsValue `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
	// matchExpressions is a list of label selector requirements. The requirements are ANDed.
	// +optional
	// +listType=atomic
	MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
}
// MatchLabelsValue represents the value from the MatchLabels {key,value} pair.
// It is a plain string alias; the kubebuilder markers below constrain it to a
// valid label value at CRD-validation time.
//
// +kubebuilder:validation:MaxLength=63
// +kubebuilder:validation:Pattern=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$`
type MatchLabelsValue = string
// A label selector requirement is a selector that contains values, a key, and an operator that
// relates the key and values.
type LabelSelectorRequirement struct {
	// key is the label key that the selector applies to.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// operator represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists and DoesNotExist.
	//
	// +kubebuilder:validation:Enum=In;NotIn;Exists;DoesNotExist
	Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
	// values is an array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. This array is replaced during a strategic
	// merge patch.
	// +optional
	// +listType=atomic
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// A label selector operator is the set of operators that can be used in a selector requirement.
type LabelSelectorOperator string
const (
	// LabelSelectorOpIn matches when the label value is in the Values list.
	LabelSelectorOpIn LabelSelectorOperator = "In"
	// LabelSelectorOpNotIn matches when the label value is not in the Values list.
	LabelSelectorOpNotIn LabelSelectorOperator = "NotIn"
	// LabelSelectorOpExists matches when the label key is present.
	LabelSelectorOpExists LabelSelectorOperator = "Exists"
	// LabelSelectorOpDoesNotExist matches when the label key is absent.
	LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
)
// ConditionStatus is the string status carried by a Condition.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
	ConditionTrue ConditionStatus = "True"
	ConditionFalse ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)
// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
// to get access to a particular ObjectMeta schema without knowing the details of the version.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PartialObjectMetadata struct {
	TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
}
// PartialObjectMetadataList contains a list of objects containing only their metadata
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PartialObjectMetadataList struct {
	TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// items contains each of the included items.
	Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// Condition contains details for one aspect of the current state of this API Resource.
// ---
// This struct is intended for direct use as an array at the field path .status.conditions. For example,
//
// type FooStatus struct{
// // Represents the observations of a foo's current state.
// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
// // +patchMergeKey=type
// // +patchStrategy=merge
// // +listType=map
// // +listMapKey=type
// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
//
// // other fields
// }
//
// NOTE: this mirrors the upstream Kubernetes metav1.Condition contract;
// the kubebuilder markers below are enforced by CRD schema validation.
type Condition struct {
// type of condition in CamelCase or in foo.example.com/CamelCase.
// ---
// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
// useful (see .node.status.conditions), the ability to deconflict is important.
// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
// +kubebuilder:validation:MaxLength=316
Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
// status of the condition, one of True, False, Unknown.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Enum=True;False;Unknown
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"`
// observedGeneration represents the .metadata.generation that the condition was set based upon.
// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
// with respect to the current state of the instance.
// +optional
// +kubebuilder:validation:Minimum=0
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
// lastTransitionTime is the last time the condition transitioned from one status to another.
// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Format=date-time
LastTransitionTime Time `json:"lastTransitionTime" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// reason contains a programmatic identifier indicating the reason for the condition's last transition.
// Producers of specific condition types may define expected values and meanings for this field,
// and whether the values are considered a guaranteed API.
// The value should be a CamelCase string.
// This field may not be empty.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:MaxLength=1024
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"`
// message is a human readable message indicating details about the transition.
// This may be an empty string.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:MaxLength=32768
Message string `json:"message" protobuf:"bytes,6,opt,name=message"`
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2015 The Kubernetes Authors.
package validation
import (
"fmt"
"regexp"
"unicode"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
)
// LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validation options.
type LabelSelectorValidationOptions struct {
// Allow invalid label value in selector (for backwards compatibility
// with already-persisted invalid data).
AllowInvalidLabelValueInSelector bool
// Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check
// can be performed, as in a *SubjectAccessReview
AllowUnknownOperatorInRequirement bool
}
// LabelSelectorHasInvalidLabelValue reports whether any match-expression value
// in the given selector fails label-value validation.
// This is useful for determining whether AllowInvalidLabelValueInSelector should
// be set to true when validating an update based on existing persisted invalid values.
func LabelSelectorHasInvalidLabelValue(ps *slim_metav1.LabelSelector) bool {
	if ps == nil {
		return false
	}
	for _, expr := range ps.MatchExpressions {
		for _, val := range expr.Values {
			if msgs := validation.IsValidLabelValue(val); len(msgs) != 0 {
				return true
			}
		}
	}
	return false
}
// ValidateLabelSelector validates the LabelSelector according to opts and
// returns any validation errors found under fldPath. A nil selector is valid.
// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when
// required for backwards compatibility with existing persisted invalid data.
func ValidateLabelSelector(ps *slim_metav1.LabelSelector, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList {
	errs := field.ErrorList{}
	if ps == nil {
		return errs
	}
	errs = append(errs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...)
	exprPath := fldPath.Child("matchExpressions")
	for i, expr := range ps.MatchExpressions {
		errs = append(errs, ValidateLabelSelectorRequirement(expr, opts, exprPath.Index(i))...)
	}
	return errs
}
// ValidateLabelSelectorRequirement validates the requirement according to opts
// and returns any validation errors found under fldPath.
// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when
// required for backwards compatibility with existing persisted invalid data.
func ValidateLabelSelectorRequirement(sr slim_metav1.LabelSelectorRequirement, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList {
	errs := field.ErrorList{}
	valuesPath := fldPath.Child("values")
	switch sr.Operator {
	case slim_metav1.LabelSelectorOpIn, slim_metav1.LabelSelectorOpNotIn:
		// Set-based operators require at least one value.
		if len(sr.Values) == 0 {
			errs = append(errs, field.Required(valuesPath, "must be specified when `operator` is 'In' or 'NotIn'"))
		}
	case slim_metav1.LabelSelectorOpExists, slim_metav1.LabelSelectorOpDoesNotExist:
		// Existence operators must not carry any values.
		if len(sr.Values) != 0 {
			errs = append(errs, field.Forbidden(valuesPath, "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
		}
	default:
		if !opts.AllowUnknownOperatorInRequirement {
			errs = append(errs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
		}
	}
	errs = append(errs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
	if !opts.AllowInvalidLabelValueInSelector {
		for i, v := range sr.Values {
			for _, msg := range validation.IsValidLabelValue(v) {
				errs = append(errs, field.Invalid(valuesPath.Index(i), v, msg))
			}
		}
	}
	return errs
}
// ValidateLabelName validates that labelName is a well-formed qualified name,
// returning one Invalid error (origin "labelKey") per violation.
func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
	errs := field.ErrorList{}
	for _, msg := range validation.IsQualifiedName(labelName) {
		errs = append(errs, field.Invalid(fldPath, labelName, msg).WithOrigin("labelKey"))
	}
	return errs
}
// ValidateLabels validates that a set of labels are correctly defined:
// every key must be a qualified name and every value a valid label value.
func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
	errs := field.ErrorList{}
	for key, value := range labels {
		errs = append(errs, ValidateLabelName(key, fldPath)...)
		for _, msg := range validation.IsValidLabelValue(value) {
			errs = append(errs, field.Invalid(fldPath, value, msg))
		}
	}
	return errs
}
// FieldManagerMaxLength is the maximum accepted length of a fieldManager value.
var FieldManagerMaxLength = 128
// ValidateFieldManager validates that the fieldManager is the proper length and
// only has printable characters.
func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// the field can not be set as a `*string`, so a empty string ("") is
// considered as not set and is defaulted by the rest of the process
// (unless apply is used, in which case it is required).
if len(fieldManager) > FieldManagerMaxLength {
allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, FieldManagerMaxLength))
}
// Verify that all characters are printable.
for i, r := range fieldManager {
if !unicode.IsPrint(r) {
allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i)))
}
}
return allErrs
}
// allowedFieldValidationValues is the set of accepted fieldValidation query parameter values ("" means unset).
var allowedFieldValidationValues = sets.NewString("", slim_metav1.FieldValidationIgnore, slim_metav1.FieldValidationWarn, slim_metav1.FieldValidationStrict)
// ValidateFieldValidation validates that a fieldValidation query param only
// contains one of the allowed values (unset, Ignore, Warn, or Strict).
func ValidateFieldValidation(fldPath *field.Path, fieldValidation string) field.ErrorList {
	errs := field.ErrorList{}
	if allowedFieldValidationValues.Has(fieldValidation) {
		return errs
	}
	return append(errs, field.NotSupported(fldPath, fieldValidation, allowedFieldValidationValues.List()))
}
// MaxSubresourceNameLength is the maximum accepted length of a subresource name.
const MaxSubresourceNameLength = 256
// ValidateConditions validates each condition in the slice and additionally
// reports a Duplicate error for every repeated condition type (only the first
// occurrence of a type is accepted).
func ValidateConditions(conditions []slim_metav1.Condition, fldPath *field.Path) field.ErrorList {
	var errs field.ErrorList
	seenTypes := map[string]int{}
	for i, cond := range conditions {
		if _, dup := seenTypes[cond.Type]; dup {
			errs = append(errs, field.Duplicate(fldPath.Index(i).Child("type"), cond.Type))
		} else {
			seenTypes[cond.Type] = i
		}
		errs = append(errs, ValidateCondition(cond, fldPath.Index(i))...)
	}
	return errs
}
// validConditionStatuses is used internally to check validity and provide a good message
var validConditionStatuses = sets.NewString(string(slim_metav1.ConditionTrue), string(slim_metav1.ConditionFalse), string(slim_metav1.ConditionUnknown))
// Maximum accepted lengths for Condition.Reason and Condition.Message.
const (
maxReasonLen = 1 * 1024
maxMessageLen = 32 * 1024
)
// ValidateCondition validates a single Condition: type format, accepted
// status, non-negative observedGeneration, set lastTransitionTime, and
// reason/message format and length limits.
func ValidateCondition(condition slim_metav1.Condition, fldPath *field.Path) field.ErrorList {
	var errs field.ErrorList
	// type is set and is a valid qualified-name format.
	errs = append(errs, ValidateLabelName(condition.Type, fldPath.Child("type"))...)
	// status is set and is an accepted value.
	if !validConditionStatuses.Has(string(condition.Status)) {
		errs = append(errs, field.NotSupported(fldPath.Child("status"), condition.Status, validConditionStatuses.List()))
	}
	if condition.ObservedGeneration < 0 {
		errs = append(errs, field.Invalid(fldPath.Child("observedGeneration"), condition.ObservedGeneration, "must be greater than or equal to zero"))
	}
	if condition.LastTransitionTime.IsZero() {
		errs = append(errs, field.Required(fldPath.Child("lastTransitionTime"), "must be set"))
	}
	reason := condition.Reason
	if len(reason) == 0 {
		errs = append(errs, field.Required(fldPath.Child("reason"), "must be set"))
	} else {
		for _, msg := range isValidConditionReason(reason) {
			errs = append(errs, field.Invalid(fldPath.Child("reason"), reason, msg))
		}
		if len(reason) > maxReasonLen {
			errs = append(errs, field.TooLong(fldPath.Child("reason"), "" /*unused*/, maxReasonLen))
		}
	}
	if len(condition.Message) > maxMessageLen {
		errs = append(errs, field.TooLong(fldPath.Child("message"), "" /*unused*/, maxMessageLen))
	}
	return errs
}
// conditionReasonFmt is the accepted format for Condition.Reason values.
const conditionReasonFmt string = "[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?"
const conditionReasonErrMsg string = "a condition reason must start with alphabetic character, optionally followed by a string of alphanumeric characters or '_,:', and must end with an alphanumeric character or '_'"
// conditionReasonRegexp anchors conditionReasonFmt to match the whole string.
var conditionReasonRegexp = regexp.MustCompile("^" + conditionReasonFmt + "$")
// isValidConditionReason tests for a string that conforms to the rules for
// condition reasons. This checks only the format, not the length; it returns
// nil when the value is valid, otherwise a single regex-based error message.
func isValidConditionReason(value string) []string {
	if conditionReasonRegexp.MatchString(value) {
		return nil
	}
	return []string{validation.RegexError(conditionReasonErrMsg, conditionReasonFmt, "my_name", "MY_NAME", "MyName", "ReasonA,ReasonB", "ReasonA:ReasonB")}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// init registers the generated conversion functions with the package's
// localSchemeBuilder (declared elsewhere in this package).
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
// No conversions are currently generated for this package, so this is a no-op.
func RegisterConversions(s *runtime.Scheme) error {
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
// Shallow-copy all value fields, then deep-copy LastTransitionTime.
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
// A nil receiver yields a nil copy.
func (in *Condition) DeepCopy() *Condition {
if in == nil {
return nil
}
out := new(Condition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelSelector) DeepCopyInto(out *LabelSelector) {
*out = *in
// Deep-copy the MatchLabels map (nil stays nil).
if in.MatchLabels != nil {
in, out := &in.MatchLabels, &out.MatchLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
// Deep-copy each MatchExpressions element (nil stays nil).
if in.MatchExpressions != nil {
in, out := &in.MatchExpressions, &out.MatchExpressions
*out = make([]LabelSelectorRequirement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector.
// A nil receiver yields a nil copy.
func (in *LabelSelector) DeepCopy() *LabelSelector {
if in == nil {
return nil
}
out := new(LabelSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelSelectorRequirement) DeepCopyInto(out *LabelSelectorRequirement) {
*out = *in
// Deep-copy the Values slice (nil stays nil).
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorRequirement.
// A nil receiver yields a nil copy.
func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement {
if in == nil {
return nil
}
out := new(LabelSelectorRequirement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ListMeta) DeepCopyInto(out *ListMeta) {
*out = *in
// Deep-copy the RemainingItemCount pointer (nil stays nil).
if in.RemainingItemCount != nil {
in, out := &in.RemainingItemCount, &out.RemainingItemCount
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListMeta.
// A nil receiver yields a nil copy.
func (in *ListMeta) DeepCopy() *ListMeta {
if in == nil {
return nil
}
out := new(ListMeta)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
*out = *in
// Deep-copy the DeletionTimestamp pointer (nil stays nil).
if in.DeletionTimestamp != nil {
in, out := &in.DeletionTimestamp, &out.DeletionTimestamp
*out = (*in).DeepCopy()
}
// Deep-copy the Labels map (nil stays nil).
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
// Deep-copy the Annotations map (nil stays nil).
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
// Deep-copy each OwnerReferences element (nil stays nil).
if in.OwnerReferences != nil {
in, out := &in.OwnerReferences, &out.OwnerReferences
*out = make([]OwnerReference, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
// A nil receiver yields a nil copy.
func (in *ObjectMeta) DeepCopy() *ObjectMeta {
if in == nil {
return nil
}
out := new(ObjectMeta)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OwnerReference) DeepCopyInto(out *OwnerReference) {
*out = *in
// Deep-copy the Controller pointer (nil stays nil).
if in.Controller != nil {
in, out := &in.Controller, &out.Controller
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerReference.
// A nil receiver yields a nil copy.
func (in *OwnerReference) DeepCopy() *OwnerReference {
if in == nil {
return nil
}
out := new(OwnerReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) {
*out = *in
// TypeMeta contains only value fields; ObjectMeta needs a deep copy.
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata.
// A nil receiver yields a nil copy.
func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata {
if in == nil {
return nil
}
out := new(PartialObjectMetadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// Returns an untyped nil (not a typed-nil interface) when the copy is nil.
func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
// Deep-copy each Items element (nil stays nil).
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PartialObjectMetadata, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList.
// A nil receiver yields a nil copy.
func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList {
if in == nil {
return nil
}
out := new(PartialObjectMetadataList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// Returns an untyped nil (not a typed-nil interface) when the copy is nil.
func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time.
// A nil receiver yields a nil copy; DeepCopyInto is declared elsewhere for Time.
func (in *Time) DeepCopy() *Time {
if in == nil {
return nil
}
out := new(Time)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Timestamp) DeepCopyInto(out *Timestamp) {
// Timestamp has only value fields, so a shallow copy is a deep copy.
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timestamp.
// A nil receiver yields a nil copy.
func (in *Timestamp) DeepCopy() *Timestamp {
if in == nil {
return nil
}
out := new(Timestamp)
in.DeepCopyInto(out)
return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package v1
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; a nil other is never equal.
func (in *Condition) DeepEqual(other *Condition) bool {
if other == nil {
return false
}
if in.Type != other.Type {
return false
}
if in.Status != other.Status {
return false
}
if in.ObservedGeneration != other.ObservedGeneration {
return false
}
if !in.LastTransitionTime.DeepEqual(&other.LastTransitionTime) {
return false
}
if in.Reason != other.Reason {
return false
}
if in.Message != other.Message {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; nil and empty maps/slices are
// treated as distinct.
func (in *LabelSelector) DeepEqual(other *LabelSelector) bool {
if other == nil {
return false
}
// Enter the comparison when both maps are set or exactly one is nil.
if ((in.MatchLabels != nil) && (other.MatchLabels != nil)) || ((in.MatchLabels == nil) != (other.MatchLabels == nil)) {
in, other := &in.MatchLabels, &other.MatchLabels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
// Same nil-sensitive comparison for the MatchExpressions slice.
if ((in.MatchExpressions != nil) && (other.MatchExpressions != nil)) || ((in.MatchExpressions == nil) != (other.MatchExpressions == nil)) {
in, other := &in.MatchExpressions, &other.MatchExpressions
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; nil and empty slices are distinct.
func (in *LabelSelectorRequirement) DeepEqual(other *LabelSelectorRequirement) bool {
if other == nil {
return false
}
if in.Key != other.Key {
return false
}
if in.Operator != other.Operator {
return false
}
// Enter the comparison when both slices are set or exactly one is nil.
if ((in.Values != nil) && (other.Values != nil)) || ((in.Values == nil) != (other.Values == nil)) {
in, other := &in.Values, &other.Values
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ListMeta) DeepEqual(other *ListMeta) bool {
if other == nil {
return false
}
if in.ResourceVersion != other.ResourceVersion {
return false
}
if in.Continue != other.Continue {
return false
}
// Pointers must agree on nil-ness; compare values only when both set.
if (in.RemainingItemCount == nil) != (other.RemainingItemCount == nil) {
return false
} else if in.RemainingItemCount != nil {
if *in.RemainingItemCount != *other.RemainingItemCount {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; nil and empty maps/slices are
// treated as distinct.
func (in *ObjectMeta) DeepEqual(other *ObjectMeta) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.GenerateName != other.GenerateName {
return false
}
if in.Namespace != other.Namespace {
return false
}
if in.UID != other.UID {
return false
}
if in.Generation != other.Generation {
return false
}
// Pointers must agree on nil-ness; compare values only when both set.
if (in.DeletionTimestamp == nil) != (other.DeletionTimestamp == nil) {
return false
} else if in.DeletionTimestamp != nil {
if !in.DeletionTimestamp.DeepEqual(other.DeletionTimestamp) {
return false
}
}
// Enter the comparison when both maps are set or exactly one is nil.
if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
in, other := &in.Labels, &other.Labels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
if ((in.Annotations != nil) && (other.Annotations != nil)) || ((in.Annotations == nil) != (other.Annotations == nil)) {
in, other := &in.Annotations, &other.Annotations
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
if ((in.OwnerReferences != nil) && (other.OwnerReferences != nil)) || ((in.OwnerReferences == nil) != (other.OwnerReferences == nil)) {
in, other := &in.OwnerReferences, &other.OwnerReferences
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *OwnerReference) DeepEqual(other *OwnerReference) bool {
if other == nil {
return false
}
if in.APIVersion != other.APIVersion {
return false
}
if in.Kind != other.Kind {
return false
}
if in.Name != other.Name {
return false
}
if in.UID != other.UID {
return false
}
// Pointers must agree on nil-ness; compare values only when both set.
if (in.Controller == nil) != (other.Controller == nil) {
return false
} else if in.Controller != nil {
if *in.Controller != *other.Controller {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PartialObjectMetadata) DeepEqual(other *PartialObjectMetadata) bool {
if other == nil {
return false
}
// TypeMeta has only comparable fields, so == suffices.
if in.TypeMeta != other.TypeMeta {
return false
}
if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil; nil and empty slices are distinct.
func (in *PartialObjectMetadataList) DeepEqual(other *PartialObjectMetadataList) bool {
if other == nil {
return false
}
if in.TypeMeta != other.TypeMeta {
return false
}
if !in.ListMeta.DeepEqual(&other.ListMeta) {
return false
}
// Enter the comparison when both slices are set or exactly one is nil.
if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
in, other := &in.Items, &other.Items
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Timestamp) DeepEqual(other *Timestamp) bool {
if other == nil {
return false
}
if in.Seconds != other.Seconds {
return false
}
if in.Nanos != other.Nanos {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *TypeMeta) DeepEqual(other *TypeMeta) bool {
if other == nil {
return false
}
if in.Kind != other.Kind {
return false
}
if in.APIVersion != other.APIVersion {
return false
}
return true
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
// No defaulters are currently generated for this package, so this is a no-op.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto
package v1beta1
import (
fmt "fmt"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
io "io"
proto "github.com/gogo/protobuf/proto"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Reset implements proto.Message by resetting the receiver to its zero value.
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
// ProtoMessage is a marker method implementing proto.Message.
func (*PartialObjectMetadataList) ProtoMessage() {}
// Descriptor returns the compressed file descriptor and this message's index within it.
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
return fileDescriptor_1a84ae209524fd15, []int{0}
}
// XXX_Unmarshal delegates to the generated Unmarshal for the proto runtime.
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
// XXX_Marshal marshals into b's full capacity via MarshalToSizedBuffer for the proto runtime.
func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
// Expand to full capacity; MarshalToSizedBuffer fills from the tail.
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
// XXX_Merge merges src into the receiver via the proto runtime.
func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
}
// XXX_Size reports the encoded size for the proto runtime.
func (m *PartialObjectMetadataList) XXX_Size() int {
return m.Size()
}
// XXX_DiscardUnknown drops unknown fields via the proto runtime.
func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
}
// xxx_messageInfo_PartialObjectMetadataList caches reflection-based marshal state for the proto runtime.
var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
// init registers the message type with the proto type registry.
func init() {
proto.RegisterType((*PartialObjectMetadataList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1beta1.PartialObjectMetadataList")
}
// init registers the compressed file descriptor with the proto file registry.
func init() {
proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto", fileDescriptor_1a84ae209524fd15)
}
var fileDescriptor_1a84ae209524fd15 = []byte{
// 322 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0x3f, 0x4f, 0x02, 0x31,
0x18, 0x87, 0xaf, 0x1a, 0x12, 0x72, 0xc4, 0xc4, 0x30, 0x21, 0x43, 0x21, 0x4e, 0x2c, 0xb6, 0x81,
0xc1, 0x90, 0xb0, 0x18, 0x06, 0x13, 0xa3, 0x44, 0xc3, 0xe8, 0xf6, 0xde, 0x51, 0x8f, 0x7a, 0xf4,
0xee, 0x72, 0x7d, 0xcf, 0xc4, 0xcd, 0x8f, 0xe0, 0xc7, 0x62, 0x64, 0x64, 0x22, 0x52, 0x3f, 0x88,
0xa6, 0xe5, 0x40, 0x43, 0x9c, 0x70, 0x7a, 0xff, 0xa4, 0x79, 0x9e, 0x5f, 0x9b, 0xfa, 0xb7, 0x91,
0xc4, 0x69, 0x11, 0xb0, 0x30, 0x55, 0x3c, 0x94, 0x33, 0x59, 0xec, 0x4a, 0x16, 0x47, 0x3c, 0xee,
0x6b, 0xae, 0x67, 0x52, 0xb9, 0x06, 0x32, 0xa9, 0xb9, 0x12, 0x08, 0xfc, 0xa5, 0x1b, 0x08, 0x84,
0x2e, 0x8f, 0x44, 0x22, 0x72, 0x40, 0x31, 0x61, 0x59, 0x9e, 0x62, 0x5a, 0x1f, 0xfc, 0xc0, 0xd8,
0x86, 0xb2, 0x2d, 0x59, 0x1c, 0xb1, 0xb8, 0xaf, 0x99, 0x85, 0xb9, 0xc6, 0xc2, 0x98, 0x85, 0xb1,
0x12, 0xd6, 0xbc, 0x3e, 0x2c, 0xc9, 0x7e, 0x88, 0xe6, 0xc5, 0x2f, 0x4e, 0x94, 0x46, 0x29, 0x77,
0xeb, 0xa0, 0x78, 0x72, 0x93, 0x1b, 0x5c, 0x57, 0x1e, 0xbf, 0xb4, 0x69, 0x64, 0x6a, 0x99, 0x0a,
0xc2, 0xa9, 0x4c, 0x44, 0xfe, 0xea, 0x8c, 0x79, 0x91, 0xa0, 0x54, 0x82, 0xeb, 0x70, 0x2a, 0x14,
0xec, 0x6b, 0xce, 0xbf, 0x88, 0x7f, 0xf6, 0x00, 0x39, 0x4a, 0x98, 0xdd, 0x07, 0xcf, 0x22, 0xc4,
0x91, 0x40, 0x98, 0x00, 0xc2, 0x9d, 0xd4, 0x58, 0x4f, 0xfc, 0xaa, 0x2a, 0xe7, 0xc6, 0x51, 0x9b,
0x74, 0x6a, 0xbd, 0x2b, 0x76, 0xd8, 0xe3, 0x30, 0xcb, 0xb3, 0xec, 0xe1, 0xe9, 0x7c, 0xd5, 0xf2,
0xcc, 0xaa, 0x55, 0xdd, 0x6e, 0xc6, 0x3b, 0x47, 0x3d, 0xf7, 0x2b, 0x12, 0x85, 0xd2, 0x0d, 0xd2,
0x3e, 0xee, 0xd4, 0x7a, 0xa3, 0x43, 0x65, 0x7f, 0xde, 0x68, 0x78, 0x52, 0x9a, 0x2b, 0x37, 0xd6,
0x31, 0xde, 0xa8, 0x86, 0x30, 0x5f, 0x53, 0x6f, 0xb1, 0xa6, 0xde, 0x72, 0x4d, 0xbd, 0x37, 0x43,
0xc9, 0xdc, 0x50, 0xb2, 0x30, 0x94, 0x2c, 0x0d, 0x25, 0x1f, 0x86, 0x92, 0xf7, 0x4f, 0xea, 0x3d,
0x0e, 0xfe, 0xf1, 0xc1, 0xbe, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xe4, 0x91, 0x70, 0x9e, 0x02,
0x00, 0x00,
}
// Marshal allocates an exactly-sized buffer and fills it from the tail.
func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
// MarshalTo marshals into the first Size() bytes of dAtA and returns the byte count.
func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes the message into the tail of dAtA, writing
// fields in reverse field-number order (ListMeta field 2, then Items field 1)
// so the wire output reads in ascending order. Returns the number of bytes used.
func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
// 0x12 = field 2, wire type 2 (length-delimited): ListMeta.
dAtA[i] = 0x12
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
// 0xa = field 1, wire type 2 (length-delimited): Items element.
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
// encodeVarintGenerated writes v as a protobuf base-128 varint so that
// the encoding ends at index offset (exclusive) in dAtA, and returns
// the index at which the encoding starts.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	offset -= sovGenerated(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
// Size returns the encoded protobuf size of m in bytes, accounting for
// each field's one-byte tag, varint length prefix, and payload.
func (m *PartialObjectMetadataList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// sovGenerated returns the number of bytes needed to encode x as a
// protobuf base-128 varint: seven payload bits per byte, minimum one.
func sovGenerated(x uint64) (n int) {
	width := math_bits.Len64(x | 1) // |1 so x == 0 still counts one bit
	return (width + 6) / 7
}
// sozGenerated returns the varint-encoded size of x after zigzag
// encoding, which maps small-magnitude signed values to small unsigned
// values.
func sozGenerated(x uint64) (n int) {
	zigzag := uint64((x << 1) ^ uint64((int64(x) >> 63)))
	return sovGenerated(zigzag)
}
// String renders this list in the generated gogoproto debug format;
// a nil receiver renders as "nil".
func (this *PartialObjectMetadataList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]PartialObjectMetadata{"
	for _, f := range this.Items {
		repeatedStringForItems += fmt.Sprintf("%v", f) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&PartialObjectMetadataList{`,
		`Items:` + repeatedStringForItems + `,`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// valueToStringGenerated renders a pointer value for debug output:
// "nil" for a nil pointer, otherwise "*" followed by the pointed-to
// value formatted with %v.
func valueToStringGenerated(v interface{}) string {
	val := reflect.ValueOf(v)
	if val.IsNil() {
		return "nil"
	}
	deref := reflect.Indirect(val).Interface()
	return fmt.Sprintf("*%v", deref)
}
// Unmarshal decodes protobuf wire-format bytes into m. Field 1
// (length-delimited) appends a PartialObjectMetadata to m.Items,
// field 2 (length-delimited) decodes into m.ListMeta, and any unknown
// field is skipped via skipGenerated.
func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field tag (fieldNum<<3 | wireType) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
			}
			// Read the length prefix of the embedded message.
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Items = append(m.Items, v1.PartialObjectMetadata{})
			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, guarding against negative or
			// overflowing skip distances.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipGenerated returns the number of bytes occupied by the next field
// (tag plus payload) at the start of dAtA, tracking group nesting depth
// so deprecated start/end-group pairs are consumed as a single unit.
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		// Decode the field tag varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint payload: consume until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed 64-bit payload.
			iNdEx += 8
		case 2:
			// Length-delimited payload: read the length, then skip it.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthGenerated
			}
			iNdEx += length
		case 3:
			// Start-group marker (deprecated wire type).
			depth++
		case 4:
			// End-group marker must match a prior start-group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupGenerated
			}
			depth--
		case 5:
			// Fixed 32-bit payload.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthGenerated
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
var (
	// ErrInvalidLengthGenerated reports a negative or overflowing length
	// computed while unmarshaling.
	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowGenerated reports a varint wider than 64 bits.
	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
	// ErrUnexpectedEndOfGroupGenerated reports an end-group marker with
	// no matching start-group.
	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2017 The Kubernetes Authors.
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name for this API.
const GroupName = "meta.k8s.io"

// SchemeGroupVersion is the group/version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns a Group qualified GroupKind.
func Kind(kind string) schema.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}
// AddMetaToScheme registers base meta types into schemas.
//
// It registers PartialObjectMetadata and PartialObjectMetadataList
// under SchemeGroupVersion (meta.k8s.io/v1beta1). It always returns nil.
func AddMetaToScheme(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&PartialObjectMetadata{},
		&PartialObjectMetadataList{},
	)
	return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Nothing further to do when there are no items to deep-copy.
	if in.Items == nil {
		return
	}
	src, dst := &in.Items, &out.Items
	*dst = make([]v1.PartialObjectMetadata, len(*src))
	for i := range *src {
		(*src)[i].DeepCopyInto(&(*dst)[i])
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList.
func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList {
	if in == nil {
		return nil
	}
	clone := new(PartialObjectMetadataList)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object {
	clone := in.DeepCopy()
	if clone == nil {
		return nil
	}
	return clone
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
//
// No defaulters are generated for this package, so this is a no-op that
// always returns nil.
func RegisterDefaults(scheme *runtime.Scheme) error {
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto
package intstr
import (
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
// (Blank assignments only force the imported packages to be considered used.)
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Reset restores m to the zero IntOrString.
func (m *IntOrString) Reset() { *m = IntOrString{} }

// ProtoMessage marks IntOrString as a protobuf message type.
func (*IntOrString) ProtoMessage() {}

// Descriptor returns the compressed FileDescriptorProto bytes and the
// index path of this message within that file.
func (*IntOrString) Descriptor() ([]byte, []int) {
	return fileDescriptor_8984be45904ea297, []int{0}
}

// XXX_Unmarshal delegates to the generated Unmarshal.
func (m *IntOrString) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal encodes m, reusing b's capacity when possible, and
// returns the encoded prefix.
func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	b = b[:cap(b)]
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[:n], nil
}

// XXX_Merge merges src into m via the proto runtime.
func (m *IntOrString) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IntOrString.Merge(m, src)
}

// XXX_Size reports the encoded size of m.
func (m *IntOrString) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops unknown fields retained by the proto runtime.
func (m *IntOrString) XXX_DiscardUnknown() {
	xxx_messageInfo_IntOrString.DiscardUnknown(m)
}

// xxx_messageInfo_IntOrString caches reflection metadata for IntOrString.
var xxx_messageInfo_IntOrString proto.InternalMessageInfo
// init registers the IntOrString message type with the gogo/protobuf
// registry under its fully-qualified name.
func init() {
	proto.RegisterType((*IntOrString)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr.IntOrString")
}

// init registers this file's gzipped descriptor with the proto registry.
func init() {
	proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto", fileDescriptor_8984be45904ea297)
}
var fileDescriptor_8984be45904ea297 = []byte{
// 293 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0x4a, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcc, 0xc9, 0x2c, 0x85, 0x53, 0x05, 0xd9,
0xe9, 0xfa, 0xd9, 0x16, 0xc5, 0xfa, 0xc5, 0x39, 0x99, 0xb9, 0x60, 0x46, 0x62, 0x41, 0x66, 0xb1,
0x7e, 0x69, 0x49, 0x66, 0x8e, 0x7e, 0x66, 0x5e, 0x49, 0x71, 0x49, 0x91, 0x7e, 0x7a, 0x6a, 0x5e,
0x6a, 0x51, 0x62, 0x49, 0x6a, 0x8a, 0x5e, 0x41, 0x51, 0x7e, 0x49, 0xbe, 0x90, 0x15, 0xc2, 0x2c,
0x3d, 0x88, 0x21, 0x30, 0xaa, 0x20, 0x3b, 0x5d, 0x2f, 0xdb, 0xa2, 0x58, 0x0f, 0x64, 0x16, 0x98,
0x01, 0x32, 0x4b, 0x0f, 0x64, 0x96, 0x1e, 0xc4, 0x2c, 0x29, 0x5d, 0x24, 0x77, 0xa4, 0xe7, 0xa7,
0xe7, 0xeb, 0x83, 0x8d, 0x4c, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0x62, 0x95, 0xd2,
0x44, 0x46, 0x2e, 0x6e, 0xcf, 0xbc, 0x12, 0xff, 0xa2, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x21,
0x0d, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x66, 0x27, 0x91, 0x13,
0xf7, 0xe4, 0x19, 0x1e, 0xdd, 0x93, 0x67, 0x09, 0xa9, 0x2c, 0x48, 0xfd, 0x05, 0xa5, 0x83, 0xc0,
0x2a, 0x84, 0xd4, 0xb8, 0xd8, 0x32, 0xf3, 0x4a, 0xc2, 0x12, 0x73, 0x24, 0x98, 0x14, 0x18, 0x35,
0x58, 0x9d, 0xf8, 0xa0, 0x6a, 0xd9, 0x3c, 0xc1, 0xa2, 0x41, 0x50, 0x59, 0x90, 0xba, 0xe2, 0x92,
0x22, 0x90, 0x3a, 0x66, 0x05, 0x46, 0x0d, 0x4e, 0x84, 0xba, 0x60, 0xb0, 0x68, 0x10, 0x54, 0xd6,
0x8a, 0x63, 0xc6, 0x02, 0x79, 0x86, 0x86, 0x3b, 0x0a, 0x0c, 0x4e, 0x09, 0x27, 0x1e, 0xca, 0x31,
0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4,
0x18, 0x2f, 0x3c, 0x92, 0x63, 0xbc, 0xf1, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f,
0xe5, 0x18, 0xa2, 0xac, 0xc8, 0x0f, 0x70, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0x17, 0x78,
0x08, 0xad, 0x01, 0x00, 0x00,
}
// Marshal encodes m into a freshly allocated buffer sized by m.Size()
// and returns the used prefix of that buffer.
func (m *IntOrString) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo encodes m into dAtA, which must hold at least m.Size()
// bytes, and returns the number of bytes written.
func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA:
// StrVal (field 3, tag 0x1a), then IntVal (field 2, tag 0x10), then
// Type (field 1, tag 0x8). All three fields are always emitted.
func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	i -= len(m.StrVal)
	copy(dAtA[i:], m.StrVal)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal)))
	i--
	dAtA[i] = 0x1a
	i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal))
	i--
	dAtA[i] = 0x10
	i = encodeVarintGenerated(dAtA, i, uint64(m.Type))
	i--
	dAtA[i] = 0x8
	return len(dAtA) - i, nil
}
// encodeVarintGenerated writes v as a protobuf base-128 varint so that
// the encoding ends at index offset (exclusive) in dAtA, and returns
// the index at which the encoding starts.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	offset -= sovGenerated(v)
	start := offset
	pos := offset
	for ; v >= 0x80; pos++ {
		dAtA[pos] = uint8(v) | 0x80 // continuation bit set
		v >>= 7
	}
	dAtA[pos] = uint8(v) // final byte has the continuation bit clear
	return start
}
// Size returns the encoded protobuf size of m: one tag byte plus varint
// payload for Type and IntVal, and tag + length prefix + bytes for
// StrVal.
func (m *IntOrString) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	n += 1 + sovGenerated(uint64(m.Type))
	n += 1 + sovGenerated(uint64(m.IntVal))
	l = len(m.StrVal)
	n += 1 + l + sovGenerated(uint64(l))
	return n
}
// sovGenerated returns the number of bytes needed to varint-encode x
// (seven payload bits per byte, minimum one byte; |1 handles x == 0).
func sovGenerated(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
// sozGenerated returns the varint-encoded size of x after zigzag
// encoding (signed values mapped to small unsigned values).
func sozGenerated(x uint64) (n int) {
	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// Unmarshal decodes protobuf wire-format bytes into m: field 1 (varint)
// is Type, field 2 (varint) is IntVal, field 3 (length-delimited) is
// StrVal; unknown fields are skipped with skipGenerated.
func (m *IntOrString) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field tag (fieldNum<<3 | wireType) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IntOrString: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Type |= Type(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType)
			}
			m.IntVal = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.IntVal |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType)
			}
			// Read the string length, then slice the payload out.
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.StrVal = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it, guarding against negative or
			// overflowing skip distances.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipGenerated returns the number of bytes occupied by the next field
// (tag plus payload) at the start of dAtA, tracking group nesting depth
// so deprecated start/end-group pairs are consumed as a single unit.
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		// Decode the field tag varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint payload: consume until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed 64-bit payload.
			iNdEx += 8
		case 2:
			// Length-delimited payload: read the length, then skip it.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthGenerated
			}
			iNdEx += length
		case 3:
			// Start-group marker (deprecated wire type).
			depth++
		case 4:
			// End-group marker must match a prior start-group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupGenerated
			}
			depth--
		case 5:
			// Fixed 32-bit payload.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthGenerated
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
var (
	// ErrInvalidLengthGenerated reports a negative or overflowing length
	// computed while unmarshaling.
	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowGenerated reports a varint wider than 64 bits.
	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
	// ErrUnexpectedEndOfGroupGenerated reports an end-group marker with
	// no matching start-group.
	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package intstr
import (
"encoding/json"
"errors"
"fmt"
"math"
"runtime/debug"
"strconv"
"strings"
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
"k8s.io/klog/v2"
)
// IntOrString is a type that can hold an int32 or a string. When used in
// JSON or YAML marshalling and unmarshalling, it produces or consumes the
// inner type. This allows you to have, for example, a JSON field that can
// accept a name or number.
// TODO: Rename to Int32OrString
//
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:openapi-gen=true
type IntOrString struct {
	Type   Type   `protobuf:"varint,1,opt,name=type,casttype=Type"` // discriminator: Int or String
	IntVal int32  `protobuf:"varint,2,opt,name=intVal"`             // numeric value, valid when Type == Int
	StrVal string `protobuf:"bytes,3,opt,name=strVal"`              // string value, valid when Type == String
}
// Type represents the stored type of IntOrString.
type Type int64

const (
	Int    Type = iota // The IntOrString holds an int.
	String             // The IntOrString holds a string.
)
// FromInt creates an IntOrString object with an int32 value. It is
// your responsibility not to call this method with a value greater
// than int32.
// Deprecated: use FromInt32 instead.
func FromInt(val int) IntOrString {
	if val > math.MaxInt32 || val < math.MinInt32 {
		// Out-of-range values are logged with a stack trace but still
		// truncated to int32, preserving historical behavior.
		klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
	}
	return IntOrString{Type: Int, IntVal: int32(val)}
}
// FromInt32 creates an IntOrString object with an int32 value.
func FromInt32(val int32) IntOrString {
	out := IntOrString{Type: Int}
	out.IntVal = val
	return out
}
// FromString creates an IntOrString object with a string value.
func FromString(val string) IntOrString {
	out := IntOrString{Type: String}
	out.StrVal = val
	return out
}
// Parse the given string and try to convert it to an int32 integer before
// setting it as a string value.
func Parse(val string) IntOrString {
	if parsed, err := strconv.ParseInt(val, 10, 32); err == nil {
		return FromInt32(int32(parsed))
	}
	return FromString(val)
}
// UnmarshalJSON implements the json.Unmarshaller interface.
//
// A value beginning with '"' is decoded into StrVal (Type=String);
// anything else is decoded into IntVal (Type=Int). Previously an empty
// input panicked on the unchecked value[0] access; it now falls through
// to json.Unmarshal, which returns "unexpected end of JSON input".
func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
	if len(value) > 0 && value[0] == '"' {
		intstr.Type = String
		return json.Unmarshal(value, &intstr.StrVal)
	}
	intstr.Type = Int
	return json.Unmarshal(value, &intstr.IntVal)
}
// UnmarshalCBOR decodes a CBOR value into intstr: a CBOR text string
// sets Type=String; otherwise the value is decoded as an int32 and sets
// Type=Int.
func (intstr *IntOrString) UnmarshalCBOR(value []byte) error {
	// Try the string representation first; fall back to an integer.
	if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil {
		intstr.Type = String
		return nil
	}
	if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil {
		return err
	}
	intstr.Type = Int
	return nil
}
// String returns the string value, or the Itoa of the int value.
// A nil receiver renders as "<nil>".
func (intstr *IntOrString) String() string {
	switch {
	case intstr == nil:
		return "<nil>"
	case intstr.Type == String:
		return intstr.StrVal
	default:
		return strconv.Itoa(intstr.IntValue())
	}
}
// IntValue returns the IntVal if type Int, or if
// it is a String, will attempt a conversion to int,
// returning 0 if a parsing error occurs.
func (intstr *IntOrString) IntValue() int {
	if intstr.Type != String {
		return int(intstr.IntVal)
	}
	parsed, _ := strconv.Atoi(intstr.StrVal) // 0 on parse failure, by design
	return parsed
}
// MarshalJSON implements the json.Marshaller interface, encoding the
// inner value (int32 or string) selected by Type.
func (intstr IntOrString) MarshalJSON() ([]byte, error) {
	if intstr.Type == Int {
		return json.Marshal(intstr.IntVal)
	}
	if intstr.Type == String {
		return json.Marshal(intstr.StrVal)
	}
	return []byte{}, fmt.Errorf("impossible IntOrString.Type")
}
// MarshalCBOR encodes intstr as the CBOR form of its inner value
// (int32 or string) selected by Type; an unrecognized Type is an error.
func (intstr IntOrString) MarshalCBOR() ([]byte, error) {
	switch intstr.Type {
	case Int:
		return cbor.Marshal(intstr.IntVal)
	case String:
		return cbor.Marshal(intstr.StrVal)
	default:
		return nil, fmt.Errorf("impossible IntOrString.Type")
	}
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} }

// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type. "int-or-string" is the conventional
// format name for this dual-typed value.
func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" }

// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing
// the OpenAPI v3 spec of this type (v3 can express the integer/string union).
func (IntOrString) OpenAPIV3OneOfTypes() []string { return []string{"integer", "string"} }
// ValueOrDefault returns intOrPercent when it is non-nil, otherwise a
// pointer to defaultValue.
func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString {
	if intOrPercent != nil {
		return intOrPercent
	}
	return &defaultValue
}
// GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent.
// This method returns a scaled value from an IntOrString type. If the IntOrString
// is a percentage string value it's treated as a percentage and scaled appropriately
// in accordance to the total, if it's an int value it's treated as a simple value and
// if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error.
func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
	if intOrPercent == nil {
		return 0, errors.New("nil value for IntOrString")
	}
	value, isPercent, err := getIntOrPercentValueSafely(intOrPercent)
	if err != nil {
		return 0, fmt.Errorf("invalid value for IntOrString: %w", err)
	}
	if !isPercent {
		return value, nil
	}
	// Scale the percentage against total, rounding as requested.
	round := math.Floor
	if roundUp {
		round = math.Ceil
	}
	return int(round(float64(value) * float64(total) / 100)), nil
}
// GetValueFromIntOrPercent was deprecated in favor of
// GetScaledValueFromIntOrPercent. This method was treating all int as a numeric value and all
// strings with or without a percent symbol as a percentage value.
// Deprecated
func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
	if intOrPercent == nil {
		return 0, errors.New("nil value for IntOrString")
	}
	value, isPercent, err := getIntOrPercentValue(intOrPercent)
	if err != nil {
		return 0, fmt.Errorf("invalid value for IntOrString: %w", err)
	}
	if !isPercent {
		return value, nil
	}
	// Scale the percentage against total, rounding as requested.
	round := math.Floor
	if roundUp {
		round = math.Ceil
	}
	return int(round(float64(value) * float64(total) / 100)), nil
}
// getIntOrPercentValue is a legacy function and only meant to be called by
// GetValueFromIntOrPercent. For a more correct implementation call
// getIntOrPercentValueSafely.
//
// It returns (value, isPercent, error). For Type Int it returns the raw
// int. For Type String it strips every '%' and parses the remainder,
// reporting isPercent=true even when no '%' was present — the legacy
// quirk that motivated the "safely" variant.
func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) {
	switch intOrStr.Type {
	case Int:
		return intOrStr.IntValue(), false, nil
	case String:
		s := strings.ReplaceAll(intOrStr.StrVal, "%", "")
		v, err := strconv.Atoi(s)
		if err != nil {
			return 0, false, fmt.Errorf("invalid value %q: %w", intOrStr.StrVal, err)
		}
		// v is already an int; the old int(v) conversion was redundant.
		return v, true, nil
	}
	return 0, false, fmt.Errorf("invalid type: neither int nor percentage")
}
// getIntOrPercentValueSafely returns (value, isPercent, error). For
// Type Int it returns the raw int with isPercent=false. For Type String
// the value must carry a trailing '%'; the numeric prefix is parsed and
// returned with isPercent=true, and any string without the suffix is an
// error. Fixes the original's inconsistent trim source (it re-read
// intOrStr.StrVal instead of the local copy) and drops the redundant
// int(v) conversion and dead isPercent bookkeeping; observable behavior
// is unchanged.
func getIntOrPercentValueSafely(intOrStr *IntOrString) (int, bool, error) {
	switch intOrStr.Type {
	case Int:
		return intOrStr.IntValue(), false, nil
	case String:
		s := intOrStr.StrVal
		if !strings.HasSuffix(s, "%") {
			return 0, false, fmt.Errorf("invalid type: string is not a percentage")
		}
		s = strings.TrimSuffix(s, "%")
		v, err := strconv.Atoi(s)
		if err != nil {
			return 0, false, fmt.Errorf("invalid value %q: %w", intOrStr.StrVal, err)
		}
		return v, true, nil
	}
	return 0, false, fmt.Errorf("invalid type: neither int nor percentage")
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package intstr
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IntOrString) DeepEqual(other *IntOrString) bool {
if other == nil {
return false
}
if in.Type != other.Type {
return false
}
if in.IntVal != other.IntVal {
return false
}
if in.StrVal != other.StrVal {
return false
}
return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
fmt "fmt"
http "net/http"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1"
discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1"
networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
// Interface is the top-level contract of this slim clientset: API
// discovery plus the typed clients for the core, discovery, and
// networking groups.
type Interface interface {
	Discovery() discovery.DiscoveryInterface
	CoreV1() corev1.CoreV1Interface
	DiscoveryV1() discoveryv1.DiscoveryV1Interface
	NetworkingV1() networkingv1.NetworkingV1Interface
}
// Clientset contains the clients for groups.
type Clientset struct {
	// DiscoveryClient serves API discovery (server groups and resources).
	*discovery.DiscoveryClient
	coreV1       *corev1.CoreV1Client             // typed client for core/v1
	discoveryV1  *discoveryv1.DiscoveryV1Client   // typed client for discovery.k8s.io/v1
	networkingV1 *networkingv1.NetworkingV1Client // typed client for networking.k8s.io/v1
}
// CoreV1 retrieves the CoreV1Client.
func (c *Clientset) CoreV1() corev1.CoreV1Interface {
	return c.coreV1
}

// DiscoveryV1 retrieves the DiscoveryV1Client.
func (c *Clientset) DiscoveryV1() discoveryv1.DiscoveryV1Interface {
	return c.discoveryV1
}

// NetworkingV1 retrieves the NetworkingV1Client.
func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
	return c.networkingV1
}

// Discovery retrieves the DiscoveryClient.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	// A nil receiver yields a nil interface so callers can chain safely.
	if c == nil {
		return nil
	}
	return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
	// Work on a shallow copy so the caller's config is never mutated.
	configShallowCopy := *c
	if configShallowCopy.UserAgent == "" {
		configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	// share the transport between all clients
	httpClient, err := rest.HTTPClientFor(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&configShallowCopy, httpClient)
}
// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
	configShallowCopy := *c
	// Derive a token-bucket rate limiter from QPS/Burst when none is set;
	// a positive QPS with a non-positive Burst is rejected as misconfiguration.
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		if configShallowCopy.Burst <= 0 {
			return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
		}
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	// Build each group client against the shared config and transport.
	var cs Clientset
	var err error
	cs.coreV1, err = corev1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.discoveryV1, err = discoveryv1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.networkingV1, err = networkingv1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	clientset, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return clientset
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	return &Clientset{
		DiscoveryClient: discovery.NewDiscoveryClient(c),
		coreV1:          corev1.New(c),
		discoveryV1:     discoveryv1.New(c),
		networkingV1:    networkingv1.New(c),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// Scheme holds all API types known to this clientset.
var Scheme = runtime.NewScheme()

// Codecs provides serializers and deserializers backed by Scheme.
var Codecs = serializer.NewCodecFactory(Scheme)

// ParameterCodec converts API objects to and from URL query parameters.
var ParameterCodec = runtime.NewParameterCodec(Scheme)

// localSchemeBuilder aggregates the per-group AddToScheme functions.
var localSchemeBuilder = runtime.SchemeBuilder{
	corev1.AddToScheme,
	discoveryv1.AddToScheme,
	networkingv1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

// init wires the base meta/v1 types into Scheme and applies every
// registered AddToScheme function, panicking on registration failure.
func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(Scheme))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
http "net/http"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// CoreV1Interface aggregates a raw RESTClient accessor with the typed
// getters for every core/v1 resource this slim client supports.
type CoreV1Interface interface {
	RESTClient() rest.Interface
	EndpointsGetter
	NamespacesGetter
	NodesGetter
	PodsGetter
	SecretsGetter
	ServicesGetter
}
// CoreV1Client is used to interact with features provided by the group.
type CoreV1Client struct {
	restClient rest.Interface // REST transport shared by all resource clients
}
// Endpoints returns an Endpoints client scoped to the given namespace.
func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface {
	return newEndpoints(c, namespace)
}

// Namespaces returns a cluster-scoped Namespaces client.
func (c *CoreV1Client) Namespaces() NamespaceInterface {
	return newNamespaces(c)
}

// Nodes returns a cluster-scoped Nodes client.
func (c *CoreV1Client) Nodes() NodeInterface {
	return newNodes(c)
}

// Pods returns a Pods client scoped to the given namespace.
func (c *CoreV1Client) Pods(namespace string) PodInterface {
	return newPods(c, namespace)
}

// Secrets returns a Secrets client scoped to the given namespace.
func (c *CoreV1Client) Secrets(namespace string) SecretInterface {
	return newSecrets(c, namespace)
}

// Services returns a Services client scoped to the given namespace.
func (c *CoreV1Client) Services(namespace string) ServiceInterface {
	return newServices(c, namespace)
}
// NewForConfig creates a new CoreV1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*CoreV1Client, error) {
	// Work on a copy so the caller's config is never mutated.
	config := *c
	setConfigDefaults(&config)
	httpClient, err := rest.HTTPClientFor(&config)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new CoreV1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoreV1Client, error) {
	config := *c
	setConfigDefaults(&config)
	client, err := rest.RESTClientForConfigAndClient(&config, h)
	if err != nil {
		return nil, err
	}
	return &CoreV1Client{client}, nil
}
// NewForConfigOrDie creates a new CoreV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *CoreV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new CoreV1Client for the given RESTClient.
func New(c rest.Interface) *CoreV1Client {
return &CoreV1Client{c}
}
func setConfigDefaults(config *rest.Config) {
gv := corev1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/api"
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *CoreV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// EndpointsGetter has a method to return a EndpointsInterface.
// A group's client should implement this interface.
type EndpointsGetter interface {
Endpoints(namespace string) EndpointsInterface
}
// EndpointsInterface has methods to work with Endpoints resources.
type EndpointsInterface interface {
Create(ctx context.Context, endpoints *corev1.Endpoints, opts metav1.CreateOptions) (*corev1.Endpoints, error)
Update(ctx context.Context, endpoints *corev1.Endpoints, opts metav1.UpdateOptions) (*corev1.Endpoints, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Endpoints, error)
List(ctx context.Context, opts metav1.ListOptions) (*corev1.EndpointsList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Endpoints, err error)
EndpointsExpansion
}
// endpoints implements EndpointsInterface
//
// All standard verbs are supplied by the embedded generic
// gentype.ClientWithList; no methods are defined on this type directly.
type endpoints struct {
*gentype.ClientWithList[*corev1.Endpoints, *corev1.EndpointsList]
}
// newEndpoints returns a Endpoints
func newEndpoints(c *CoreV1Client, namespace string) *endpoints {
return &endpoints{
gentype.NewClientWithList[*corev1.Endpoints, *corev1.EndpointsList](
"endpoints",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
// Factories the generic client uses to allocate result objects.
func() *corev1.Endpoints { return &corev1.Endpoints{} },
func() *corev1.EndpointsList { return &corev1.EndpointsList{} },
),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// NamespacesGetter has a method to return a NamespaceInterface.
// A group's client should implement this interface.
type NamespacesGetter interface {
Namespaces() NamespaceInterface
}
// NamespaceInterface has methods to work with Namespace resources.
type NamespaceInterface interface {
Create(ctx context.Context, namespace *corev1.Namespace, opts metav1.CreateOptions) (*corev1.Namespace, error)
Update(ctx context.Context, namespace *corev1.Namespace, opts metav1.UpdateOptions) (*corev1.Namespace, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Namespace, error)
List(ctx context.Context, opts metav1.ListOptions) (*corev1.NamespaceList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Namespace, err error)
NamespaceExpansion
}
// namespaces implements NamespaceInterface
//
// All verbs are supplied by the embedded generic gentype.ClientWithList.
type namespaces struct {
*gentype.ClientWithList[*corev1.Namespace, *corev1.NamespaceList]
}
// newNamespaces returns a Namespaces
func newNamespaces(c *CoreV1Client) *namespaces {
return &namespaces{
gentype.NewClientWithList[*corev1.Namespace, *corev1.NamespaceList](
"namespaces",
c.RESTClient(),
scheme.ParameterCodec,
// Namespaces are cluster-scoped, hence the empty namespace argument.
"",
func() *corev1.Namespace { return &corev1.Namespace{} },
func() *corev1.NamespaceList { return &corev1.NamespaceList{} },
),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// NodesGetter has a method to return a NodeInterface.
// A group's client should implement this interface.
type NodesGetter interface {
Nodes() NodeInterface
}
// NodeInterface has methods to work with Node resources.
type NodeInterface interface {
Create(ctx context.Context, node *corev1.Node, opts metav1.CreateOptions) (*corev1.Node, error)
Update(ctx context.Context, node *corev1.Node, opts metav1.UpdateOptions) (*corev1.Node, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
UpdateStatus(ctx context.Context, node *corev1.Node, opts metav1.UpdateOptions) (*corev1.Node, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Node, error)
List(ctx context.Context, opts metav1.ListOptions) (*corev1.NodeList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Node, err error)
NodeExpansion
}
// nodes implements NodeInterface
//
// All verbs are supplied by the embedded generic gentype.ClientWithList.
type nodes struct {
*gentype.ClientWithList[*corev1.Node, *corev1.NodeList]
}
// newNodes returns a Nodes
func newNodes(c *CoreV1Client) *nodes {
return &nodes{
gentype.NewClientWithList[*corev1.Node, *corev1.NodeList](
"nodes",
c.RESTClient(),
scheme.ParameterCodec,
// Nodes are cluster-scoped, hence the empty namespace argument.
"",
func() *corev1.Node { return &corev1.Node{} },
func() *corev1.NodeList { return &corev1.NodeList{} },
),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// PodsGetter has a method to return a PodInterface.
// A group's client should implement this interface.
type PodsGetter interface {
Pods(namespace string) PodInterface
}
// PodInterface has methods to work with Pod resources.
type PodInterface interface {
Create(ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error)
Update(ctx context.Context, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
UpdateStatus(ctx context.Context, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error)
List(ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Pod, err error)
UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error)
UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error)
PodExpansion
}
// pods implements PodInterface
//
// Standard verbs come from the embedded generic gentype.ClientWithList;
// the subresource updates below are implemented explicitly.
type pods struct {
*gentype.ClientWithList[*corev1.Pod, *corev1.PodList]
}
// newPods returns a Pods
func newPods(c *CoreV1Client, namespace string) *pods {
return &pods{
gentype.NewClientWithList[*corev1.Pod, *corev1.PodList](
"pods",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
// Factories the generic client uses to allocate result objects.
func() *corev1.Pod { return &corev1.Pod{} },
func() *corev1.PodList { return &corev1.PodList{} },
),
}
}
// UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (result *corev1.Pod, err error) {
result = &corev1.Pod{}
// PUT against the pod's "ephemeralcontainers" subresource.
err = c.GetClient().Put().
Namespace(c.GetNamespace()).
Resource("pods").
Name(podName).
SubResource("ephemeralcontainers").
VersionedParams(&opts, scheme.ParameterCodec).
Body(pod).
Do(ctx).
Into(result)
return
}
// UpdateResize takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
func (c *pods) UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (result *corev1.Pod, err error) {
result = &corev1.Pod{}
// PUT against the pod's "resize" subresource.
err = c.GetClient().Put().
Namespace(c.GetNamespace()).
Resource("pods").
Name(podName).
SubResource("resize").
VersionedParams(&opts, scheme.ParameterCodec).
Body(pod).
Do(ctx).
Into(result)
return
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// SecretsGetter has a method to return a SecretInterface.
// A group's client should implement this interface.
type SecretsGetter interface {
Secrets(namespace string) SecretInterface
}
// SecretInterface has methods to work with Secret resources.
type SecretInterface interface {
Create(ctx context.Context, secret *corev1.Secret, opts metav1.CreateOptions) (*corev1.Secret, error)
Update(ctx context.Context, secret *corev1.Secret, opts metav1.UpdateOptions) (*corev1.Secret, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Secret, error)
List(ctx context.Context, opts metav1.ListOptions) (*corev1.SecretList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Secret, err error)
SecretExpansion
}
// secrets implements SecretInterface
//
// All verbs are supplied by the embedded generic gentype.ClientWithList.
type secrets struct {
*gentype.ClientWithList[*corev1.Secret, *corev1.SecretList]
}
// newSecrets returns a Secrets
func newSecrets(c *CoreV1Client, namespace string) *secrets {
return &secrets{
gentype.NewClientWithList[*corev1.Secret, *corev1.SecretList](
"secrets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
func() *corev1.Secret { return &corev1.Secret{} },
func() *corev1.SecretList { return &corev1.SecretList{} },
),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// ServicesGetter has a method to return a ServiceInterface.
// A group's client should implement this interface.
type ServicesGetter interface {
Services(namespace string) ServiceInterface
}
// ServiceInterface has methods to work with Service resources.
type ServiceInterface interface {
Create(ctx context.Context, service *corev1.Service, opts metav1.CreateOptions) (*corev1.Service, error)
Update(ctx context.Context, service *corev1.Service, opts metav1.UpdateOptions) (*corev1.Service, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
UpdateStatus(ctx context.Context, service *corev1.Service, opts metav1.UpdateOptions) (*corev1.Service, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Service, error)
List(ctx context.Context, opts metav1.ListOptions) (*corev1.ServiceList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Service, err error)
ServiceExpansion
}
// services implements ServiceInterface
//
// All verbs are supplied by the embedded generic gentype.ClientWithList.
type services struct {
*gentype.ClientWithList[*corev1.Service, *corev1.ServiceList]
}
// newServices returns a Services
func newServices(c *CoreV1Client, namespace string) *services {
return &services{
gentype.NewClientWithList[*corev1.Service, *corev1.ServiceList](
"services",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
func() *corev1.Service { return &corev1.Service{} },
func() *corev1.ServiceList { return &corev1.ServiceList{} },
),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
http "net/http"
discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// DiscoveryV1Interface exposes the typed clients for the discovery.k8s.io/v1
// resources served by this (slim) clientset, plus the raw REST client.
type DiscoveryV1Interface interface {
RESTClient() rest.Interface
EndpointSlicesGetter
}
// DiscoveryV1Client is used to interact with features provided by the discovery.k8s.io group.
type DiscoveryV1Client struct {
restClient rest.Interface
}
// EndpointSlices returns a client for EndpointSlice objects in the given namespace.
func (c *DiscoveryV1Client) EndpointSlices(namespace string) EndpointSliceInterface {
return newEndpointSlices(c, namespace)
}
// NewForConfig creates a new DiscoveryV1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) {
// Copy the config so the caller's instance is never mutated.
config := *c
setConfigDefaults(&config)
httpClient, err := rest.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new DiscoveryV1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1Client, error) {
config := *c
setConfigDefaults(&config)
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil {
return nil, err
}
return &DiscoveryV1Client{client}, nil
}
// NewForConfigOrDie creates a new DiscoveryV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *DiscoveryV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new DiscoveryV1Client for the given RESTClient.
func New(c rest.Interface) *DiscoveryV1Client {
return &DiscoveryV1Client{c}
}
// setConfigDefaults fills in the group/version, API path, negotiated
// serializer and user agent used by every request from this client.
func setConfigDefaults(config *rest.Config) {
gv := discoveryv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *DiscoveryV1Client) RESTClient() rest.Interface {
// A nil receiver is tolerated and yields a nil REST client.
if c == nil {
return nil
}
return c.restClient
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// EndpointSlicesGetter has a method to return a EndpointSliceInterface.
// A group's client should implement this interface.
type EndpointSlicesGetter interface {
EndpointSlices(namespace string) EndpointSliceInterface
}
// EndpointSliceInterface has methods to work with EndpointSlice resources.
type EndpointSliceInterface interface {
Create(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts metav1.CreateOptions) (*discoveryv1.EndpointSlice, error)
Update(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts metav1.UpdateOptions) (*discoveryv1.EndpointSlice, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*discoveryv1.EndpointSlice, error)
List(ctx context.Context, opts metav1.ListOptions) (*discoveryv1.EndpointSliceList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *discoveryv1.EndpointSlice, err error)
EndpointSliceExpansion
}
// endpointSlices implements EndpointSliceInterface
//
// All verbs are supplied by the embedded generic gentype.ClientWithList.
type endpointSlices struct {
*gentype.ClientWithList[*discoveryv1.EndpointSlice, *discoveryv1.EndpointSliceList]
}
// newEndpointSlices returns a EndpointSlices
func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices {
return &endpointSlices{
gentype.NewClientWithList[*discoveryv1.EndpointSlice, *discoveryv1.EndpointSliceList](
"endpointslices",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
func() *discoveryv1.EndpointSlice { return &discoveryv1.EndpointSlice{} },
func() *discoveryv1.EndpointSliceList { return &discoveryv1.EndpointSliceList{} },
),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
http "net/http"
networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// NetworkingV1Interface exposes the typed clients for the networking.k8s.io/v1
// resources served by this (slim) clientset, plus the raw REST client.
type NetworkingV1Interface interface {
RESTClient() rest.Interface
NetworkPoliciesGetter
}
// NetworkingV1Client is used to interact with features provided by the networking.k8s.io group.
type NetworkingV1Client struct {
restClient rest.Interface
}
// NetworkPolicies returns a client for NetworkPolicy objects in the given namespace.
func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInterface {
return newNetworkPolicies(c, namespace)
}
// NewForConfig creates a new NetworkingV1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) {
// Copy the config so the caller's instance is never mutated.
config := *c
setConfigDefaults(&config)
httpClient, err := rest.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new NetworkingV1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1Client, error) {
config := *c
setConfigDefaults(&config)
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil {
return nil, err
}
return &NetworkingV1Client{client}, nil
}
// NewForConfigOrDie creates a new NetworkingV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *NetworkingV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new NetworkingV1Client for the given RESTClient.
func New(c rest.Interface) *NetworkingV1Client {
return &NetworkingV1Client{c}
}
// setConfigDefaults fills in the group/version, API path, negotiated
// serializer and user agent used by every request from this client.
func setConfigDefaults(config *rest.Config) {
gv := networkingv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *NetworkingV1Client) RESTClient() rest.Interface {
// A nil receiver is tolerated and yields a nil REST client.
if c == nil {
return nil
}
return c.restClient
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
context "context"
networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface.
// A group's client should implement this interface.
type NetworkPoliciesGetter interface {
NetworkPolicies(namespace string) NetworkPolicyInterface
}
// NetworkPolicyInterface has methods to work with NetworkPolicy resources.
type NetworkPolicyInterface interface {
Create(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts metav1.CreateOptions) (*networkingv1.NetworkPolicy, error)
Update(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts metav1.UpdateOptions) (*networkingv1.NetworkPolicy, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.NetworkPolicy, error)
List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.NetworkPolicyList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.NetworkPolicy, err error)
NetworkPolicyExpansion
}
// networkPolicies implements NetworkPolicyInterface
//
// All verbs are supplied by the embedded generic gentype.ClientWithList.
type networkPolicies struct {
*gentype.ClientWithList[*networkingv1.NetworkPolicy, *networkingv1.NetworkPolicyList]
}
// newNetworkPolicies returns a NetworkPolicies
func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies {
return &networkPolicies{
gentype.NewClientWithList[*networkingv1.NetworkPolicy, *networkingv1.NetworkPolicyList](
"networkpolicies",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
func() *networkingv1.NetworkPolicy { return &networkingv1.NetworkPolicy{} },
func() *networkingv1.NetworkPolicyList { return &networkingv1.NetworkPolicyList{} },
),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package synced
import (
"github.com/cilium/cilium/pkg/lock"
)
// APIGroups tracks, under a lock, which k8s API Groups are currently
// enabled/in-use.
type APIGroups struct {
	lock.RWMutex
	// apis holds the enabled group names; allocated lazily by AddAPI.
	apis map[string]bool
}

// AddAPI records the given k8s API group as enabled/in-use.
func (m *APIGroups) AddAPI(api string) {
	m.Lock()
	defer m.Unlock()
	if m.apis == nil {
		m.apis = map[string]bool{}
	}
	m.apis[api] = true
}

// RemoveAPI deletes the given k8s API group from the enabled set.
// Removing a group that was never added is a no-op.
func (m *APIGroups) RemoveAPI(api string) {
	m.Lock()
	delete(m.apis, api)
	m.Unlock()
}

// GetGroups returns a snapshot of the enabled API group names, in
// unspecified (map iteration) order.
func (m *APIGroups) GetGroups() []string {
	m.RLock()
	defer m.RUnlock()
	names := make([]string, 0, len(m.apis))
	for name := range m.apis {
		names = append(names, name)
	}
	return names
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package synced
// CacheStatus allows waiting for k8s caches to synchronize: the channel
// is closed once the caches have synced.
type CacheStatus chan struct{}

// Synchronized reports whether the caches have been synchronized at
// least once.
//
// An uninitialized (nil) CacheStatus is always considered synchronized.
func (cs CacheStatus) Synchronized() bool {
	if cs == nil {
		return true
	}
	// Non-blocking probe: a receive succeeds only after the channel
	// has been closed.
	select {
	case <-cs:
		return true
	default:
	}
	return false
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package synced
import (
"context"
"errors"
"log/slog"
"github.com/cilium/hive/cell"
"github.com/cilium/hive/job"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/k8s/client"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/promise"
"github.com/cilium/cilium/pkg/time"
)
// syncedParams collects the dependencies injected (via hive's cell.In)
// into the *Resources constructor registered in Cell below.
type syncedParams struct {
cell.In
Logger *slog.Logger
CacheStatus CacheStatus
}
// Cell provides the types used for tracking synchronization of internal
// K8s resources: a shared *APIGroups registry, a *Resources tracker, and
// the CacheStatus channel it reports through.
var Cell = cell.Module(
"k8s-synced",
"Provides types for internal K8s resource synchronization",
cell.Provide(func() *APIGroups {
return new(APIGroups)
}),
cell.Provide(func(params syncedParams) *Resources {
return &Resources{
logger: params.Logger,
CacheStatus: params.CacheStatus,
}
}),
cell.Provide(func() CacheStatus {
return make(CacheStatus)
}),
)
// CRDSyncCell provides a promise.Promise[CRDSync] that resolves once the
// required CRDs have been registered (see newCRDSyncPromise), along with
// its timeout configuration.
var CRDSyncCell = cell.Module(
"k8s-synced-crdsync",
"Provides promise for waiting for CRD to have been synchronized",
cell.Provide(newCRDSyncPromise),
cell.Config(DefaultCRDSyncConfig),
)
// CRDSyncConfig configures how long the CRD sync waits before giving up.
type CRDSyncConfig struct {
CRDWaitTimeout time.Duration
}
// DefaultCRDSyncConfig waits up to five minutes for the CRDs at startup.
var DefaultCRDSyncConfig = CRDSyncConfig{
CRDWaitTimeout: 5 * time.Minute,
}
// Flags registers the command-line flags backing CRDSyncConfig.
func (def CRDSyncConfig) Flags(flags *pflag.FlagSet) {
flags.Duration("crd-wait-timeout", def.CRDWaitTimeout, "Cilium will exit if CRDs are not available within this duration upon startup")
}
// CRDSync is an empty type used for promise.Promise. If SyncCRDs() fails, the error is passed via
// promise Reject to the result of the promise Await() call.
type CRDSync struct{}
// CRDSyncResourceNames is a slice of CRD resource names CRDSync promise waits for
type CRDSyncResourceNames []string
// ErrCRDSyncDisabled is the rejection error used when CRD synchronization
// is not in effect (e.g. clientset disabled or dry-run mode).
var ErrCRDSyncDisabled = errors.New("CRDSync promise is disabled")
// RejectedCRDSyncPromise can be used in hives that do not provide the CRDSync promise.
// Each call builds a fresh promise already rejected with ErrCRDSyncDisabled.
var RejectedCRDSyncPromise = func() promise.Promise[CRDSync] {
crdSyncResolver, crdSyncPromise := promise.New[CRDSync]()
crdSyncResolver.Reject(ErrCRDSyncDisabled)
return crdSyncPromise
}
// syncCRDsPromiseParams collects the dependencies injected (via hive's
// cell.In) into newCRDSyncPromise.
type syncCRDsPromiseParams struct {
cell.In
Logger *slog.Logger
Lifecycle cell.Lifecycle
Jobs job.Registry
Health cell.Health
Clientset client.Clientset
Resources *Resources
APIGroups *APIGroups
ResourceNames CRDSyncResourceNames
Config CRDSyncConfig
}
// newCRDSyncPromise returns a promise that is resolved once SyncCRDs has
// observed all required CRDs, or rejected with the sync error. When the
// clientset is disabled or Cilium runs in dry mode, the promise is
// rejected immediately with ErrCRDSyncDisabled.
func newCRDSyncPromise(params syncCRDsPromiseParams) promise.Promise[CRDSync] {
crdSyncResolver, crdSyncPromise := promise.New[CRDSync]()
if !params.Clientset.IsEnabled() || option.Config.DryMode {
crdSyncResolver.Reject(ErrCRDSyncDisabled)
return crdSyncPromise
}
// Run the (blocking) CRD sync in a one-shot background job so the
// promise can be handed out before synchronization completes.
g := params.Jobs.NewGroup(params.Health, params.Lifecycle)
g.Add(job.OneShot("sync-crds", func(ctx context.Context, health cell.Health) error {
err := SyncCRDs(ctx, params.Logger, params.Clientset, params.ResourceNames, params.Resources, params.APIGroups, params.Config)
if err != nil {
crdSyncResolver.Reject(err)
} else {
crdSyncResolver.Resolve(struct{}{})
}
return err
}))
return crdSyncPromise
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package synced provides tools for tracking if k8s resources have
// been initially synchronized with the k8s apiserver.
package synced
import (
"context"
"errors"
"fmt"
"log/slog"
apiextclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
operatorOption "github.com/cilium/cilium/operator/option"
v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
"github.com/cilium/cilium/pkg/k8s/client"
"github.com/cilium/cilium/pkg/k8s/informer"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/time"
)
const (
	// k8sAPIGroupCRD names the CustomResourceDefinition API group as
	// tracked by this package.
	k8sAPIGroupCRD = "CustomResourceDefinition"
)

// CRDResourceName returns the synced-resource name used to track the
// given CRD, namespaced with a "crd:" prefix.
func CRDResourceName(crd string) string {
	const prefix = "crd:"
	return prefix + crd
}
// agentCRDResourceNames computes the CRD resource names the agent must
// wait for, driven by the feature flags in option.Config. A small core
// set plus the LB IP pool and L2 announcement CRDs are always included;
// the rest are appended per enabled feature.
func agentCRDResourceNames() []string {
result := []string{
CRDResourceName(v2.CNName),
CRDResourceName(v2.CIDName),
CRDResourceName(v2alpha1.CPIPName),
}
if !option.Config.DisableCiliumEndpointCRD {
result = append(result, CRDResourceName(v2.CEPName))
// Endpoint slices are only meaningful when endpoint CRDs are in use.
if option.Config.EnableCiliumEndpointSlice {
result = append(result, CRDResourceName(v2alpha1.CESName))
}
}
if option.Config.EnableCiliumNetworkPolicy {
result = append(result, CRDResourceName(v2.CNPName))
}
if option.Config.EnableCiliumClusterwideNetworkPolicy {
result = append(result, CRDResourceName(v2.CCNPName))
}
// CiliumCIDRGroups are referenced by both policy flavors.
if option.Config.EnableCiliumNetworkPolicy || option.Config.EnableCiliumClusterwideNetworkPolicy {
result = append(result, CRDResourceName(v2.CCGName))
}
if option.Config.EnableEgressGateway {
result = append(result, CRDResourceName(v2.CEGPName))
}
if option.Config.EnableLocalRedirectPolicy {
result = append(result, CRDResourceName(v2.CLRPName))
}
if option.Config.EnableEnvoyConfig {
result = append(result, CRDResourceName(v2.CCECName))
result = append(result, CRDResourceName(v2.CECName))
}
if option.Config.EnableBGPControlPlane {
result = append(result, CRDResourceName(v2alpha1.BGPPName))
// BGPv2 CRDs
result = append(result, CRDResourceName(v2.BGPCCName))
result = append(result, CRDResourceName(v2.BGPAName))
result = append(result, CRDResourceName(v2.BGPPCName))
result = append(result, CRDResourceName(v2.BGPNCName))
result = append(result, CRDResourceName(v2.BGPNCOName))
}
// Always required regardless of feature flags.
result = append(result,
CRDResourceName(v2.LBIPPoolName),
CRDResourceName(v2alpha1.L2AnnouncementName),
)
return result
}
// AgentCRDResourceNames returns a list of all CRD resource names the Cilium
// agent needs to wait to be registered before initializing any k8s watchers.
// The exact contents depend on the feature flags in option.Config.
func AgentCRDResourceNames() []string {
return agentCRDResourceNames()
}
// ClusterMeshAPIServerResourceNames returns a list of all CRD resource names the
// clustermesh-apiserver needs to wait to be registered before initializing any
// k8s watchers.
func ClusterMeshAPIServerResourceNames() []string {
return []string{
CRDResourceName(v2.CNName),
CRDResourceName(v2.CIDName),
CRDResourceName(v2.CEPName),
}
}
// GatewayAPIResourceNames returns the CRD resource names required for the
// Gateway API support, or nil when Gateway API support is disabled.
func GatewayAPIResourceNames() []string {
	if operatorOption.Config.EnableGatewayAPI {
		return []string{CRDResourceName(v2alpha1.CGCCName)}
	}
	return nil
}
// AllCiliumCRDResourceNames returns a list of all Cilium CRD resource names
// that the cilium operator or testsuite may register.
func AllCiliumCRDResourceNames() []string {
	names := AgentCRDResourceNames()
	names = append(names, GatewayAPIResourceNames()...)
	names = append(names, CRDResourceName(v2.CNCName))
	return names
}
// SyncCRDs will sync Cilium CRDs to ensure that they have all been
// installed inside the K8s cluster. These CRDs are added by the
// Cilium Operator. This function will block until it finds all the
// CRDs or if a timeout occurs.
func SyncCRDs(ctx context.Context, logger *slog.Logger, clientset client.Clientset, crdNames []string, rs *Resources, ag *APIGroups, cfg CRDSyncConfig) error {
	crds := newCRDState(logger, crdNames)

	// Watch CRD metadata only (not full CRD objects) for add/delete events so
	// crds tracks which of the expected CRDs currently exist in the cluster.
	listerWatcher := newListWatchFromClient(
		newCRDGetter(clientset),
		fields.Everything(),
	)
	_, crdController := informer.NewInformer(
		listerWatcher,
		&slim_metav1.PartialObjectMetadata{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj any) { crds.add(obj) },
			DeleteFunc: func(obj any) { crds.remove(obj) },
		},
		nil,
	)

	// Create a context so that we can timeout after the configured CRD wait
	// period.
	ctx, cancel := context.WithTimeout(ctx, cfg.CRDWaitTimeout)
	defer cancel()

	crds.Lock()
	for crd := range crds.m {
		rs.BlockWaitGroupToSyncResources(
			ctx.Done(),
			nil,
			func() bool {
				crds.Lock()
				defer crds.Unlock()
				return crds.m[crd]
			},
			crd,
		)
	}
	crds.Unlock()

	// The above loop will call blockWaitGroupToSyncResources to populate the
	// K8sWatcher state with the current state of the CRDs. It will check the
	// state of each CRD, with the inline function provided. If the function
	// reports that the given CRD is true (has been synced), it will close a
	// channel associated with the given CRD. A subsequent call to
	// (*K8sWatcher).WaitForCacheSync will notice that a given CRD's channel
	// has been closed. Once all the CRDs passed to WaitForCacheSync have had
	// their channels closed, the function unblocks.
	//
	// Meanwhile, the below code kicks off the controller that was instantiated
	// above, and enters a loop looking for (1) if the context has deadlined or
	// (2) if the entire CRD state has been synced (all CRDs found in the
	// cluster). While we're in for-select loop, the controller is listening
	// for either add or delete events to the customresourcedefinition resource
	// (disguised inside a metav1.PartialObjectMetadata object). If (1) is
	// encountered, then Cilium will fatal because it cannot proceed if the
	// CRDs are not present. If (2) is encountered, then make sure the
	// controller has exited by cancelling the context and we return out.
	go crdController.Run(ctx.Done())
	ag.AddAPI(k8sAPIGroupCRD)
	// We no longer need this API to show up in `cilium status` as the
	// controller will exit after this function.
	defer ag.RemoveAPI(k8sAPIGroupCRD)

	logger.Info("Waiting until all Cilium CRDs are available")

	ticker := time.NewTicker(50 * time.Millisecond)
	// Stop the ticker on every return path. Previously it was only stopped on
	// the success path, leaking the ticker until GC when the context expired
	// or was canceled.
	defer ticker.Stop()
	count := 0
	for {
		select {
		case <-ctx.Done():
			err := ctx.Err()
			if err != nil && !errors.Is(err, context.Canceled) {
				logging.Fatal(
					logger,
					fmt.Sprintf("Unable to find all Cilium CRDs necessary within "+
						"%v timeout. Please ensure that Cilium Operator is "+
						"running, as it's responsible for registering all "+
						"the Cilium CRDs. The following CRDs were not found: %v",
						cfg.CRDWaitTimeout, crds.unSynced()),
					logfields.Error, err,
				)
			}
			// If the context was canceled it means the daemon is being stopped
			// so we can return the context's error.
			return err
		case <-ticker.C:
			if crds.isSynced() {
				logger.Info("All Cilium CRDs have been found and are available")
				return nil
			}
			// Log progress roughly once per second (20 ticks of 50ms).
			count++
			if count == 20 {
				count = 0
				logger.Info(
					"Still waiting for Cilium Operator to register CRDs",
					logfields.CRDs, crds.unSynced(),
				)
			}
		}
	}
}
// add marks the CRD carried by the informer event as present in the cluster.
func (s *crdState) add(obj any) {
	pom := informer.CastInformerEvent[slim_metav1.PartialObjectMetadata](s.logger, obj)
	if pom == nil {
		return
	}
	s.Lock()
	defer s.Unlock()
	s.m[CRDResourceName(pom.GetName())] = true
}
// remove marks the CRD carried by the informer event as absent from the cluster.
func (s *crdState) remove(obj any) {
	pom := informer.CastInformerEvent[slim_metav1.PartialObjectMetadata](s.logger, obj)
	if pom == nil {
		return
	}
	s.Lock()
	defer s.Unlock()
	s.m[CRDResourceName(pom.GetName())] = false
}
// isSynced returns whether all the CRDs inside `m` have all been synced,
// meaning all CRDs we care about in Cilium exist in the cluster.
func (s *crdState) isSynced() bool {
	s.Lock()
	defer s.Unlock()
	synced := true
	for _, present := range s.m {
		synced = synced && present
	}
	return synced
}
// unSynced returns a slice containing all CRDs that currently have not been
// synced, i.e. that have not yet been observed in the cluster.
func (s *crdState) unSynced() []string {
	s.Lock()
	defer s.Unlock()
	missing := make([]string, 0, len(s.m))
	for name, present := range s.m {
		if present {
			continue
		}
		missing = append(missing, name)
	}
	return missing
}
// crdState contains the state of the CRDs inside the cluster.
type crdState struct {
	logger *slog.Logger
	// Mutex guards m, which is written by the informer callbacks (add/remove)
	// and read by the sync-wait loop in SyncCRDs.
	lock.Mutex
	// m is a map which maps the CRD name to its synced state in the cluster.
	// True means it exists, false means it doesn't exist.
	m map[string]bool
}
// newCRDState builds a crdState in which every given CRD name starts out as
// not yet observed in the cluster.
func newCRDState(logger *slog.Logger, crds []string) crdState {
	pending := make(map[string]bool, len(crds))
	for _, name := range crds {
		pending[name] = false
	}
	return crdState{logger: logger, m: pending}
}
// newListWatchFromClient is a copy of the NewListWatchFromClient from the
// "k8s.io/client-go/tools/cache" package, with many alterations made to
// efficiently retrieve Cilium CRDs. Efficient retrieval is important because
// we don't want each agent to fetch the full CRDs across the cluster, because
// they potentially contain large validation schemas.
//
// This function also removes unnecessary calls from the upstream
// version that set the namespace and the resource when performing `Get`.
//
//   - If the resource was set, the following error was observed:
//     "customresourcedefinitions.apiextensions.k8s.io
//     "customresourcedefinitions" not found".
//   - If the namespace was set, the following error was observed:
//     "an empty namespace may not be set when a resource name is provided".
//
// The namespace problem can be worked around by using NamespaceIfScoped, but
// it's been omitted entirely here because it's equivalent in functionality.
func newListWatchFromClient(
	c cache.Getter,
	fieldSelector fields.Selector,
) *cache.ListWatch {
	// Apply the caller's field selector to both list and watch requests.
	optionsModifier := func(options *metav1.ListOptions) {
		options.FieldSelector = fieldSelector.String()
	}
	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
		optionsModifier(&options)
		// This lister will retrieve the CRDs as a
		// metav1{,v1beta1}.PartialObjectMetadataList object.
		getter := c.Get()
		// Setting this special header allows us to retrieve the objects the
		// same way that `kubectl get crds` does, except that kubectl retrieves
		// them as a collection inside a metav1{,v1beta1}.Table. Either way, we
		// request the CRDs in a metav1,{v1beta1}.PartialObjectMetadataList
		// object which contains individual metav1.PartialObjectMetadata
		// objects, containing the minimal representation of objects in K8s (in
		// this case a CRD). This matches with what the controller (informer)
		// expects as it wants a list type.
		getter = getter.SetHeader("Accept", pomListHeader)
		t := &slim_metav1.PartialObjectMetadataList{}
		if err := getter.
			VersionedParams(&options, metav1.ParameterCodec).
			Do(context.TODO()).
			Into(t); err != nil {
			return nil, err
		}
		return t, nil
	}
	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
		optionsModifier(&options)
		getter := c.Get()
		// This watcher will retrieve each CRD that the lister has listed
		// as individual metav1.PartialObjectMetadata because it is
		// requesting the apiserver to return objects as such via the
		// "Accept" header.
		getter = getter.SetHeader("Accept", pomHeader)
		options.Watch = true
		return getter.
			VersionedParams(&options, metav1.ParameterCodec).
			Watch(context.TODO())
	}
	return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
const (
	// pomListHeader asks the apiserver to return the CRD collection as a
	// meta.k8s.io PartialObjectMetadataList (v1 preferred, v1beta1 fallback,
	// plain JSON last).
	pomListHeader = "application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io,application/json;as=PartialObjectMetadataList;v=v1beta1;g=meta.k8s.io,application/json"
	// pomHeader asks the apiserver to return each watched CRD as an
	// individual meta.k8s.io PartialObjectMetadata object.
	pomHeader = "application/json;as=PartialObjectMetadata;v=v1;g=meta.k8s.io,application/json;as=PartialObjectMetadata;v=v1beta1;g=meta.k8s.io,application/json"
)
// Get instantiates a GET request from the K8s REST client to retrieve CRDs. We
// define this getter because it's necessary to use the correct apiextensions
// client (v1 or v1beta1) in order to retrieve the CRDs in a
// backwards-compatible way. This implements the cache.Getter interface.
func (c *crdGetter) Get() *rest.Request {
	req := c.api.ApiextensionsV1().RESTClient().Get()
	return req.Name("customresourcedefinitions")
}
// crdGetter wraps an apiextensions client so CRDs can be fetched through the
// cache.Getter interface (see Get above).
type crdGetter struct {
	api apiextclientset.Interface
}
// newCRDGetter wraps the given apiextensions client in a crdGetter.
func newCRDGetter(c apiextclientset.Interface) *crdGetter {
	g := crdGetter{api: c}
	return &g
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package synced
import (
"context"
"fmt"
"log/slog"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/tools/cache"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// Resources maps resource names to channels that are closed upon initial
// sync with k8s.
type Resources struct {
	logger *slog.Logger
	// CacheStatus reports whether the initial cache sync has completed; once
	// it has, BlockWaitGroupToSyncResources logs an error and refuses new waits.
	CacheStatus CacheStatus
	// RWMutex guards the three maps below.
	lock.RWMutex
	// resourceChannels maps a resource name to a channel. Once the given
	// resource name is synchronized with k8s, the channel for which that
	// resource name maps to is closed.
	resources map[string]<-chan struct{}
	// stopWait contains the result of cache.WaitForCacheSync
	stopWait map[string]bool
	// timeSinceLastEvent contains the time each resource last received an event.
	timeSinceLastEvent map[string]time.Time
}
// getTimeOfLastEvent returns the time the given resource last received an
// event; never is true when no event has ever been recorded for it.
func (r *Resources) getTimeOfLastEvent(resource string) (when time.Time, never bool) {
	r.RLock()
	defer r.RUnlock()
	if last, ok := r.timeSinceLastEvent[resource]; ok {
		return last, false
	}
	return time.Time{}, true
}
// SetEventTimestamp records "now" as the time the given resource last received
// an event. Calls made after the event map was released (set to nil) are no-ops.
func (r *Resources) SetEventTimestamp(resource string) {
	stamp := time.Now()
	r.Lock()
	defer r.Unlock()
	if r.timeSinceLastEvent == nil {
		return
	}
	r.timeSinceLastEvent[resource] = stamp
}
// CancelWaitGroupToSyncResources removes the given resource from the set of
// resources WaitForCacheSync waits on.
func (r *Resources) CancelWaitGroupToSyncResources(resourceName string) {
	r.Lock()
	defer r.Unlock()
	delete(r.resources, resourceName)
}
// BlockWaitGroupToSyncResources ensures that anything which waits on waitGroup
// waits until all objects of the specified resource stored in Kubernetes are
// received by the informer and processed by controller.
// Fatally exits if syncing these initial objects fails.
// If the given stop channel is closed, it does not fatal.
// Once the k8s caches are synced against k8s, k8sCacheSynced is also closed.
func (r *Resources) BlockWaitGroupToSyncResources(
	stop <-chan struct{},
	swg *lock.StoppableWaitGroup,
	hasSyncedFunc cache.InformerSynced,
	resourceName string,
) {
	// Log an error if caches have already synchronized, as the caller is making this call too late
	// and the resource in question was missed in the initial cache sync.
	if r.CacheStatus.Synchronized() {
		r.logger.Error(
			"BlockWaitGroupToSyncResources called after Caches have already synced",
			logfields.Resource, resourceName,
		)
		return
	}
	ch := make(chan struct{})
	r.Lock()
	// Lazily initialize the bookkeeping maps on first use.
	if r.resources == nil {
		r.resources = make(map[string]<-chan struct{})
		r.stopWait = make(map[string]bool)
		r.timeSinceLastEvent = make(map[string]time.Time)
	}
	r.resources[resourceName] = ch
	r.Unlock()
	go func() {
		scopedLog := r.logger.With(logfields.Resource, resourceName)
		scopedLog.Debug("waiting for cache to synchronize")
		if ok := cache.WaitForCacheSync(stop, hasSyncedFunc); !ok {
			select {
			case <-stop:
				// do not fatal if the channel was stopped
				scopedLog.Debug("canceled cache synchronization")
				r.Lock()
				// Since the wait for cache sync was canceled we
				// need to mark that stopWait was canceled and it
				// should not stop waiting for this resource to be
				// synchronized.
				r.stopWait[resourceName] = false
				r.Unlock()
			default:
				// Fatally exit if the resource fails to sync
				logging.Fatal(
					scopedLog,
					"failed to wait for cache to sync",
				)
			}
		} else {
			scopedLog.Debug("cache synced")
			r.Lock()
			// Since the wait for cache sync was not canceled we need to
			// mark that stopWait not canceled and it should stop
			// waiting for this resource to be synchronized.
			r.stopWait[resourceName] = true
			r.Unlock()
		}
		if swg != nil {
			swg.Stop()
			swg.Wait()
		}
		// Closing ch wakes up WaitForCacheSync; it then consults stopWait to
		// decide whether the sync completed or was canceled.
		close(ch)
	}()
}
// WaitForCacheSync waits for all K8s resources represented by
// resourceNames to have their K8s caches synchronized. A resource whose wait
// was canceled (stopWait false) is re-polled until it either syncs on a new
// channel or is removed from the resource map.
func (r *Resources) WaitForCacheSync(resourceNames ...string) {
	for _, resourceName := range resourceNames {
		r.RLock()
		c, ok := r.resources[resourceName]
		r.RUnlock()
		if !ok {
			continue
		}
		// The scoped logger is invariant across retries of the inner loop;
		// build it once per resource instead of on every iteration.
		scopedLog := r.logger.With(logfields.Resource, resourceName)
		for {
			// Blocks until BlockWaitGroupToSyncResources closes the channel.
			<-c
			r.RLock()
			stopWait := r.stopWait[resourceName]
			r.RUnlock()
			if stopWait {
				scopedLog.Debug("stopped waiting for caches to be synced")
				break
			}
			scopedLog.Debug("original cache sync operation was aborted, waiting for caches to be synced with a new channel...")
			time.Sleep(syncedPollPeriod)
			r.RLock()
			c, ok = r.resources[resourceName]
			r.RUnlock()
			if !ok {
				break
			}
		}
	}
}
// syncedPollPeriod is the poll period used while waiting for the underlying
// client-go cache sync to be retried on a new channel.
const syncedPollPeriod = 100 * time.Millisecond
// WaitForCacheSyncWithTimeout waits for K8s resources represented by resourceNames to be synced.
// For every resource type, if an event happens after starting the wait, the timeout will be pushed out
// to be the time of the last event plus the timeout duration.
func (r *Resources) WaitForCacheSyncWithTimeout(ctx context.Context, timeout time.Duration, resourceNames ...string) error {
	// Upon completion, release event map to reduce unnecessary memory usage.
	// SetEventTimestamp calls to nil event time map are no-op.
	// Running BlockWaitGroupToSyncResources will reinitialize the event map.
	defer func() {
		r.Lock()
		r.timeSinceLastEvent = nil
		r.Unlock()
	}()
	wg := &errgroup.Group{}
	for _, resource := range resourceNames {
		done := make(chan struct{})
		// NOTE(review): if ctx is canceled before the cache syncs, this
		// goroutine keeps blocking in WaitForCacheSync until the resource's
		// channel closes — confirm this lifetime is acceptable for callers.
		go func(resource string) {
			r.WaitForCacheSync(resource)
			close(done)
		}(resource)
		waitFn := func(resource string) func() error {
			return func() error {
				currTimeout := timeout + syncedPollPeriod // add buffer of the poll period.
				for {
					// Wait until after timeout ends or sync is completed.
					// If timeout is reached, check if an event occurred that would
					// have pushed back the timeout and wait for that amount of time.
					select {
					case now := <-time.After(currTimeout):
						lastEvent, never := r.getTimeOfLastEvent(resource)
						if never {
							return fmt.Errorf("timed out after %s, never received event for resource %q", timeout, resource)
						}
						if now.After(lastEvent.Add(timeout)) {
							return fmt.Errorf("timed out after %s since receiving last event for resource %q", timeout, resource)
						}
						// We reset the timer to wait the timeout period minus the
						// time since the last event.
						currTimeout = timeout - time.Since(lastEvent)
						r.logger.Debug(
							"received event for resource type, waiting before timeout",
							logfields.Resource, resource,
							logfields.LastEventReceived, time.Since(lastEvent),
							logfields.Timeout, currTimeout,
						)
					case <-done:
						r.logger.Debug(
							"cache has synced, stopping timeout watcher",
							logfields.Resource, resource,
						)
						return nil
					case <-ctx.Done():
						r.logger.Info("stop waiting for cache sync due to cancellation", logfields.Resource, resource)
						return nil
					}
				}
			}
		}(resource)
		wg.Go(waitFn)
	}
	return wg.Wait()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package utils
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
k8sRuntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
// typedListWatcher is a generic interface that all the typed k8s clients match.
// T is the concrete list type returned by List.
type typedListWatcher[T k8sRuntime.Object] interface {
	List(ctx context.Context, opts metav1.ListOptions) (T, error)
	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
}
// genListWatcher takes a typed list watcher and implements cache.ListWatch
// using it, erasing the concrete list type behind k8sRuntime.Object.
type genListWatcher[T k8sRuntime.Object] struct {
	lw typedListWatcher[T]
}
// List delegates to the typed client's List using a background context.
func (g *genListWatcher[T]) List(opts metav1.ListOptions) (k8sRuntime.Object, error) {
	obj, err := g.lw.List(context.Background(), opts)
	return obj, err
}
// Watch delegates to the typed client's Watch using a background context.
func (g *genListWatcher[T]) Watch(opts metav1.ListOptions) (watch.Interface, error) {
	w, err := g.lw.Watch(context.Background(), opts)
	return w, err
}
// ListerWatcherFromTyped adapts a typed k8s client to cache.ListerWatcher so it can be used
// with an informer. With this construction we can use fake clients for testing,
// which would not be possible if we used NewListWatchFromClient and RESTClient().
func ListerWatcherFromTyped[T k8sRuntime.Object](lw typedListWatcher[T]) cache.ListerWatcher {
	adapter := genListWatcher[T]{lw: lw}
	return &adapter
}
// listWatcherWithModifier wraps a ListerWatcher and applies optsModifier to
// the ListOptions before every List and Watch call.
type listWatcherWithModifier struct {
	inner        cache.ListerWatcher
	optsModifier func(*metav1.ListOptions)
}
// List applies the options modifier and forwards to the wrapped ListerWatcher.
func (lw *listWatcherWithModifier) List(opts metav1.ListOptions) (k8sRuntime.Object, error) {
	lw.optsModifier(&opts)
	obj, err := lw.inner.List(opts)
	return obj, err
}
// Watch applies the options modifier and forwards to the wrapped ListerWatcher.
func (lw *listWatcherWithModifier) Watch(opts metav1.ListOptions) (watch.Interface, error) {
	lw.optsModifier(&opts)
	w, err := lw.inner.Watch(opts)
	return w, err
}
// ListerWatcherWithFields restricts the given ListerWatcher to objects
// matching fieldSelector.
func ListerWatcherWithFields(lw cache.ListerWatcher, fieldSelector fields.Selector) cache.ListerWatcher {
	setSelector := func(opts *metav1.ListOptions) {
		opts.FieldSelector = fieldSelector.String()
	}
	return ListerWatcherWithModifier(lw, setSelector)
}
// ListerWatcherWithModifier wraps lw so that optsModifier is applied to the
// ListOptions of every List and Watch call.
func ListerWatcherWithModifier(lw cache.ListerWatcher, optsModifier func(*metav1.ListOptions)) cache.ListerWatcher {
	wrapped := listWatcherWithModifier{
		inner:        lw,
		optsModifier: optsModifier,
	}
	return &wrapped
}
// ListerWatcherWithModifiers layers each modifier around lw in order, so the
// first modifier in opts is applied first to every List/Watch call.
func ListerWatcherWithModifiers(lw cache.ListerWatcher, opts ...func(*metav1.ListOptions)) cache.ListerWatcher {
	wrapped := lw
	for _, modifier := range opts {
		wrapped = ListerWatcherWithModifier(wrapped, modifier)
	}
	return wrapped
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package utils
import (
"strings"
v1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1"
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection"
labelsPkg "github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/slices"
)
const (
	// serviceProxyNameLabel is the label for service proxy name in k8s service related
	// objects.
	serviceProxyNameLabel = "service.kubernetes.io/service-proxy-name"
	// EndpointSliceMeshControllerName is a unique value used with LabelManagedBy to indicate
	// the component managing an EndpointSlice.
	EndpointSliceMeshControllerName = "endpointslice-mesh-controller.cilium.io"
)
// NamespaceNameGetter is the subset of ObjectMeta accessors needed to derive
// an object's namespace and name.
type NamespaceNameGetter interface {
	GetNamespace() string
	GetName() string
}
// ExtractNamespace extracts the namespace of ObjectMeta.
// For cluster scoped objects the Namespace field is empty and this function
// assumes that the object is returned from kubernetes itself implying that
// the namespace is empty only and only when the Object is cluster scoped
// and thus returns empty namespace for such objects.
func ExtractNamespace(np NamespaceNameGetter) string {
	ns := np.GetNamespace()
	return ns
}
// ExtractNamespaceOrDefault extracts the namespace of ObjectMeta, it returns default
// namespace if the namespace field in the ObjectMeta is empty.
func ExtractNamespaceOrDefault(np NamespaceNameGetter) string {
	if ns := np.GetNamespace(); ns != "" {
		return ns
	}
	return v1.NamespaceDefault
}
// GetObjNamespaceName returns the object's namespace and name.
// If the object is cluster scoped then the function returns only the object name
// without any namespace prefix.
func GetObjNamespaceName(obj NamespaceNameGetter) string {
	name := obj.GetName()
	if ns := ExtractNamespace(obj); ns != "" {
		return ns + "/" + name
	}
	return name
}
// GetEndpointSliceListOptionsModifier returns the options modifier for endpointSlice object list.
// This methods returns a ListOptions modifier which adds a label selector to
// select all endpointSlice objects they are not from remote clusters in Cilium cluster mesh.
// This is mostly the same behavior as kube-proxy except the cluster mesh behavior which is
// tied to how Cilium internally works with clustermesh endpoints and that this function also doesn't ignore headless Services.
// Given label mirroring from the service objects to endpoint slice objects were introduced in Kubernetes PR 94443,
// and released as part of Kubernetes v1.20; we can start using GetServiceAndEndpointListOptionsModifier for
// endpoint slices when dropping support for Kubernetes v1.19 and older. We can do that since the
// serviceProxyNameLabel label will then be mirrored to endpoint slices for services with that label.
// We also ignore Kubernetes endpoints coming from other clusters in the Cilium clustermesh here as
// Cilium does not rely on mirrored Kubernetes EndpointSlice for any of its functionalities.
func GetEndpointSliceListOptionsModifier() (func(options *v1meta.ListOptions), error) {
	notMeshManaged, err := labels.NewRequirement(discoveryv1.LabelManagedBy, selection.NotEquals, []string{EndpointSliceMeshControllerName})
	if err != nil {
		return nil, err
	}
	selector := labels.NewSelector().Add(*notMeshManaged)
	return func(options *v1meta.ListOptions) {
		options.LabelSelector = selector.String()
	}, nil
}
// GetServiceAndEndpointListOptionsModifier returns the options modifier for service and endpoint object lists.
// This methods returns a ListOptions modifier which adds a label selector to only
// select services that are in context of Cilium.
// Unlike kube-proxy Cilium does not select services/endpoints containing k8s headless service label.
// We honor service.kubernetes.io/service-proxy-name label in the service object and only
// handle services that match our service proxy name. If the service proxy name for Cilium
// is an empty string, we assume that Cilium is the default service handler in which case
// we select all services that don't have the above mentioned label.
func GetServiceAndEndpointListOptionsModifier(k8sServiceProxy string) (func(options *v1meta.ListOptions), error) {
	var (
		proxyNameReq *labels.Requirement
		err          error
	)
	switch k8sServiceProxy {
	case "":
		// Cilium is the default proxy: select only objects without the label.
		proxyNameReq, err = labels.NewRequirement(
			serviceProxyNameLabel, selection.DoesNotExist, nil)
	default:
		// Select only objects explicitly labeled for this proxy name.
		proxyNameReq, err = labels.NewRequirement(
			serviceProxyNameLabel, selection.DoubleEquals, []string{k8sServiceProxy})
	}
	if err != nil {
		return nil, err
	}
	selector := labels.NewSelector().Add(*proxyNameReq)
	return func(options *v1meta.ListOptions) {
		options.LabelSelector = selector.String()
	}, nil
}
// GetLatestPodReadiness returns the status of the first PodReady condition
// found on the given pod status, or ConditionUnknown when none exists.
func GetLatestPodReadiness(podStatus slim_corev1.PodStatus) slim_corev1.ConditionStatus {
	for i := range podStatus.Conditions {
		if cond := &podStatus.Conditions[i]; cond.Type == slim_corev1.PodReady {
			return cond.Status
		}
	}
	return slim_corev1.ConditionUnknown
}
// ValidIPs returns a sorted slice of unique IP addresses retrieved from the
// given PodStatus. It returns nil when the status carries no IP addresses at
// all (neither PodIP nor PodIPs is populated).
//
// Note: the previous doc comment claimed an error was returned when no IPs
// were found; the function has no error result — the empty case yields nil.
func ValidIPs(podStatus slim_corev1.PodStatus) []string {
	if len(podStatus.PodIPs) == 0 && len(podStatus.PodIP) == 0 {
		return nil
	}
	// Collect every non-empty address; duplicates are removed by
	// SortedUnique below. Pre-size for PodIP plus all PodIPs entries.
	ips := make([]string, 0, len(podStatus.PodIPs)+1)
	if podStatus.PodIP != "" {
		ips = append(ips, podStatus.PodIP)
	}
	for _, podIP := range podStatus.PodIPs {
		if podIP.IP != "" {
			ips = append(ips, podIP.IP)
		}
	}
	return slices.SortedUnique(ips)
}
// IsPodRunning returns true if the pod is considered to be in running state.
// We consider a Running pod a pod that does not report a Failed nor a Succeeded
// pod Phase.
func IsPodRunning(status slim_corev1.PodStatus) bool {
	terminated := status.Phase == slim_corev1.PodFailed ||
		status.Phase == slim_corev1.PodSucceeded
	return !terminated
}
// nameLabelsGetter is an interface that returns the name and the labels for
// the namespace.
type nameLabelsGetter interface {
	GetName() string
	GetLabels() map[string]string
}
// RemoveCiliumLabels returns a copy of the given labels map, without the labels owned by Cilium.
func RemoveCiliumLabels(labels map[string]string) map[string]string {
	filtered := map[string]string{}
	for key, value := range labels {
		if !strings.HasPrefix(key, k8sconst.LabelPrefix) {
			filtered[key] = value
		}
	}
	return filtered
}
// SanitizePodLabels makes sure that no important pod labels were overridden manually on k8s pod
// object creation.
func SanitizePodLabels(podLabels map[string]string, namespace nameLabelsGetter, serviceAccount, clusterName string) map[string]string {
	out := RemoveCiliumLabels(podLabels)
	// Mirror the namespace's labels under the namespace-meta prefix,
	// overriding any manually supplied values.
	for key, value := range namespace.GetLabels() {
		out[joinPath(k8sconst.PodNamespaceMetaLabels, key)] = value
	}
	// Force the namespace name label to the authoritative value.
	out[k8sconst.PodNamespaceLabel] = namespace.GetName()
	// Force the service account label, or drop it when none is set.
	if serviceAccount == "" {
		delete(out, k8sconst.PolicyLabelServiceAccount)
	} else {
		out[k8sconst.PolicyLabelServiceAccount] = serviceAccount
	}
	// Force the cluster name label.
	out[k8sconst.PolicyLabelCluster] = clusterName
	return out
}
// StripPodSpecialLabels strips labels that are not supposed to be coming from a k8s pod object update.
func StripPodSpecialLabels(labels map[string]string) map[string]string {
	out := make(map[string]string)
	for key, value := range RemoveCiliumLabels(labels) {
		// Drop namespace-meta prefixed keys and the namespace name label;
		// those are owned by the sanitization logic, not the pod object.
		isNamespaceMeta := strings.HasPrefix(key, k8sconst.PodNamespaceMetaLabels)
		if isNamespaceMeta || key == k8sconst.PodNamespaceLabel {
			continue
		}
		out[key] = value
	}
	return out
}
// joinPath mimics JoinPath from pkg/policy/utils, which could not be imported here due to circular dependency
func joinPath(a, b string) string {
	return strings.Join([]string{a, b}, labelsPkg.PathDelimiter)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright Istio Authors
// Copyright Authors of Hubble
// GetWorkloadMetaFromPod and cronJobNameRegexp are copied from
// https://github.com/istio/istio/blob/1aca7a67afd7b3e1d24fafb2fbfbeaf1e41534c0/pkg/kube/util.go
//
// Modifications:
// GetDeployMetaFromPod has been renamed to GetWorkloadMetaFromPod and has
// been updated to use the cilium slim API types.
// We do not store the APIVersion of the owning workload in the TypeMeta
// either, because it isn't needed for our purposes, and our slim types do not
// have this field.
// We fallback to the pod's ownerReference if we cannot find a more suitable
// workload based on heuristics, whereas the original code defaulted to the
// pod's name. This may be the case when using ReplicaSets without a Deployment.
package utils
import (
"regexp"
"strings"
slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
)
// cronJobNameRegexp matches Job names generated by a CronJob: a parent name
// followed by a dash and an 8-10 digit timestamp suffix.
var cronJobNameRegexp = regexp.MustCompile(`(.+)-\d{8,10}$`)
// GetWorkloadMetaFromPod heuristically derives workload metadata from the pod spec.
// It returns the derived object meta, a type meta whose Kind names the owning
// workload, and a bool that is true when a controlling owner reference was found.
func GetWorkloadMetaFromPod(pod *slim_corev1.Pod) (slim_metav1.ObjectMeta, slim_metav1.TypeMeta, bool) {
	if pod == nil {
		return slim_metav1.ObjectMeta{}, slim_metav1.TypeMeta{}, false
	}
	// try to capture more useful namespace/name info for deployments, etc.
	// TODO(dougreid): expand to enable lookup of OWNERs recursively a la kubernetesenv
	workloadObjectMeta := pod.ObjectMeta
	workloadObjectMeta.OwnerReferences = nil
	var ok bool
	var typeMetadata slim_metav1.TypeMeta
	if len(pod.GenerateName) > 0 {
		// if the pod name was generated (or is scheduled for generation), we can begin an investigation into the controlling reference for the pod.
		var controllerRef slim_metav1.OwnerReference
		controllerFound := false
		for _, ref := range pod.OwnerReferences {
			if ref.Controller != nil && *ref.Controller {
				controllerRef = ref
				controllerFound = true
				break
			}
		}
		if controllerFound {
			ok = true
			// default to the owner kind/name
			typeMetadata.Kind = controllerRef.Kind
			workloadObjectMeta.Name = controllerRef.Name
			// heuristic for deployment detection: a ReplicaSet owned by a
			// Deployment is named "<deployment>-<pod-template-hash>".
			if typeMetadata.Kind == "ReplicaSet" && pod.Labels["pod-template-hash"] != "" && strings.HasSuffix(controllerRef.Name, pod.Labels["pod-template-hash"]) {
				name := strings.TrimSuffix(controllerRef.Name, "-"+pod.Labels["pod-template-hash"])
				workloadObjectMeta.Name = name
				typeMetadata.Kind = "Deployment"
			} else if typeMetadata.Kind == "ReplicaSet" && pod.Labels["pod-template-hash"] == "" {
				// Bare ReplicaSet (no Deployment): keep the owner's name/kind.
				workloadObjectMeta.Name = controllerRef.Name
				typeMetadata.Kind = "ReplicaSet"
			} else if typeMetadata.Kind == "ReplicationController" && pod.Labels["deploymentconfig"] != "" {
				// If the pod is controlled by the replication controller, which is created by the DeploymentConfig resource in
				// Openshift platform, set the deploy name to the deployment config's name, and the kind to 'DeploymentConfig'.
				//
				// nolint: lll
				// For DeploymentConfig details, refer to
				// https://docs.openshift.com/container-platform/4.1/applications/deployments/what-deployments-are.html#deployments-and-deploymentconfigs_what-deployments-are
				//
				// For the reference to the pod label 'deploymentconfig', refer to
				// https://github.com/openshift/library-go/blob/7a65fdb398e28782ee1650959a5e0419121e97ae/pkg/apps/appsutil/const.go#L25
				workloadObjectMeta.Name = pod.Labels["deploymentconfig"]
				typeMetadata.Kind = "DeploymentConfig"
				delete(workloadObjectMeta.Labels, "deploymentconfig")
			} else if typeMetadata.Kind == "Job" {
				// If job name suffixed with `-<digit-timestamp>`, where the length of digit timestamp is 8~10,
				// trim the suffix and set kind to cron job.
				if jn := cronJobNameRegexp.FindStringSubmatch(controllerRef.Name); len(jn) == 2 {
					workloadObjectMeta.Name = jn[1]
					typeMetadata.Kind = "CronJob"
				}
			}
		}
	}
	return workloadObjectMeta, typeMetadata, ok
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package version keeps track of the Kubernetes version the client is
// connected to
package version
import (
"fmt"
"log/slog"
"github.com/blang/semver/v4"
"k8s.io/client-go/kubernetes"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/versioncheck"
)
// ServerCapabilities is a list of server capabilities derived based on
// version, the Kubernetes discovery API, or probing of individual API
// endpoints.
type ServerCapabilities struct {
	// MinimalVersionMet is true when the minimal version of Kubernetes
	// required to run Cilium has been met
	MinimalVersionMet bool
}
// cachedVersion holds the last retrieved apiserver version together with the
// capabilities derived from it, guarded by mutex.
type cachedVersion struct {
	mutex        lock.RWMutex
	capabilities ServerCapabilities
	version      semver.Version
}
const (
	// MinimalVersionConstraint is the minimal Kubernetes version that Cilium
	// supports running against.
	MinimalVersionConstraint = "1.21.0"
)
var (
	// cached is the package-level store for the detected apiserver version
	// and the capabilities derived from it.
	cached = cachedVersion{}
	// isGEThanMinimalVersionConstraint is the minimal version required to run
	// Cilium
	isGEThanMinimalVersionConstraint = versioncheck.MustCompile(">=" + MinimalVersionConstraint)
)
// Version returns the version of the Kubernetes apiserver
func Version() semver.Version {
	cached.mutex.RLock()
	defer cached.mutex.RUnlock()
	return cached.version
}
// Capabilities returns the capabilities of the Kubernetes apiserver
func Capabilities() ServerCapabilities {
	cached.mutex.RLock()
	defer cached.mutex.RUnlock()
	return cached.capabilities
}
// updateVersion stores the given apiserver version and recomputes the derived
// capabilities under the cache lock.
func updateVersion(version semver.Version) {
	cached.mutex.Lock()
	cached.version = version
	cached.capabilities.MinimalVersionMet = isGEThanMinimalVersionConstraint(version)
	cached.mutex.Unlock()
}
// Force forces the use of a specific version
func Force(version string) error {
	parsed, err := versioncheck.Version(version)
	if err != nil {
		return err
	}
	updateVersion(parsed)
	return nil
}
// updateK8sServerVersion queries the discovery API for the server version and
// caches it. The full GitVersion string (e.g. "v1.9.0") is preferred; when it
// cannot be parsed the "Major.Minor" pair is tried as a fallback.
func updateK8sServerVersion(client kubernetes.Interface) error {
	sv, err := client.Discovery().ServerVersion()
	if err != nil {
		return err
	}
	var ver semver.Version
	if sv.GitVersion != "" {
		if ver, err = versioncheck.Version(sv.GitVersion); err == nil {
			updateVersion(ver)
			return nil
		}
	}
	if sv.Major != "" && sv.Minor != "" {
		if ver, err = versioncheck.Version(fmt.Sprintf("%s.%s", sv.Major, sv.Minor)); err == nil {
			updateVersion(ver)
			return nil
		}
	}
	// err still holds the most recent parse failure (if any attempt ran).
	return fmt.Errorf("cannot parse k8s server version from %+v: %w", sv, err)
}
// Update retrieves the version of the Kubernetes apiserver and derives the
// capabilities. This function must be called after connectivity to the
// apiserver has been established.
//
// NOTE(review): logger and apiDiscoveryEnabled are not referenced in the
// body; they are kept for call-site compatibility.
func Update(logger *slog.Logger, client kubernetes.Interface, apiDiscoveryEnabled bool) error {
	return updateK8sServerVersion(client)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// This package contains exported resource identifiers and metric resource labels related to
// K8s watchers.
package resources
import (
"k8s.io/apimachinery/pkg/api/meta"
"github.com/cilium/cilium/pkg/container/cache"
)
const (
	// K8sAPIGroupNetworkingV1Core is the identifier for K8S resources of type networking.k8s.io/v1/NetworkPolicy
	K8sAPIGroupNetworkingV1Core = "networking.k8s.io/v1::NetworkPolicy"
	// K8sAPIGroupPodV1Core is the identifier for K8s resources of type core/v1/Pod.
	K8sAPIGroupPodV1Core = "core/v1::Pods"
	// K8sAPIGroupEndpointSliceOrEndpoint is the combined identifier for K8s EndpointSlice and
	// Endpoint resources.
	K8sAPIGroupEndpointSliceOrEndpoint = "EndpointSliceOrEndpoint"
	// MetricCNP is the scope label for CiliumNetworkPolicy event metrics.
	MetricCNP = "CiliumNetworkPolicy"
	// MetricCCNP is the scope label for CiliumClusterwideNetworkPolicy event metrics.
	MetricCCNP = "CiliumClusterwideNetworkPolicy"
	// MetricCCG is the scope label for CiliumCIDRGroup event metrics.
	MetricCCG = "CiliumCIDRGroup"
	// MetricService is the scope label for Kubernetes Service event metrics.
	MetricService = "Service"
	// MetricEndpoint is the scope label for Kubernetes Endpoint event metrics.
	MetricEndpoint = "Endpoint"
	// MetricEndpointSlice is the scope label for Kubernetes EndpointSlice event metrics.
	MetricEndpointSlice = "EndpointSlice"
	// MetricCreate is the label for watcher metrics related to create events.
	MetricCreate = "create"
	// MetricUpdate is the label for watcher metrics related to update events.
	MetricUpdate = "update"
	// MetricDelete is the label for watcher metrics related to delete events.
	MetricDelete = "delete"
)
// DedupMetadata deduplicates the allocated strings in the object's metadata
// using the container/cache package, so that many objects sharing the same
// name, namespace, labels or annotations reference a single canonical copy.
//
// Objects that do not expose ObjectMeta (meta.Accessor fails) are left
// untouched.
func DedupMetadata(obj any) {
	// Renamed from "meta" to avoid shadowing the imported meta package.
	objMeta, err := meta.Accessor(obj)
	if err != nil {
		// Not a metadata-carrying object; nothing to deduplicate.
		return
	}
	objMeta.SetName(cache.Strings.Get(objMeta.GetName()))
	objMeta.SetNamespace(cache.Strings.Get(objMeta.GetNamespace()))
	objMeta.SetLabels(cache.StringMaps.Get(objMeta.GetLabels()))
	objMeta.SetAnnotations(cache.StringMaps.Get(objMeta.GetAnnotations()))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kpr
import (
"fmt"
"github.com/cilium/hive/cell"
"github.com/spf13/pflag"
)
// Cell provides the kube-proxy-replacement (KPR) configuration to the hive:
// it registers the KPR flags and constructs the derived KPRConfig.
var Cell = cell.Module(
	"kube-proxy-replacement",
	"Provides KPR config",
	cell.Config(defaultFlags),
	cell.Provide(NewKPRConfig),
)
// KPRFlags are the user-facing command-line flags controlling kube-proxy
// replacement. Fields map to the flags registered in Flags(); EnableSocketLB
// carries an explicit mapstructure tag because its flag name ("bpf-lb-sock")
// does not follow the default field-name derivation.
type KPRFlags struct {
	KubeProxyReplacement string
	EnableSocketLB       bool `mapstructure:"bpf-lb-sock"`
	EnableNodePort       bool
	EnableExternalIPs    bool
	EnableHostPort       bool
}
// defaultFlags holds the default values for the KPR flags: everything is
// disabled unless explicitly turned on.
var defaultFlags = KPRFlags{
	KubeProxyReplacement: "false",
	EnableSocketLB:       false,
	EnableNodePort:       false,
	EnableExternalIPs:    false,
	EnableHostPort:       false,
}
// Flags registers the kube-proxy-replacement command-line flags on the given
// flag set. The three per-feature flags are deprecated in favor of enabling
// kube-proxy-replacement wholesale.
func (def KPRFlags) Flags(flags *pflag.FlagSet) {
	const removalNote = "The flag will be removed in v1.19. The feature will be unconditionally enabled by enabling kube-proxy-replacement"

	flags.String("kube-proxy-replacement", def.KubeProxyReplacement, "Enable kube-proxy replacement")
	flags.Bool("bpf-lb-sock", def.EnableSocketLB, "Enable socket-based LB for E/W traffic")

	flags.Bool("enable-node-port", def.EnableNodePort, "Enable NodePort type services by Cilium")
	flags.MarkDeprecated("enable-node-port", removalNote)

	flags.Bool("enable-external-ips", def.EnableExternalIPs, "Enable k8s service externalIPs feature (requires enabling enable-node-port)")
	flags.MarkDeprecated("enable-external-ips", removalNote)

	flags.Bool("enable-host-port", def.EnableHostPort, "Enable k8s hostPort mapping feature (requires enabling enable-node-port)")
	flags.MarkDeprecated("enable-host-port", removalNote)
}
// KPRConfig is the effective kube-proxy-replacement configuration derived
// from KPRFlags by NewKPRConfig (after force-enabling/disabling dependent
// features).
type KPRConfig struct {
	KubeProxyReplacement string
	EnableNodePort       bool
	EnableExternalIPs    bool
	EnableHostPort       bool
	EnableSocketLB       bool
}
// NewKPRConfig validates the KPR flags and derives the effective
// configuration:
//   - kube-proxy-replacement must be exactly "true" or "false";
//   - "true" force-enables NodePort, externalIPs, hostPort and socket LB;
//   - features that require NodePort are force-disabled when NodePort is off.
func NewKPRConfig(flags KPRFlags) (KPRConfig, error) {
	if flags.KubeProxyReplacement != "true" && flags.KubeProxyReplacement != "false" {
		// Include the offending value so misconfiguration is debuggable
		// from the error alone.
		return KPRConfig{}, fmt.Errorf("invalid value for kube-proxy-replacement: %q", flags.KubeProxyReplacement)
	}
	cfg := KPRConfig{
		KubeProxyReplacement: flags.KubeProxyReplacement,
		EnableNodePort:       flags.EnableNodePort,
		EnableExternalIPs:    flags.EnableExternalIPs,
		EnableHostPort:       flags.EnableHostPort,
		EnableSocketLB:       flags.EnableSocketLB,
	}
	if flags.KubeProxyReplacement == "true" {
		cfg.EnableNodePort = true
		cfg.EnableExternalIPs = true
		cfg.EnableHostPort = true
		cfg.EnableSocketLB = true
	}
	if !cfg.EnableNodePort {
		// HostPort and externalIPs depend on NodePort support.
		cfg.EnableHostPort = false
		cfg.EnableExternalIPs = false
	}
	return cfg, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package allocator
import (
"context"
"fmt"
"log/slog"
"path"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/allocator"
"github.com/cilium/cilium/pkg/idpool"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/rate"
)
// kvstoreBackend is an implementation of pkg/allocator.Backend. It stores
// identities in the following format:
//
// Slave keys:
//
// Slave keys are owned by individual nodes:
//   - basePath/value/key1/node1 => 1001
//   - basePath/value/key1/node2 => 1001
//   - basePath/value/key2/node1 => 1002
//   - basePath/value/key2/node2 => 1002
//
// If at least one key exists with the prefix basePath/value/keyN then that
// key must be considered to be in use in the allocation space.
//
// Slave keys are protected by a lease and will automatically get removed
// after ~ option.Config.KVstoreLeaseTTL if the node does not renew in time.
//
// Master key:
//   - basePath/id/1001 => key1
//   - basePath/id/1002 => key2
//
// Master keys provide the mapping from ID to key. As long as a master key
// for an ID exists, the ID is still in use. However, if a master key is no
// longer backed by at least one slave key, the garbage collector will
// eventually release the master key and return it back to the pool.
type kvstoreBackend struct {
	logger *slog.Logger
	// basePrefix is the prefix in the kvstore that all keys share which
	// are being managed by this allocator. The basePrefix typically
	// consists of something like: "space/project/allocatorName"
	basePrefix string
	// idPrefix is the kvstore key prefix for all master keys. It is being
	// derived from the basePrefix.
	idPrefix string
	// valuePrefix is the kvstore key prefix for all slave keys. It is
	// being derived from the basePrefix.
	valuePrefix string
	// lockPrefix is the prefix to use for all kvstore locks. This prefix
	// is different from the idPrefix and valuePrefix to simplify watching
	// for ID and key changes.
	lockPrefix string
	// suffix is the suffix attached to keys which must be node specific,
	// this is typically set to the node's IP address
	suffix string
	// backend is the underlying kvstore client used for all operations.
	backend kvstore.BackendOperations
	// keyType is a prototype value used to reconstruct AllocatorKeys from
	// their stored string representation via PutKey().
	keyType allocator.AllocatorKey
}
// prefixMatchesKey reports whether key is a direct slave key of prefix,
// i.e. key has the form "<prefix>/<node-suffix>", e.g.
//
//	cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
//
// This is true exactly when the last '/' in key sits at the position
// immediately following prefix.
func prefixMatchesKey(prefix, key string) bool {
	return strings.LastIndex(key, "/") == len(prefix)
}
// KVStoreBackendConfiguration carries the parameters needed to construct a
// kvstore-backed allocator backend via NewKVStoreBackend.
type KVStoreBackendConfiguration struct {
	// BasePath is the kvstore prefix under which all allocator keys live.
	BasePath string
	// Suffix is the node-specific suffix appended to slave keys.
	Suffix string
	// Typ is a prototype AllocatorKey used to decode stored keys.
	Typ allocator.AllocatorKey
	// Backend is the kvstore client; must be non-nil.
	Backend kvstore.BackendOperations
}
// NewKVStoreBackend creates a pkg/allocator.Backend compatible instance. The
// specific kvstore used is configured in pkg/kvstore.
func NewKVStoreBackend(logger *slog.Logger, c KVStoreBackendConfiguration) (allocator.Backend, error) {
	if c.Backend == nil {
		return nil, fmt.Errorf("kvstore client not configured")
	}
	// All key prefixes are derived from the configured base path.
	backend := &kvstoreBackend{
		logger:      logger.With(logfields.LogSubsys, "kvstorebackend"),
		backend:     c.Backend,
		keyType:     c.Typ,
		suffix:      c.Suffix,
		basePrefix:  c.BasePath,
		idPrefix:    path.Join(c.BasePath, "id"),
		valuePrefix: path.Join(c.BasePath, "value"),
		lockPrefix:  path.Join(c.BasePath, "locks"),
	}
	return backend, nil
}
// lockPath locks a key in the scope of an allocator. The lock is taken under
// lockPrefix using the key's path relative to basePrefix, so lock keys never
// collide with id/value keys.
func (k *kvstoreBackend) lockPath(ctx context.Context, key string) (*kvstore.Lock, error) {
	suffix := strings.TrimPrefix(key, k.basePrefix)
	return kvstore.LockPath(ctx, k.logger, k.backend, path.Join(k.lockPrefix, suffix))
}
// DeleteAllKeys will delete all keys managed by this allocator (master,
// slave and lock keys) by removing the entire basePrefix. Errors from the
// underlying delete are intentionally not surfaced (best effort).
func (k *kvstoreBackend) DeleteAllKeys(ctx context.Context) {
	k.backend.DeletePrefix(ctx, k.basePrefix)
}
// DeleteID removes the master key for the given ID, releasing the ID->key
// mapping from the kvstore.
func (k *kvstoreBackend) DeleteID(ctx context.Context, id idpool.ID) error {
	return k.backend.Delete(ctx, path.Join(k.idPrefix, id.String()))
}
// AllocateID allocates a key->ID mapping in the kvstore.
func (k *kvstoreBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (allocator.AllocatorKey, error) {
	// create /id/<ID> and fail if it already exists
	keyPath := path.Join(k.idPrefix, id.String())
	success, err := k.backend.CreateOnly(ctx, keyPath, []byte(key.GetKey()), false)
	if err != nil || !success {
		// NOTE(review): when success is false with a nil err (key already
		// existed), %w wraps a nil error; the message still identifies the
		// conflicting keyPath.
		return nil, fmt.Errorf("unable to create master key '%s': %w", keyPath, err)
	}
	return key, nil
}
// AllocateIDIfLocked allocates a key->ID mapping in the kvstore while the
// caller still holds the given lock.
func (k *kvstoreBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) (allocator.AllocatorKey, error) {
	// create /id/<ID> and fail if it already exists
	keyPath := path.Join(k.idPrefix, id.String())
	success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, []byte(key.GetKey()), false, lock)
	if err != nil || !success {
		// NOTE(review): as in AllocateID, err may be nil here when the key
		// already exists; %w then wraps a nil error.
		return nil, fmt.Errorf("unable to create master key '%s': %w", keyPath, err)
	}
	return key, nil
}
// AcquireReference marks that this node is using this key->ID mapping in the
// kvstore by creating the node-specific slave key.
func (k *kvstoreBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
	name := key.GetKey()
	err := k.createValueNodeKey(ctx, name, id, lock)
	if err != nil {
		return fmt.Errorf("unable to create slave key '%s': %w", name, err)
	}
	return nil
}
// createValueNodeKey records that this "node" is using this key->ID by
// writing the node-specific slave key under valuePrefix.
func (k *kvstoreBackend) createValueNodeKey(ctx context.Context, key string, newID idpool.ID, lock kvstore.KVLocker) error {
	// add a new key /value/<key>/<node> to account for the reference
	// The key is protected with a TTL/lease and will expire after LeaseTTL
	valueKey := path.Join(k.valuePrefix, key, k.suffix)
	if _, err := k.backend.UpdateIfDifferentIfLocked(ctx, valueKey, []byte(newID.String()), true, lock); err != nil {
		return fmt.Errorf("unable to create value-node key '%s': %w", valueKey, err)
	}
	return nil
}
// lock locks a key in the scope of an allocator.
//
// It was a byte-for-byte duplicate of lockPath; delegate instead so the
// locking logic lives in exactly one place.
func (k *kvstoreBackend) lock(ctx context.Context, key string) (*kvstore.Lock, error) {
	return k.lockPath(ctx, key)
}
// Lock locks a key in the scope of an allocator. It is the exported entry
// point wrapping the unexported lock helper.
func (k *kvstoreBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
	return k.lock(ctx, key.GetKey())
}
// Get returns the ID which is allocated to a key in the kvstore, or
// idpool.NoID if no matching slave key with a parseable ID exists.
func (k *kvstoreBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) {
	// ListPrefix() will return all keys matching the prefix, the prefix
	// can cover multiple different keys, example:
	//
	// key1 := label1;label2;
	// key2 := label1;label2;label3;
	//
	// In order to retrieve the correct key, the position of the last '/'
	// is significant, e.g.
	//
	// prefix := cilium/state/identities/v1/value/label;foo;
	//
	// key1 := cilium/state/identities/v1/value/label;foo;/172.0.124.60
	// key2 := cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
	//
	// Only key1 should match
	prefix := path.Join(k.valuePrefix, key.GetKey())
	pairs, err := k.backend.ListPrefix(ctx, prefix)
	kvstore.Trace(k.logger, "ListPrefix",
		logfields.Error, err,
		logfields.Prefix, prefix,
		logfields.Entries, len(pairs),
	)
	if err != nil {
		return 0, err
	}
	// Loop variable renamed from "k" so the method receiver is not shadowed.
	for slaveKey, v := range pairs {
		if prefixMatchesKey(prefix, slaveKey) {
			id, err := strconv.ParseUint(string(v.Data), 10, 64)
			if err == nil {
				return idpool.ID(id), nil
			}
		}
	}
	return idpool.NoID, nil
}
// GetIfLocked returns the ID which is allocated to a key in the kvstore
// if the client is still holding the given lock.
func (k *kvstoreBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
	// ListPrefixIfLocked() will return all keys matching the prefix, the prefix
	// can cover multiple different keys, example:
	//
	// key1 := label1;label2;
	// key2 := label1;label2;label3;
	//
	// In order to retrieve the correct key, the position of the last '/'
	// is significant, e.g.
	//
	// prefix := cilium/state/identities/v1/value/label;foo;
	//
	// key1 := cilium/state/identities/v1/value/label;foo;/172.0.124.60
	// key2 := cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
	//
	// Only key1 should match
	prefix := path.Join(k.valuePrefix, key.GetKey())
	pairs, err := k.backend.ListPrefixIfLocked(ctx, prefix, lock)
	// Include the error in the trace for consistency with Get() above.
	kvstore.Trace(k.logger, "ListPrefixLocked",
		logfields.Error, err,
		logfields.Prefix, prefix,
		logfields.Entries, len(pairs),
	)
	if err != nil {
		return 0, err
	}
	// Loop variable renamed from "k" so the method receiver is not shadowed.
	for slaveKey, v := range pairs {
		if prefixMatchesKey(prefix, slaveKey) {
			id, err := strconv.ParseUint(string(v.Data), 10, 64)
			if err == nil {
				return idpool.ID(id), nil
			}
		}
	}
	return idpool.NoID, nil
}
// GetByID returns the key associated with an ID. Returns nil if no key is
// associated with the ID.
func (k *kvstoreBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) {
	value, err := k.backend.Get(ctx, path.Join(k.idPrefix, id.String()))
	switch {
	case err != nil:
		return nil, err
	case value == nil:
		// No master key for this ID.
		return nil, nil
	}
	return k.keyType.PutKey(string(value)), nil
}
// UpdateKey refreshes the record that this node is using this key -> id
// mapping. When reliablyMissing is set it will also recreate missing master or
// slave keys.
func (k *kvstoreBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error {
	var (
		err       error
		recreated bool
		keyPath   = path.Join(k.idPrefix, id.String())
		valueKey  = path.Join(k.valuePrefix, key.GetKey(), k.suffix)
	)
	// Use of CreateOnly() ensures that any existing potentially
	// conflicting key is never overwritten.
	success, err := k.backend.CreateOnly(ctx, keyPath, []byte(key.GetKey()), false)
	switch {
	case err != nil:
		// Fixed: the previous message passed the logfields.Key label
		// constant and the slave key into its format verbs; report the
		// master key path instead.
		return fmt.Errorf("unable to re-create missing master key %q: %w", keyPath, err)
	case success:
		k.logger.Warn(
			"Re-created missing master key",
			logfields.Key, keyPath,
		)
	}
	// Also re-create the slave key in case it has been deleted. This will
	// ensure that the next garbage collection cycle of any participating
	// node does not remove the master key again.
	if reliablyMissing {
		recreated, err = k.backend.CreateOnly(ctx, valueKey, []byte(id.String()), true)
	} else {
		recreated, err = k.backend.UpdateIfDifferent(ctx, valueKey, []byte(id.String()), true)
	}
	switch {
	case err != nil:
		return fmt.Errorf("unable to re-create missing slave key %q: %w", valueKey, err)
	case recreated:
		k.logger.Warn(
			"Re-created missing slave key",
			logfields.Key, valueKey,
		)
	}
	return nil
}
// UpdateKeyIfLocked refreshes the record that this node is using this key -> id
// mapping. When reliablyMissing is set it will also recreate missing master or
// slave keys.
func (k *kvstoreBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error {
	var (
		err       error
		recreated bool
		keyPath   = path.Join(k.idPrefix, id.String())
		valueKey  = path.Join(k.valuePrefix, key.GetKey(), k.suffix)
	)
	// Use of CreateOnly() ensures that any existing potentially
	// conflicting key is never overwritten.
	success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, []byte(key.GetKey()), false, lock)
	switch {
	case err != nil:
		// Fixed: the previous message passed the logfields.Key label
		// constant and the slave key into its format verbs; report the
		// master key path instead.
		return fmt.Errorf("unable to re-create missing master key %q: %w", keyPath, err)
	case success:
		k.logger.Warn(
			"Re-created missing master key",
			logfields.Key, keyPath,
		)
	}
	// Also re-create the slave key in case it has been deleted. This will
	// ensure that the next garbage collection cycle of any participating
	// node does not remove the master key again.
	// lock is ignored since the key doesn't exist.
	if reliablyMissing {
		recreated, err = k.backend.CreateOnly(ctx, valueKey, []byte(id.String()), true)
	} else {
		recreated, err = k.backend.UpdateIfDifferentIfLocked(ctx, valueKey, []byte(id.String()), true, lock)
	}
	switch {
	case err != nil:
		return fmt.Errorf("unable to re-create missing slave key %q: %w", valueKey, err)
	case recreated:
		k.logger.Warn(
			"Re-created missing slave key",
			logfields.Key, valueKey,
		)
	}
	return nil
}
// Release releases the use of an ID associated with the provided key. It does
// not guard against concurrent releases. This is currently guarded by
// Allocator.slaveKeysMutex when called from pkg/allocator.Allocator.Release.
func (k *kvstoreBackend) Release(ctx context.Context, _ idpool.ID, key allocator.AllocatorKey) (err error) {
	valueKey := path.Join(k.valuePrefix, key.GetKey(), k.suffix)
	k.logger.Info(
		"Released last local use of key, invoking global release",
		logfields.Key, key,
	)
	// does not need to be deleted with a lock as its protected by the
	// Allocator.slaveKeysMutex
	if err := k.backend.Delete(ctx, valueKey); err != nil {
		// NOTE(review): the message says "Ignoring" but the error IS
		// returned to the caller — confirm which behavior is intended.
		k.logger.Warn(
			"Ignoring node specific ID",
			logfields.Error, err,
			logfields.Key, key,
		)
		return err
	}
	// if k.lockless {
	// FIXME: etcd 3.3 will make it possible to do a lockless
	// cleanup of the ID and release it right away. For now we rely
	// on the GC to kick in a release unused IDs.
	// }
	return nil
}
// RunLocksGC scans the kvstore for unused locks and removes them. Returns
// a map of locks that are currently being held, including the ones that have
// failed to be GCed.
func (k *kvstoreBackend) RunLocksGC(ctx context.Context, staleKeysPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error) {
	// fetch list of all /../locks keys
	allocated, err := k.backend.ListPrefix(ctx, k.lockPrefix)
	if err != nil {
		return nil, fmt.Errorf("list failed: %w", err)
	}
	staleKeys := map[string]kvstore.Value{}
	// iterate over /../locks
	for key, v := range allocated {
		// Only delete if this key was previously marked as to be deleted
		if modRev, ok := staleKeysPrevRound[key]; ok &&
			// comparing ModRevision ensures the same client is still holding
			// this lock since the last GC was called.
			modRev.ModRevision == v.ModRevision &&
			modRev.LeaseID == v.LeaseID {
			// Fixed: the failure log below previously referenced the outer
			// (nil) err from ListPrefix because the Delete error was scoped
			// to the if-statement; capture it so the real error is logged.
			delErr := k.backend.Delete(ctx, key)
			if delErr == nil {
				k.logger.Warn("Forcefully removed distributed lock due to client staleness."+
					" Please check the connectivity between the KVStore and the client with that lease ID.",
					logfields.Key, key,
					logfields.LeaseID, strconv.FormatUint(uint64(v.LeaseID), 16),
				)
				continue
			}
			k.logger.Warn(
				"Unable to remove distributed lock due to client staleness."+
					" Please check the connectivity between the KVStore and the client with that lease ID.",
				logfields.Error, delErr,
				logfields.Key, key,
				logfields.LeaseID, strconv.FormatUint(uint64(v.LeaseID), 16),
			)
		}
		// If the key was not found mark it to be delete in the next RunGC
		staleKeys[key] = kvstore.Value{
			ModRevision: v.ModRevision,
			LeaseID:     v.LeaseID,
		}
	}
	return staleKeys, nil
}
// RunGC scans the kvstore for unused master keys and removes them.
// A master key is considered unused when it has no slave keys AND was already
// marked stale in the previous round (staleKeysPrevRound) with an unchanged
// ModRevision — a two-phase scheme that avoids deleting keys that were
// re-used between GC rounds. Only IDs within [minID, maxID] are considered.
func (k *kvstoreBackend) RunGC(
	ctx context.Context,
	rateLimit *rate.Limiter,
	staleKeysPrevRound map[string]uint64,
	minID, maxID idpool.ID,
) (map[string]uint64, *allocator.GCStats, error) {
	// fetch list of all /id/ keys
	allocated, err := k.backend.ListPrefix(ctx, k.idPrefix)
	if err != nil {
		return nil, nil, fmt.Errorf("list failed: %w", err)
	}
	totalEntries := len(allocated)
	deletedEntries := 0
	staleKeys := map[string]uint64{}
	min := uint64(minID)
	max := uint64(maxID)
	reasonOutOfRange := "out of local cluster identity range [" + strconv.FormatUint(min, 10) + "," + strconv.FormatUint(max, 10) + "]"
	// iterate over /id/
	for key, v := range allocated {
		// if k.lockless {
		// FIXME: Add DeleteOnZeroCount support
		// }
		// Parse identity ID
		items := strings.Split(key, "/")
		if len(items) == 0 {
			// NOTE(review): err here is the outer (nil) error from
			// ListPrefix above, not an error specific to this key.
			k.logger.Warn(
				"Unknown identity key found, skipping",
				logfields.Error, err,
				logfields.Key, key,
			)
			continue
		}
		if identityID, err := strconv.ParseUint(items[len(items)-1], 10, 64); err != nil {
			k.logger.Warn(
				"Parse identity failed, skipping",
				logfields.Error, err,
				logfields.Key, key,
			)
			continue
		} else {
			// We should not GC those identities that are out of our scope
			if identityID < min || identityID > max {
				k.logger.Debug(
					"Skipping this key",
					logfields.Key, key,
					logfields.Reason, reasonOutOfRange,
				)
				continue
			}
		}
		// Take the per-key allocator lock before inspecting slave keys so
		// we do not race with concurrent allocations of the same key.
		lock, err := k.lockPath(ctx, key)
		if err != nil {
			k.logger.Warn(
				"allocator garbage collector was unable to lock key",
				logfields.Error, err,
				logfields.Key, key,
			)
			continue
		}
		// fetch list of all /value/<key> keys
		valueKeyPrefix := path.Join(k.valuePrefix, string(v.Data))
		pairs, err := k.backend.ListPrefixIfLocked(ctx, valueKeyPrefix, lock)
		if err != nil {
			k.logger.Warn(
				"allocator garbage collector was unable to list keys",
				logfields.Error, err,
				logfields.Prefix, valueKeyPrefix,
			)
			lock.Unlock(context.Background())
			continue
		}
		// Any direct slave key under the value prefix means the ID is
		// still referenced by at least one node.
		hasUsers := false
		for prefix := range pairs {
			if prefixMatchesKey(valueKeyPrefix, prefix) {
				hasUsers = true
				break
			}
		}
		var deleted bool
		// if ID has no user, delete it
		if !hasUsers {
			// Only delete if this key was previously marked as to be deleted
			if modRev, ok := staleKeysPrevRound[key]; ok {
				// if the v.ModRevision is different than the modRev (which is
				// the last seen v.ModRevision) then this key was re-used in
				// between GC calls. We should not mark it as stale keys yet,
				// but the next GC call will do it.
				if modRev == v.ModRevision {
					if err := k.backend.DeleteIfLocked(ctx, key, lock); err != nil {
						k.logger.Warn(
							"Unable to delete unused allocator master key",
							logfields.Error, err,
							logfields.Key, key,
							logfields.Identity, path.Base(key),
						)
					} else {
						deletedEntries++
						k.logger.Info(
							"Deleted unused allocator master key in KVStore",
							logfields.Key, key,
							logfields.Identity, path.Base(key),
						)
					}
					// consider the key regardless if there was an error from
					// the kvstore. We want to rate limit the number of requests
					// done to the KVStore.
					deleted = true
				}
			} else {
				// If the key was not found mark it to be delete in the next RunGC
				staleKeys[key] = v.ModRevision
			}
		}
		lock.Unlock(context.Background())
		if deleted {
			// Wait after deleted the key. This is not ideal because we have
			// done the operation that should be rate limited before checking the
			// rate limit. We have to do this here to avoid holding the global lock
			// for a long period of time.
			err = rateLimit.Wait(ctx)
			if err != nil {
				return nil, nil, err
			}
		}
	}
	gcStats := &allocator.GCStats{
		Alive:   totalEntries - deletedEntries,
		Deleted: deletedEntries,
	}
	return staleKeys, gcStats, nil
}
// keyToID extracts the numeric allocator ID from a master key of the form
// "<idPrefix>/<ID>". It returns idpool.NoID with an error for keys outside
// the prefix or with an unparseable suffix.
func (k *kvstoreBackend) keyToID(key string) (id idpool.ID, err error) {
	if !strings.HasPrefix(key, k.idPrefix) {
		return idpool.NoID, fmt.Errorf("Found invalid key \"%s\" outside of prefix \"%s\"", key, k.idPrefix)
	}
	suffix := strings.TrimPrefix(key, k.idPrefix)
	// Fixed: the previous suffix[0] index panicked when key == idPrefix
	// (empty suffix); TrimPrefix handles both the present and absent '/'
	// without indexing.
	suffix = strings.TrimPrefix(suffix, "/")
	idParsed, err := strconv.ParseUint(suffix, 10, 64)
	if err != nil {
		return idpool.NoID, fmt.Errorf("Cannot parse key suffix \"%s\"", suffix)
	}
	return idpool.ID(idParsed), nil
}
// ListIDs returns the IDs of all master keys currently present in the
// kvstore. Keys that cannot be parsed into an ID are logged and skipped.
func (k *kvstoreBackend) ListIDs(ctx context.Context) (identityIDs []idpool.ID, err error) {
	identities, err := k.backend.ListPrefix(ctx, k.idPrefix)
	if err != nil {
		return nil, err
	}
	for key := range identities {
		id, err := k.keyToID(key)
		if err != nil {
			// Include the parse error itself, consistent with the other
			// Warn call sites in this backend.
			k.logger.Warn(
				"Cannot parse identity ID",
				logfields.Error, err,
				logfields.Identity, key,
			)
			continue
		}
		identityIDs = append(identityIDs, id)
	}
	return identityIDs, nil
}
// ListAndWatch lists and watches the master-key prefix, translating kvstore
// events into CacheMutations callbacks on the provided handler. It blocks
// until the underlying events channel is closed (e.g. on context
// cancellation).
func (k *kvstoreBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations) {
	events := k.backend.ListAndWatch(ctx, k.idPrefix)
	for event := range events {
		if event.Typ == kvstore.EventTypeListDone {
			handler.OnListDone()
			continue
		}
		id, err := k.keyToID(event.Key)
		switch {
		case err != nil:
			k.logger.Warn(
				"Invalid key",
				logfields.Error, err,
				logfields.Identity, event.Key,
			)
		case id != idpool.NoID:
			var key allocator.AllocatorKey
			if len(event.Value) > 0 {
				key = k.keyType.PutKey(string(event.Value))
			} else {
				// A delete event may legitimately carry no value; any other
				// event with an empty value is unexpected and is skipped.
				if event.Typ != kvstore.EventTypeDelete {
					k.logger.Error(
						"Received a key with an empty value",
						logfields.Key, event.Key,
						logfields.EventType, event.Typ,
					)
					continue
				}
			}
			switch event.Typ {
			case kvstore.EventTypeCreate, kvstore.EventTypeModify:
				handler.OnUpsert(id, key)
			case kvstore.EventTypeDelete:
				// key may be nil here when the delete event had no value.
				handler.OnDelete(id, key)
			}
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package doublewrite
import (
"context"
"log/slog"
"strings"
"github.com/cilium/cilium/pkg/allocator"
"github.com/cilium/cilium/pkg/idpool"
"github.com/cilium/cilium/pkg/k8s/identitybackend"
"github.com/cilium/cilium/pkg/kvstore"
kvstoreallocator "github.com/cilium/cilium/pkg/kvstore/allocator"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/rate"
)
// NewDoubleWriteBackend creates a backend which writes identities to both the CRD and KVStore backends.
// It should be used for migration purposes only.
//
// c.ReadFromKVStore selects which backend's results/errors are authoritative
// for read-style operations on the returned backend.
func NewDoubleWriteBackend(logger *slog.Logger, c DoubleWriteBackendConfiguration) (allocator.Backend, error) {
	logger = logger.With(logfields.LogSubsys, "double-write-allocator")
	crdBackend, err := identitybackend.NewCRDBackend(logger, c.CRDBackendConfiguration)
	if err != nil {
		return nil, err
	}
	kvstoreBackend, err := kvstoreallocator.NewKVStoreBackend(logger, c.KVStoreBackendConfiguration)
	if err != nil {
		return nil, err
	}
	logger.Debug(
		"Creating the Double-Write backend",
		logfields.KVStoreBackendConfigurationSuffix, c.KVStoreBackendConfiguration.Suffix,
		logfields.KVStoreBackendConfigurationTyp, c.KVStoreBackendConfiguration.Typ,
		logfields.KVStoreBackendConfigurationBasePath, c.KVStoreBackendConfiguration.BasePath,
		logfields.ReadFromKVStore, c.ReadFromKVStore,
	)
	return &doubleWriteBackend{logger: logger, crdBackend: crdBackend, kvstoreBackend: kvstoreBackend, readFromKVStore: c.ReadFromKVStore}, nil
}
// DoubleWriteBackendConfiguration bundles the configurations of the two
// underlying backends plus the switch selecting which one is authoritative
// for reads.
type DoubleWriteBackendConfiguration struct {
	CRDBackendConfiguration     identitybackend.CRDBackendConfiguration
	KVStoreBackendConfiguration kvstoreallocator.KVStoreBackendConfiguration
	// ReadFromKVStore makes the KVStore backend authoritative when true;
	// otherwise the CRD backend's results/errors are returned.
	ReadFromKVStore bool
}
// doubleWriteBackend fans every mutation out to both the CRD and the KVStore
// backend, returning the result of whichever backend readFromKVStore selects.
type doubleWriteBackend struct {
	logger         *slog.Logger
	crdBackend     allocator.Backend
	kvstoreBackend allocator.Backend
	// readFromKVStore selects the authoritative backend for returned
	// values and errors.
	readFromKVStore bool
}
// DeleteAllKeys removes all identity keys from both backends.
func (d *doubleWriteBackend) DeleteAllKeys(ctx context.Context) {
	d.crdBackend.DeleteAllKeys(ctx)
	d.kvstoreBackend.DeleteAllKeys(ctx)
}
// DeleteID deletes the identity from both backends, logging each failure
// independently, and returns the error of the authoritative backend.
func (d *doubleWriteBackend) DeleteID(ctx context.Context, id idpool.ID) error {
	errCRD := d.crdBackend.DeleteID(ctx, id)
	if errCRD != nil {
		d.logger.Error("CRD backend failed to delete identity",
			logfields.Error, errCRD,
			logfields.Identity, id,
		)
	}
	errKVStore := d.kvstoreBackend.DeleteID(ctx, id)
	if errKVStore != nil {
		d.logger.Error("KVStore backend failed to delete identity",
			logfields.Error, errKVStore,
			logfields.Identity, id,
		)
	}
	if d.readFromKVStore {
		return errKVStore
	}
	return errCRD
}
// AllocateID allocates the identity in both backends, CRD first. If the
// KVStore allocation fails, the CRD allocation is rolled back so the two
// stores stay consistent.
func (d *doubleWriteBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (allocator.AllocatorKey, error) {
	crdKey, crdErr := d.crdBackend.AllocateID(ctx, id, key)
	if crdErr != nil {
		d.logger.Error("CRD backend failed to allocate identity",
			logfields.Error, crdErr,
			logfields.Identity, id,
			logfields.Key, key,
		)
		return crdKey, crdErr
	}
	kvStoreKey, kvStoreErr := d.kvstoreBackend.AllocateID(ctx, id, key)
	if kvStoreErr != nil {
		// Fixed: removed a stray ')' embedded in this log message.
		d.logger.Error("KVStore backend failed to allocate identity, deleting the corresponding CRD identity",
			logfields.Error, kvStoreErr,
			logfields.Identity, id,
			logfields.Key, key,
		)
		// revert the allocation in the CRD backend
		releaseErr := d.crdBackend.DeleteID(ctx, id)
		if releaseErr != nil {
			d.logger.Error("CRD backend failed to release identity",
				logfields.Error, releaseErr,
				logfields.Identity, id,
				logfields.Key, crdKey,
			)
		}
		return kvStoreKey, kvStoreErr
	}
	if d.readFromKVStore {
		return kvStoreKey, nil
	}
	return crdKey, nil
}
// AllocateIDIfLocked allocates the identity in both backends while holding
// the given lock, CRD first; a KVStore failure rolls back the CRD allocation.
func (d *doubleWriteBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) (allocator.AllocatorKey, error) {
	crdKey, crdErr := d.crdBackend.AllocateIDIfLocked(ctx, id, key, lock)
	if crdErr != nil {
		d.logger.Error("CRD backend failed to allocate identity with lock",
			logfields.Error, crdErr,
			logfields.Identity, id,
			logfields.Key, key,
		)
		return crdKey, crdErr
	}
	kvStoreKey, kvStoreErr := d.kvstoreBackend.AllocateIDIfLocked(ctx, id, key, lock)
	if kvStoreErr != nil {
		// Fixed: removed a stray ')' embedded in this log message.
		d.logger.Error("KVStore backend failed to allocate identity with lock, deleting the corresponding CRD identity",
			logfields.Error, kvStoreErr,
			logfields.Identity, id,
			logfields.Key, key,
		)
		// revert the allocation in the CRD backend
		releaseErr := d.crdBackend.DeleteID(ctx, id)
		if releaseErr != nil {
			d.logger.Error("CRD backend failed to release identity",
				logfields.Error, releaseErr,
				logfields.Identity, id,
				logfields.Key, crdKey,
			)
		}
		return kvStoreKey, kvStoreErr
	}
	if d.readFromKVStore {
		return kvStoreKey, nil
	}
	return crdKey, nil
}
// AcquireReference acquires a reference on the identity in both backends and
// returns the error of the authoritative backend. A CRD "does not exist"
// failure is logged at debug level during KVStore-authoritative migration,
// since CRD identities are expected to be missing on the first migration run.
func (d *doubleWriteBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
	crdErr := d.crdBackend.AcquireReference(ctx, id, key, lock)
	if crdErr != nil {
		const logMessage = "CRD backend failed to acquire reference with lock"
		if d.readFromKVStore && strings.Contains(crdErr.Error(), "does not exist") {
			// This is a common error when CRD identities don't exist during the very first migration so we log it as debug
			d.logger.Debug(logMessage,
				logfields.Error, crdErr,
				logfields.Identity, id,
				logfields.Key, key,
			)
		} else {
			d.logger.Error(logMessage,
				logfields.Error, crdErr,
				logfields.Identity, id,
				logfields.Key, key,
			)
		}
	}
	kvStoreErr := d.kvstoreBackend.AcquireReference(ctx, id, key, lock)
	if kvStoreErr != nil {
		d.logger.Error("KVStore backend failed to acquire reference with lock",
			logfields.Error, kvStoreErr,
			logfields.Identity, id,
			logfields.Key, key,
		)
	}
	if d.readFromKVStore {
		return kvStoreErr
	}
	return crdErr
}
// RunLocksGC delegates lock garbage collection to the KVStore backend only.
func (d *doubleWriteBackend) RunLocksGC(ctx context.Context, staleKeysPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error) {
	// This is a no-op for the CRD backend
	return d.kvstoreBackend.RunLocksGC(ctx, staleKeysPrevRound)
}
// RunGC delegates identity garbage collection to the KVStore backend only.
func (d *doubleWriteBackend) RunGC(
	ctx context.Context,
	rateLimit *rate.Limiter,
	staleKeysPrevRound map[string]uint64,
	minID, maxID idpool.ID,
) (map[string]uint64, *allocator.GCStats, error) {
	// This is a no-op for the CRD backend
	return d.kvstoreBackend.RunGC(ctx, rateLimit, staleKeysPrevRound, minID, maxID)
}
// UpdateKey refreshes the key->ID record in both backends and returns the
// error of the authoritative backend. Failures in either backend are logged
// but do not stop the other backend's update.
func (d *doubleWriteBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error {
	// Note: reliablyMissing is forced to "true" in order to ensure that when using the doublewrite backend for the first time,
	// identities are properly created in the "secondary" identity store
	crdErr := d.crdBackend.UpdateKey(ctx, id, key, true)
	if crdErr != nil {
		d.logger.Error("CRD backend failed to update key",
			logfields.Error, crdErr,
			logfields.Identity, id,
			logfields.Key, key,
			logfields.ReliablyMissing, reliablyMissing,
		)
	}
	kvStoreErr := d.kvstoreBackend.UpdateKey(ctx, id, key, reliablyMissing)
	if kvStoreErr != nil {
		d.logger.Error("KVStore backend failed to update key",
			logfields.Error, kvStoreErr,
			logfields.Identity, id,
			logfields.Key, key,
			logfields.ReliablyMissing, reliablyMissing,
		)
	}
	if d.readFromKVStore {
		return kvStoreErr
	}
	return crdErr
}
// UpdateKeyIfLocked behaves like UpdateKey but only succeeds while the given
// kvstore lock is still held. The error of the authoritative backend wins.
func (d *doubleWriteBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error {
	// reliablyMissing is forced to "true" for the CRD backend to ensure that,
	// when the double-write backend is used for the first time, identities are
	// properly created in the "secondary" identity store.
	errCRD := d.crdBackend.UpdateKeyIfLocked(ctx, id, key, true, lock)
	if errCRD != nil {
		d.logger.Error("CRD backend failed to update key with lock",
			logfields.Error, errCRD,
			logfields.Identity, id,
			logfields.Key, key,
			logfields.ReliablyMissing, reliablyMissing,
		)
	}

	errKVStore := d.kvstoreBackend.UpdateKeyIfLocked(ctx, id, key, reliablyMissing, lock)
	if errKVStore != nil {
		d.logger.Error("KVStore backend failed to update key with lock",
			logfields.Error, errKVStore,
			logfields.Identity, id,
			logfields.Key, key,
			logfields.ReliablyMissing, reliablyMissing,
		)
	}

	if !d.readFromKVStore {
		return errCRD
	}
	return errKVStore
}
// Lock acquires a distributed lock for the given key. The CRD backend does
// not require locking, but kvstore operations still do, so locking is
// delegated entirely to the kvstore backend.
func (d *doubleWriteBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
	locker, err := d.kvstoreBackend.Lock(ctx, key)
	return locker, err
}
// Get resolves the ID allocated to the given key, querying only the backend
// that is authoritative for reads.
func (d *doubleWriteBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) {
	if !d.readFromKVStore {
		return d.crdBackend.Get(ctx, key)
	}
	return d.kvstoreBackend.Get(ctx, key)
}
// GetIfLocked resolves the ID allocated to the given key while the lock is
// held, querying only the backend that is authoritative for reads.
func (d *doubleWriteBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
	if !d.readFromKVStore {
		return d.crdBackend.GetIfLocked(ctx, key, lock)
	}
	return d.kvstoreBackend.GetIfLocked(ctx, key, lock)
}
// GetByID resolves the key associated with the given ID, querying only the
// backend that is authoritative for reads.
func (d *doubleWriteBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) {
	if !d.readFromKVStore {
		return d.crdBackend.GetByID(ctx, id)
	}
	return d.kvstoreBackend.GetByID(ctx, id)
}
// Release drops a reference to the identity. Releasing is a no-op for the
// CRD backend, so only the kvstore backend is invoked; its error is returned
// only when the kvstore is the authoritative side.
func (d *doubleWriteBackend) Release(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (err error) {
	errKVStore := d.kvstoreBackend.Release(ctx, id, key)
	if errKVStore != nil {
		d.logger.Error("KVStore backend failed to release identity",
			logfields.Error, errKVStore,
			logfields.Identity, id,
			logfields.Key, key,
		)
	}
	// This is a no-op for the CRD backend, hence no CRD error to surface.
	if !d.readFromKVStore {
		return nil
	}
	return errKVStore
}
// ListIDs returns all allocated identity IDs from the backend that is
// authoritative for reads.
func (d *doubleWriteBackend) ListIDs(ctx context.Context) (identityIDs []idpool.ID, err error) {
	if !d.readFromKVStore {
		return d.crdBackend.ListIDs(ctx)
	}
	return d.kvstoreBackend.ListIDs(ctx)
}
// NoOpHandler is an allocator.CacheMutations implementation that discards
// all cache events. It is used to drive ListAndWatch on a backend whose
// results are not needed, i.e. when only the side effect of initializing
// that backend's store matters.
type NoOpHandler struct{}

// OnListDone is a no-op.
func (h NoOpHandler) OnListDone() {}

// OnUpsert is a no-op.
func (h NoOpHandler) OnUpsert(idpool.ID, allocator.AllocatorKey) {}

// OnDelete is a no-op.
func (h NoOpHandler) OnDelete(idpool.ID, allocator.AllocatorKey) {}
// ListAndWatch watches the backend that is authoritative for reads and feeds
// its identity events into handler. The non-authoritative CRD backend is
// still watched (with a no-op handler) so that its underlying store gets
// initialized.
func (d *doubleWriteBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations) {
	if d.readFromKVStore {
		// We still need to run ListAndWatch for the CRD backend to initialize
		// the underlying store. Since we don't need to use the results of the
		// list operation, we can use a no-op handler.
		go d.crdBackend.ListAndWatch(ctx, NoOpHandler{})
		d.kvstoreBackend.ListAndWatch(ctx, handler)
		// Bug fix: return here. Without it, once the kvstore watch terminated
		// the CRD backend would be watched a second time with the real
		// handler, feeding non-authoritative events into the cache.
		return
	}
	d.crdBackend.ListAndWatch(ctx, handler)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"context"
"log/slog"
"google.golang.org/grpc"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// backendOption describes a single textual configuration option supported by
// a kvstore backend.
type backendOption struct {
	// description is the description of the option
	description string

	// value is the value the option has been configured to
	value string

	// validate, if set, is called to validate the value before assignment
	validate func(value string) error
}

// backendOptions maps an option name to its definition and configured value.
type backendOptions map[string]*backendOption

// ClusterSizeDependantIntervalFunc computes an effective interval from a base
// interval, scaling it according to the size of the cluster.
type ClusterSizeDependantIntervalFunc func(baseInterval time.Duration) time.Duration
// ExtraOptions represents any options that can not be represented in a textual
// format and need to be set programmatically.
type ExtraOptions struct {
	// DialOption holds additional gRPC dial options used when establishing
	// the connection to the backend.
	DialOption []grpc.DialOption

	// ClusterSizeDependantInterval defines the function to calculate
	// intervals based on cluster size.
	ClusterSizeDependantInterval ClusterSizeDependantIntervalFunc

	// NoLockQuorumCheck disables the lock acquisition quorum check.
	NoLockQuorumCheck bool

	// ClusterName is the name of each etcd cluster.
	ClusterName string

	// BootstrapComplete is an optional channel that can be provided to signal
	// to the client that bootstrap is complete. If provided, the client will
	// have an initial rate limit equal to etcd.bootstrapQps and be updated to
	// etcd.qps after this channel is closed.
	BootstrapComplete <-chan struct{}

	// NoEndpointStatusChecks disables the status checks for the endpoints.
	NoEndpointStatusChecks bool

	// LeaseTTL is the TTL of the leases.
	LeaseTTL time.Duration

	// MaxConsecutiveQuorumErrors represents the maximum number of consecutive
	// quorum errors before recreating the etcd connection.
	MaxConsecutiveQuorumErrors uint
}
// StatusCheckInterval returns the interval of status checks depending on the
// cluster size and the current connectivity state.
//
//	nodes   OK      Failing
//	1       20s     3s
//	4       45s     7s
//	8       1m05s   11s
//	32      1m45s   18s
//	128     2m25s   24s
//	512     3m07s   32s
//	2048    3m46s   38s
//	8192    4m30s   45s
func (e *ExtraOptions) StatusCheckInterval(allConnected bool) time.Duration {
	// Shorter base interval while connectivity issues are being detected.
	base := 5 * time.Second
	if allConnected {
		base = 30 * time.Second
	}
	if e == nil || e.ClusterSizeDependantInterval == nil {
		return base
	}
	return e.ClusterSizeDependantInterval(base)
}
// backendModule is the interface that each kvstore backend has to implement.
type backendModule interface {
	// setConfig must configure the backend with the specified options.
	// This function is called once before newClient().
	setConfig(logger *slog.Logger, opts map[string]string) error

	// newClient must initialize the backend and create a new kvstore
	// client which implements the BackendOperations interface.
	newClient(ctx context.Context, logger *slog.Logger, opts ExtraOptions) (BackendOperations, chan error)

	// createInstance creates a new instance of the module.
	createInstance() backendModule
}
var (
	// registeredBackends is a map, keyed by backend name, of all backends
	// that have registered themselves via registerBackend().
	registeredBackends = map[string]backendModule{}
)
// registerBackend must be called by kvstore backends to register themselves.
// Registering the same name twice is a programming error and panics.
func registerBackend(name string, module backendModule) {
	_, alreadyRegistered := registeredBackends[name]
	if alreadyRegistered {
		// slogloggercheck: it's safe to use the default logger here since it's just to print a panic.
		logging.Panic(logging.DefaultSlogLogger, "backend already registered", logfields.Name, name)
	}
	registeredBackends[name] = module
}
// getBackend returns a fresh instance of the registered backend with the
// given name, or nil if no such backend exists.
func getBackend(name string) backendModule {
	backend, found := registeredBackends[name]
	if !found {
		return nil
	}
	return backend.createInstance()
}
// BackendOperations are the individual kvstore operations that each backend
// must implement. Direct use of this interface is possible but will bypass the
// tracing layer.
type BackendOperations interface {
	// Status returns the status of the kvstore client.
	Status() *models.Status

	// StatusCheckErrors returns a channel which receives status check
	// errors.
	StatusCheckErrors() <-chan error

	// LockPath locks the provided path.
	LockPath(ctx context.Context, path string) (KVLocker, error)

	// Get returns value of key.
	Get(ctx context.Context, key string) ([]byte, error)

	// GetIfLocked returns value of key if the client is still holding the given lock.
	GetIfLocked(ctx context.Context, key string, lock KVLocker) ([]byte, error)

	// Delete deletes a key. It does not return an error if the key does not exist.
	Delete(ctx context.Context, key string) error

	// DeleteIfLocked deletes a key if the client is still holding the given lock. It does not return an error if the key does not exist.
	DeleteIfLocked(ctx context.Context, key string, lock KVLocker) error

	// DeletePrefix deletes all keys under the given path prefix.
	DeletePrefix(ctx context.Context, path string) error

	// Update creates or updates a key.
	Update(ctx context.Context, key string, value []byte, lease bool) error

	// UpdateIfLocked updates a key if the client is still holding the given lock.
	UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) error

	// UpdateIfDifferent updates a key if the value is different.
	UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (bool, error)

	// UpdateIfDifferentIfLocked updates a key if the value is different and if the client is still holding the given lock.
	UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (bool, error)

	// CreateOnly atomically creates a key or fails if it already exists.
	CreateOnly(ctx context.Context, key string, value []byte, lease bool) (bool, error)

	// CreateOnlyIfLocked atomically creates a key if the client is still holding the given lock or fails if it already exists.
	CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (bool, error)

	// ListPrefix returns a list of keys matching the prefix.
	ListPrefix(ctx context.Context, prefix string) (KeyValuePairs, error)

	// ListPrefixIfLocked returns a list of keys matching the prefix only if the client is still holding the given lock.
	ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (KeyValuePairs, error)

	// Close closes the kvstore client.
	Close()

	// ListAndWatch creates a new watcher which will watch the specified
	// prefix for changes. Before doing this, it will list the current keys
	// matching the prefix and report them as new keys. The Events channel is
	// unbuffered. Upon every change observed, a KeyValueEvent will be sent
	// to the Events channel.
	ListAndWatch(ctx context.Context, prefix string) EventChan

	// RegisterLeaseExpiredObserver registers a function which is executed when
	// the lease associated with a key having the given prefix is detected as expired.
	// If the function is nil, the previous observer (if any) is unregistered.
	RegisterLeaseExpiredObserver(prefix string, fn func(key string))

	// RegisterLockLeaseExpiredObserver registers a function which is executed when
	// the lease associated with a locked key having the given prefix is detected as expired.
	// If the function is nil, the previous observer (if any) is unregistered.
	RegisterLockLeaseExpiredObserver(prefix string, fn func(key string))

	BackendOperationsUserMgmt
}
// BackendOperationsUserMgmt are the kvstore operations for users management.
type BackendOperationsUserMgmt interface {
	// UserEnforcePresence creates a user in the kvstore if not already present, and grants the specified roles.
	UserEnforcePresence(ctx context.Context, name string, roles []string) error

	// UserEnforceAbsence deletes a user from the kvstore, if present.
	UserEnforceAbsence(ctx context.Context, name string) error
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"cmp"
"fmt"
"log/slog"
"github.com/cilium/hive"
"github.com/cilium/hive/cell"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/spanstat"
"github.com/cilium/cilium/pkg/time"
)
// DisabledBackendName disables the kvstore client.
const DisabledBackendName = ""

// BootstrapStat is the type of the object that, if provided, gets updated with
// the measurement of the bootstrap time of the kvstore client.
type BootstrapStat *spanstat.SpanStat
// Cell returns a cell which provides the global kvstore client.
// defaultBackend is the backend used unless overridden via configuration;
// pass DisabledBackendName to default to a disabled client.
func Cell(defaultBackend string) cell.Cell {
	return cell.Module(
		"kvstore-client",
		"KVStore Client",

		cell.Config(Config{
			KVStore:                           defaultBackend,
			KVStoreOpt:                        make(map[string]string),
			KVStoreLeaseTTL:                   defaults.KVstoreLeaseTTL,
			KVstoreMaxConsecutiveQuorumErrors: defaults.KVstoreMaxConsecutiveQuorumErrors,
		}),

		cell.Provide(func(in struct {
			cell.In

			Logger    *slog.Logger
			Lifecycle cell.Lifecycle
			Config    Config
			Opts      ExtraOptions  `optional:"true"`
			Stats     BootstrapStat `optional:"true"`
		}) (Client, hive.ScriptCmdsOut) {
			// With no backend configured, return a disabled client so that
			// consumers can still depend on Client unconditionally.
			if in.Config.KVStore == DisabledBackendName {
				return &clientImpl{enabled: false}, hive.ScriptCmdsOut{}
			}

			// Flag-based config values act as fallbacks when the
			// programmatic ExtraOptions fields are unset (zero).
			in.Opts.LeaseTTL = cmp.Or(in.Opts.LeaseTTL, in.Config.KVStoreLeaseTTL)
			in.Opts.MaxConsecutiveQuorumErrors = cmp.Or(in.Opts.MaxConsecutiveQuorumErrors,
				in.Config.KVstoreMaxConsecutiveQuorumErrors)

			cl := &clientImpl{
				enabled: true, cfg: in.Config, opts: in.Opts,
				stats:  cmp.Or((*spanstat.SpanStat)(in.Stats), &spanstat.SpanStat{}),
				logger: in.Logger.With(logfields.BackendName, in.Config.KVStore),
			}

			// The actual connection is established/torn down through the
			// clientImpl Start/Stop lifecycle hooks.
			in.Lifecycle.Append(cl)

			return cl, hive.NewScriptCmds(cl.commands())
		}),

		cell.Invoke(Config.Validate),
	)
}
// Config holds the flag-configurable settings of the kvstore client.
type Config struct {
	// KVStore is the selected backend type (e.g. "etcd"); empty
	// (DisabledBackendName) disables the client.
	KVStore string
	// KVStoreOpt holds backend-specific options, e.g. etcd.address.
	KVStoreOpt map[string]string
	// KVStoreLeaseTTL is the time-to-live of the kvstore lease.
	KVStoreLeaseTTL time.Duration
	// KVstoreMaxConsecutiveQuorumErrors is the maximum number of consecutive
	// quorum errors tolerated before recreating the etcd connection.
	KVstoreMaxConsecutiveQuorumErrors uint
}
// Flags registers the command-line flags backing the Config fields, using
// def's values as defaults.
func (def Config) Flags(flags *pflag.FlagSet) {
	flags.String(option.KVStore, def.KVStore, "Key-value store type")
	flags.StringToString(option.KVStoreOpt, def.KVStoreOpt,
		"Key-value store options e.g. etcd.address=127.0.0.1:4001")
	flags.Duration(option.KVstoreLeaseTTL, def.KVStoreLeaseTTL,
		"Time-to-live for the KVstore lease.")
	flags.Uint(option.KVstoreMaxConsecutiveQuorumErrorsName, def.KVstoreMaxConsecutiveQuorumErrors,
		"Max acceptable kvstore consecutive quorum errors before recreating the etcd connection")
}
// Validate checks that the configured lease TTL lies within the supported
// range [defaults.LockLeaseTTL, defaults.KVstoreLeaseMaxTTL].
func (cfg Config) Validate() error {
	ttl := cfg.KVStoreLeaseTTL
	if ttl >= defaults.LockLeaseTTL && ttl <= defaults.KVstoreLeaseMaxTTL {
		return nil
	}
	return fmt.Errorf("%s does not lie in required range (%v - %v)",
		option.KVstoreLeaseTTL, defaults.LockLeaseTTL, defaults.KVstoreLeaseMaxTTL)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"context"
"fmt"
"log/slog"
"github.com/cilium/hive/cell"
"github.com/cilium/hive/script"
"github.com/cilium/cilium/pkg/spanstat"
)
// Client is the client to interact with the kvstore (i.e., etcd).
type Client interface {
	// IsEnabled returns true if kvstore support is enabled,
	// and the client can be used.
	IsEnabled() bool

	BackendOperations
}
// clientImpl implements Client. The embedded BackendOperations is nil until
// Start succeeds, so the operations must not be used before the lifecycle
// start hook has completed.
type clientImpl struct {
	enabled bool
	cfg     Config
	opts    ExtraOptions

	logger *slog.Logger
	stats  *spanstat.SpanStat

	BackendOperations
}
// IsEnabled returns true if kvstore support is enabled.
func (cl *clientImpl) IsEnabled() bool {
	return cl.enabled
}
// Start is the lifecycle start hook: it establishes the connection to the
// kvstore, blocking until the connection succeeds, fails, or the hook context
// is canceled. The bootstrap duration (including failures) is recorded in
// cl.stats via the deferred EndError.
func (cl *clientImpl) Start(hctx cell.HookContext) (err error) {
	cl.stats.Start()
	defer func() { cl.stats.EndError(err) }()

	cl.logger.Info("Establishing connection to kvstore")
	// context.Background() (not hctx) is used so that the client connection
	// outlives the start hook; cancellation of the hook only aborts waiting.
	client, errCh := NewClient(context.Background(), cl.logger, cl.cfg.KVStore, cl.cfg.KVStoreOpt, cl.opts)

	// Wait for either the connection outcome or hook-context cancellation.
	select {
	case err = <-errCh:
	case <-hctx.Done():
		err = hctx.Err()
	}

	if err != nil {
		if client != nil {
			client.Close()
		}
		return fmt.Errorf("failed to establish connection to kvstore: %w", err)
	}

	cl.logger.Info("Connection to kvstore successfully established")
	// Only now are the embedded BackendOperations usable.
	cl.BackendOperations = client
	return nil
}
// Stop is the lifecycle stop hook: it closes the kvstore connection if one
// was ever established (BackendOperations stays nil when Start failed).
func (cl *clientImpl) Stop(cell.HookContext) error {
	if ops := cl.BackendOperations; ops != nil {
		ops.Close()
	}
	return nil
}
// commands returns the script commands suitable to be used in production
// environments. Only the read-only list command is exposed here; a disabled
// client exposes no commands at all.
func (cl *clientImpl) commands() map[string]script.Cmd {
	if !cl.IsEnabled() {
		return nil
	}
	c := cmds{client: cl}
	return map[string]script.Cmd{"kvstore/list": c.list()}
}
// NewClient returns a new kvstore client based on the configuration.
// Immediate errors (unknown backend, invalid options) are reported through
// the returned channel, which is closed once no further errors can occur;
// otherwise the backend's own error channel is returned.
func NewClient(ctx context.Context, logger *slog.Logger, selectedBackend string, opts map[string]string, options ExtraOptions) (BackendOperations, chan error) {
	// Channel used to report immediate errors; module.newClient creates and
	// returns a different channel, transparently to the caller.
	errChan := make(chan error, 1)
	defer close(errChan)

	module := getBackend(selectedBackend)
	if module == nil {
		errChan <- fmt.Errorf("unknown key-value store type %q. See cilium.link/err-kvstore for details", selectedBackend)
		return nil, errChan
	}

	if err := module.setConfig(logger, opts); err != nil {
		errChan <- err
		return nil, errChan
	}

	backend, backendErrChan := module.newClient(ctx, logger, options)
	return backend, backendErrChan
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"maps"
"os"
"slices"
"github.com/cilium/hive/script"
"github.com/spf13/pflag"
)
// Commands returns the script commands associated with the given client,
// or nil when the client is disabled.
func Commands(client Client) map[string]script.Cmd {
	if !client.IsEnabled() {
		return nil
	}
	c := cmds{client: client}
	return map[string]script.Cmd{
		"kvstore/update": c.update(),
		"kvstore/delete": c.delete(),
		"kvstore/list":   c.list(),
	}
}
// cmds bundles the kvstore client used by the script command implementations.
type cmds struct{ client Client }
// update returns a script command that sets a kvstore key to the contents of
// a file, without attaching a lease.
func (c cmds) update() script.Cmd {
	usage := script.CmdUsage{
		Summary: "update kvstore key-value",
		Args:    "key value-file",
	}
	run := func(s *script.State, args ...string) (script.WaitFunc, error) {
		if len(args) != 2 {
			return nil, fmt.Errorf("%w: expected key and value file", script.ErrUsage)
		}
		path := s.Path(args[1])
		value, err := os.ReadFile(path)
		if err != nil {
			return nil, fmt.Errorf("could not read %q: %w", path, err)
		}
		return nil, c.client.Update(s.Context(), args[0], value, false)
	}
	return script.Command(usage, run)
}
// delete returns a script command that removes a single kvstore key.
func (c cmds) delete() script.Cmd {
	usage := script.CmdUsage{
		Summary: "delete kvstore key-value",
		Args:    "key",
	}
	run := func(s *script.State, args ...string) (script.WaitFunc, error) {
		if len(args) != 1 {
			return nil, fmt.Errorf("%w: expected key", script.ErrUsage)
		}
		return nil, c.client.Delete(s.Context(), args[0])
	}
	return script.Command(usage, run)
}
// list returns a script command that lists all kvstore key-value pairs under
// a prefix, either as plain text or indented JSON, writing to stdout or to a
// file when a second argument is given.
func (c cmds) list() script.Cmd {
	return script.Command(
		script.CmdUsage{
			Summary: "list kvstore key-value pairs",
			Args:    "prefix (output file)",
			Flags: func(fs *pflag.FlagSet) {
				fs.StringP("output", "o", "plain", "Output format. One of: (plain, json)")
				fs.Bool("keys-only", false, "Only output the listed keys")
				fs.Bool("values-only", false, "Only output the listed values")
			},
		},
		func(s *script.State, args ...string) (script.WaitFunc, error) {
			var prefix string
			if len(args) > 0 {
				prefix = args[0]
			}

			keysOnly, _ := s.Flags.GetBool("keys-only")
			valuesOnly, _ := s.Flags.GetBool("values-only")
			if keysOnly && valuesOnly {
				return nil, errors.New("--keys-only and --values-only are mutually exclusive")
			}

			kvs, err := c.client.ListPrefix(s.Context(), prefix)
			if err != nil {
				return nil, fmt.Errorf("error listing %q: %w", prefix, err)
			}

			return func(s *script.State) (stdout string, stderr string, err error) {
				// The output format flag is loop-invariant: read it once
				// instead of on every iteration.
				outfmt, _ := s.Flags.GetString("output")

				var b bytes.Buffer
				// Sort the keys for deterministic output (map iteration
				// order is randomized).
				for _, k := range slices.Sorted(maps.Keys(kvs)) {
					if !valuesOnly {
						fmt.Fprintf(&b, "# %s\n", k)
					}
					if !keysOnly {
						switch outfmt {
						case "plain":
							fmt.Fprintln(&b, string(kvs[k].Data))
						case "json":
							if err := json.Indent(&b, kvs[k].Data, "", " "); err != nil {
								fmt.Fprintf(&b, "ERROR: %s", err)
							}
							fmt.Fprintln(&b)
						default:
							return "", "", fmt.Errorf("unexpected output format %q", outfmt)
						}
					}
					// A no-op fmt.Fprint(&b) call (no operands, writes
					// nothing) was removed from the end of this loop.
				}

				if len(args) == 2 {
					err = os.WriteFile(s.Path(args[1]), b.Bytes(), 0644)
					if err != nil {
						err = fmt.Errorf("could not write %q: %w", s.Path(args[1]), err)
					}
				} else {
					stdout = b.String()
				}
				return
			}, nil
		},
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"fmt"
"log/slog"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// setOpts validates the specified options against the selected backend and
// then modifies the configuration. All options are validated first; the
// configured values are only assigned once every option has passed, so a
// partially-applied configuration is never left behind.
func setOpts(logger *slog.Logger, opts map[string]string, supportedOpts backendOptions) error {
	invalid := 0
	for key, val := range opts {
		opt, known := supportedOpts[key]
		switch {
		case !known:
			invalid++
			logger.Error("unknown kvstore configuration key", logfields.Key, key)
		case opt.validate != nil:
			if err := opt.validate(val); err != nil {
				logger.Error("invalid value for key",
					logfields.Error, err,
					logfields.Key, key,
				)
				invalid++
			}
		}
	}

	// If errors have occurred, print the supported configuration keys to
	// the log before failing.
	if invalid > 0 {
		logger.Error("Supported configuration keys:")
		for key, val := range supportedOpts {
			logger.Error(fmt.Sprintf(" %-12s %s", key, val.description))
		}
		return fmt.Errorf("invalid kvstore configuration, see log for details")
	}

	// Modify the configuration atomically after verification.
	for key, val := range opts {
		supportedOpts[key].value = val
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"context"
"maps"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/cilium/cilium/pkg/time"
)
var (
	// etcdDummyAddress is the address of the etcd instance used by the dummy
	// kvstore test fixtures; it can be overwritten from test invokers using
	// ldflags.
	etcdDummyAddress = "http://127.0.0.1:4002"
)
// SetupDummy sets up kvstore for tests. A lock mechanism is used to prevent
// the creation of two clients at the same time, to avoid interferences in case
// different tests are run in parallel. A cleanup function is automatically
// registered to delete all keys and close the client when the test terminates.
func SetupDummy(tb testing.TB, dummyBackend string) Client {
	return SetupDummyWithConfigOpts(tb, dummyBackend, nil)
}
// SetupDummyWithConfigOpts sets up the dummy kvstore for tests but also
// configures the module with the provided opts. A lock mechanism is used to
// prevent the creation of two clients at the same time, to avoid interferences
// in case different tests are run in parallel. A cleanup function is
// automatically registered to delete all keys and close the client when the
// test terminates.
func SetupDummyWithConfigOpts(tb testing.TB, dummyBackend string, opts map[string]string) Client {
	if dummyBackend == DisabledBackendName {
		return &clientImpl{enabled: false}
	}

	module := getBackend(dummyBackend)
	if module == nil {
		tb.Fatalf("Unknown dummy kvstore backend %s", dummyBackend)
	}

	switch dummyBackend {
	case EtcdBackendName:
		// Clone before mutating so the caller's map is left untouched.
		if opts == nil {
			opts = make(map[string]string)
		} else {
			opts = maps.Clone(opts)
		}
		opts[EtcdAddrOption] = EtcdDummyAddress()
	}

	err := module.setConfig(hivetest.Logger(tb), opts)
	if err != nil {
		tb.Fatalf("Unable to set config options for kvstore backend module: %v", err)
	}

	client, errCh := module.newClient(context.Background(), hivetest.Logger(tb), ExtraOptions{})

	// Bound the whole setup (connection + lock acquisition) to 120 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
	defer cancel()

	select {
	case err = <-errCh:
	case <-ctx.Done():
		err = ctx.Err()
	}

	if err != nil {
		if client != nil {
			client.Close()
		}
		tb.Fatalf("Failed waiting for kvstore connection to be established: %v", err)
	}

	tb.Cleanup(func() {
		if err := client.DeletePrefix(context.Background(), ""); err != nil {
			tb.Fatalf("Unable to delete all kvstore keys: %v", err)
		}

		client.Close()
	})

	// Multiple tests might be running in parallel by go test if they are part of
	// different packages. Let's implement a locking mechanism to ensure that only
	// one at a time can access the kvstore, to prevent that they interact with
	// each other. Locking is implemented through CreateOnly (rather than using
	// the locking abstraction), so that we can release it in the same atomic
	// transaction that also removes all the other keys.
	for {
		succeeded, err := client.CreateOnly(ctx, ".lock", []byte(""), true)
		if err != nil {
			tb.Fatalf("Unable to acquire the kvstore lock: %v", err)
		}

		if succeeded {
			return &clientImpl{enabled: true, BackendOperations: client}
		}

		select {
		case <-time.After(100 * time.Millisecond):
		case <-ctx.Done():
			tb.Fatal("Timed out waiting to acquire the kvstore lock")
		}
	}
}
// EtcdDummyAddress returns the address of the etcd instance used by the
// dummy kvstore test fixtures.
func EtcdDummyAddress() string {
	return etcdDummyAddress
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"bytes"
"cmp"
"context"
"crypto/tls"
"errors"
"fmt"
"log/slog"
"math/rand/v2"
"os"
"strconv"
"strings"
"go.etcd.io/etcd/api/v3/mvccpb"
v3rpcErrors "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/pkg/v3/logutil"
client "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
clientyaml "go.etcd.io/etcd/client/v3/yaml"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/time/rate"
"sigs.k8s.io/yaml"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/backoff"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
ciliumrate "github.com/cilium/cilium/pkg/rate"
ciliumratemetrics "github.com/cilium/cilium/pkg/rate/metrics"
"github.com/cilium/cilium/pkg/spanstat"
"github.com/cilium/cilium/pkg/time"
)
const (
	// EtcdBackendName is the backend name for etcd
	EtcdBackendName = "etcd"

	// EtcdAddrOption configures the address of the etcd cluster.
	EtcdAddrOption = "etcd.address"
	// EtcdOptionConfig configures the path to the etcd configuration file.
	EtcdOptionConfig = "etcd.config"
	// EtcdOptionKeepAliveHeartbeat configures the gRPC keepalive heartbeat interval.
	EtcdOptionKeepAliveHeartbeat = "etcd.keepaliveHeartbeat"
	// EtcdOptionKeepAliveTimeout configures the gRPC keepalive timeout.
	EtcdOptionKeepAliveTimeout = "etcd.keepaliveTimeout"

	// EtcdRateLimitOption specifies maximum kv operations per second
	EtcdRateLimitOption = "etcd.qps"

	// EtcdBootstrapRateLimitOption specifies maximum kv operations per second
	// during bootstrapping
	EtcdBootstrapRateLimitOption = "etcd.bootstrapQps"

	// EtcdMaxInflightOption specifies maximum inflight concurrent kv store operations
	EtcdMaxInflightOption = "etcd.maxInflight"

	// EtcdListLimitOption limits the number of results retrieved in one batch
	// by ListAndWatch operations. A 0 value equals to no limit.
	EtcdListLimitOption = "etcd.limit"

	// etcdMaxKeysPerLease is the maximum number of keys that can be attached to a lease
	etcdMaxKeysPerLease = 1000
)
// ErrLockLeaseExpired is an error whenever the lease of the lock does not
// exist or it was expired.
var ErrLockLeaseExpired = errors.New("transaction did not succeed: lock lease expired")

// ErrOperationAbortedByInterceptor is an error that can be used by custom
// interceptors to signal that the given operation has been intentionally
// aborted, and should not be logged as an error.
var ErrOperationAbortedByInterceptor = errors.New("operation aborted")

// ErrEtcdTimeout is returned by Hint when an operation exceeded the etcd
// client timeout. The redundant explicit `error` type annotation was dropped
// (errors.New already returns error), matching the sibling declarations.
var ErrEtcdTimeout = errors.New("etcd client timeout exceeded")
// etcdModule implements backendModule for the etcd backend, holding the
// textual options configured via setConfig.
type etcdModule struct {
	opts backendOptions
}
var (
	// statusCheckTimeout is the timeout when performing status checks with
	// all etcd endpoints
	statusCheckTimeout = 10 * time.Second

	// initialConnectionTimeout is the timeout for the initial connection to
	// the etcd server
	initialConnectionTimeout = 15 * time.Minute

	// etcd3ClientLogger is the logger used for the underlying etcd clients. We
	// explicitly initialize a logger and propagate it to prevent each client from
	// automatically creating a new one, which comes with a significant memory cost.
	etcd3ClientLogger *zap.Logger
)
// newEtcdModule returns a fresh etcd backend module with the full table of
// supported textual options and their validators.
func newEtcdModule() backendModule {
	return &etcdModule{
		opts: backendOptions{
			EtcdAddrOption: &backendOption{
				description: "Addresses of etcd cluster",
			},
			EtcdOptionConfig: &backendOption{
				description: "Path to etcd configuration file",
			},
			EtcdOptionKeepAliveTimeout: &backendOption{
				description: "Timeout after which an unanswered heartbeat triggers the connection to be closed",
				validate: func(v string) error {
					_, err := time.ParseDuration(v)
					return err
				},
			},
			EtcdOptionKeepAliveHeartbeat: &backendOption{
				description: "Heartbeat interval to keep gRPC connection alive",
				validate: func(v string) error {
					_, err := time.ParseDuration(v)
					return err
				},
			},
			EtcdRateLimitOption: &backendOption{
				description: "Rate limit in kv store operations per second",
				validate: func(v string) error {
					_, err := strconv.Atoi(v)
					return err
				},
			},
			EtcdBootstrapRateLimitOption: &backendOption{
				description: "Rate limit in kv store operations per second during bootstrapping",
				validate: func(v string) error {
					_, err := strconv.Atoi(v)
					return err
				},
			},
			EtcdMaxInflightOption: &backendOption{
				description: "Maximum inflight concurrent kv store operations; defaults to etcd.qps if unset",
				validate: func(v string) error {
					_, err := strconv.Atoi(v)
					return err
				},
			},
			EtcdListLimitOption: &backendOption{
				description: "Max number of results retrieved in one batch by ListAndWatch operations (0 = no limit)",
				validate: func(v string) error {
					_, err := strconv.Atoi(v)
					return err
				},
			},
		},
	}
}
// createInstance returns a new, independently configurable etcd module.
func (e *etcdModule) createInstance() backendModule {
	return newEtcdModule()
}
// setConfig validates and stores the textual etcd options.
func (e *etcdModule) setConfig(logger *slog.Logger, opts map[string]string) error {
	return setOpts(logger, opts, e.opts)
}
func shuffleEndpoints(endpoints []string) {
rand.Shuffle(len(endpoints), func(i, j int) {
endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
})
}
// clientOptions holds the parsed etcd client settings derived from the
// textual backend options.
type clientOptions struct {
	// Endpoint is the etcd address; mutually complementary with ConfigPath.
	Endpoint string
	// ConfigPath points to an etcd client configuration file.
	ConfigPath string
	// KeepAliveHeartbeat is the gRPC keepalive ping interval.
	KeepAliveHeartbeat time.Duration
	// KeepAliveTimeout closes the connection after an unanswered heartbeat.
	KeepAliveTimeout time.Duration
	// RateLimit is the steady-state kv operations-per-second limit.
	RateLimit int
	// BootstrapRateLimit is the ops-per-second limit during bootstrap.
	BootstrapRateLimit int
	// MaxInflight caps concurrent kv operations; defaults to RateLimit.
	MaxInflight int
	// ListBatchSize bounds the batch size of ListAndWatch list operations.
	ListBatchSize int
}
// newClient parses the configured options and establishes the etcd client.
// It retries indefinitely while configuration files are missing (waiting for
// them to be mounted); any other error is reported on the returned channel.
// The channel is closed by connectEtcdClient once the connection succeeds,
// or here on failure.
func (e *etcdModule) newClient(ctx context.Context, logger *slog.Logger, opts ExtraOptions) (BackendOperations, chan error) {
	errChan := make(chan error, 1)

	clientOptions := clientOptions{
		KeepAliveHeartbeat: 15 * time.Second,
		KeepAliveTimeout:   25 * time.Second,
		RateLimit:          defaults.KVstoreQPS,
		ListBatchSize:      256,
	}

	// Parse errors are ignored below because the values were already
	// validated by setConfig; unset options keep the defaults above.
	if o, ok := e.opts[EtcdRateLimitOption]; ok && o.value != "" {
		clientOptions.RateLimit, _ = strconv.Atoi(o.value)
	}

	if o, ok := e.opts[EtcdBootstrapRateLimitOption]; ok && o.value != "" {
		clientOptions.BootstrapRateLimit, _ = strconv.Atoi(o.value)
	}

	if o, ok := e.opts[EtcdMaxInflightOption]; ok && o.value != "" {
		clientOptions.MaxInflight, _ = strconv.Atoi(o.value)
	}

	// MaxInflight falls back to the rate limit when unset.
	if clientOptions.MaxInflight == 0 {
		clientOptions.MaxInflight = clientOptions.RateLimit
	}

	if o, ok := e.opts[EtcdListLimitOption]; ok && o.value != "" {
		clientOptions.ListBatchSize, _ = strconv.Atoi(o.value)
	}

	if o, ok := e.opts[EtcdOptionKeepAliveTimeout]; ok && o.value != "" {
		clientOptions.KeepAliveTimeout, _ = time.ParseDuration(o.value)
	}

	if o, ok := e.opts[EtcdOptionKeepAliveHeartbeat]; ok && o.value != "" {
		clientOptions.KeepAliveHeartbeat, _ = time.ParseDuration(o.value)
	}

	clientOptions.Endpoint = e.opts[EtcdAddrOption].value
	clientOptions.ConfigPath = e.opts[EtcdOptionConfig].value

	if clientOptions.Endpoint == "" && clientOptions.ConfigPath == "" {
		errChan <- fmt.Errorf("invalid etcd configuration, %s or %s must be specified",
			EtcdOptionConfig, EtcdAddrOption)
		close(errChan)
		return nil, errChan
	}

	logger.Info(
		"Creating etcd client",
		logfields.ConfigPath, clientOptions.ConfigPath,
		logfields.KeepAliveHeartbeat, clientOptions.KeepAliveHeartbeat,
		logfields.KeepAliveTimeout, clientOptions.KeepAliveTimeout,
		logfields.RateLimit, clientOptions.RateLimit,
		logfields.MaxInflight, clientOptions.MaxInflight,
		logfields.ListLimit, clientOptions.ListBatchSize,
	)

	for {
		// connectEtcdClient will close errChan when the connection attempt has
		// been successful
		backend, err := connectEtcdClient(ctx, logger, errChan, clientOptions, opts)
		switch {
		case os.IsNotExist(err):
			// Config files may not be mounted yet; poll until they appear.
			logger.Info("Waiting for all etcd configuration files to be available",
				logfields.Error, err,
			)
			time.Sleep(5 * time.Second)
		case err != nil:
			errChan <- err
			close(errChan)
			return backend, errChan
		default:
			return backend, errChan
		}
	}
}
// init registers the etcd backend, applies the optional status check interval
// override from the environment, and initializes the shared etcd client
// logger (one logger for all clients to avoid per-client memory cost).
func init() {
	// register etcd module for use
	registerBackend(EtcdBackendName, newEtcdModule())

	// Allow tests/operators to shorten or lengthen the status check timeout;
	// invalid durations are silently ignored, keeping the default.
	if duration := os.Getenv("CILIUM_ETCD_STATUS_CHECK_INTERVAL"); duration != "" {
		timeout, err := time.ParseDuration(duration)
		if err == nil {
			statusCheckTimeout = timeout
		}
	}

	// Initialize the etcd client logger.
	// slogloggercheck: it's safe to use the default logger here since it's just to print a warning from etcdClientDebugLevel.
	l, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel(logging.DefaultSlogLogger))
	if err != nil {
		// slogloggercheck: it's safe to use the default logger here since it's just to print a warning.
		logging.DefaultSlogLogger.Warn("Failed to initialize etcd client logger",
			logfields.Error, err,
		)
		// Fall back to a logger that discards everything.
		l = zap.NewNop()
	}
	etcd3ClientLogger = l.Named("etcd-client")
}
// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level.
// An unset or "true" value maps to info; any other value is parsed as a
// zap level name, falling back to info (with a warning) when invalid.
// This is a copy of a private etcd client function:
// https://github.com/etcd-io/etcd/blob/v3.5.9/client/v3/logger.go#L47-L59
func etcdClientDebugLevel(logger *slog.Logger) zapcore.Level {
	raw := os.Getenv("ETCD_CLIENT_DEBUG")
	switch raw {
	case "", "true":
		return zapcore.InfoLevel
	}

	var parsed zapcore.Level
	if parseErr := parsed.Set(raw); parseErr != nil {
		logger.Warn("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
		return zapcore.InfoLevel
	}
	return parsed
}
// Hint tries to improve the error message displayed to the user.
// A context.DeadlineExceeded error is mapped to the more descriptive
// ErrEtcdTimeout; every other error is returned unchanged.
func Hint(err error) error {
	if errors.Is(err, context.DeadlineExceeded) {
		return ErrEtcdTimeout
	}
	return err
}
// etcdClient is the etcd-backed implementation of the kvstore backend,
// wrapping an etcd v3 client together with lease management, rate limiting
// and status checking.
type etcdClient struct {
	// stopStatusChecker is closed when the status checker can be terminated
	stopStatusChecker chan struct{}

	// client is the underlying etcd v3 client
	client *client.Client

	// config and configPath are initialized once and never written to again, they can be accessed without locking
	config *client.Config

	// statusCheckErrors receives all errors reported by statusChecker()
	statusCheckErrors chan error

	// protects all sessions and sessionErr from concurrent access
	lock.RWMutex

	// leaseManager manages the acquisition of etcd leases for generic purposes
	leaseManager *etcdLeaseManager
	// lockLeaseManager manages the acquisition of etcd leases for locking
	// purposes, associated with a shorter TTL
	lockLeaseManager *etcdLeaseManager

	// statusLock protects status for read/write access
	statusLock lock.RWMutex
	// status is a snapshot of the latest etcd cluster status
	status models.Status

	// extraOptions holds the options this client was created with
	extraOptions ExtraOptions

	// limiter rate-limits and bounds in-flight requests towards etcd
	limiter *ciliumrate.APILimiter

	// listBatchSize is the page size used by paginatedList
	listBatchSize int

	// lastHeartbeat records when the last heartbeat event was observed;
	// protected by the embedded RWMutex
	lastHeartbeat time.Time

	// leaseExpiredObservers and lockLeaseExpiredObservers map key prefixes
	// to callbacks invoked when the corresponding lease expires
	leaseExpiredObservers     lock.Map[string, func(string)]
	lockLeaseExpiredObservers lock.Map[string, func(string)]

	// logger is the scoped logger associated with this client
	logger *slog.Logger
}
// etcdMutex wraps a distributed etcd mutex acquired via LockPath,
// implementing the KVLocker interface.
type etcdMutex struct {
	// mutex is the underlying etcd concurrency mutex
	mutex *concurrency.Mutex
	// onUnlock is invoked on Unlock to release the associated lock lease
	onUnlock func()
	// path is the locked key path, used as the metrics label
	path string
}
// Unlock releases the distributed mutex. The onUnlock callback runs
// unconditionally, before (and regardless of) the etcd unlock operation.
func (e *etcdMutex) Unlock(ctx context.Context) (err error) {
	e.onUnlock()
	// The deferred closure captures the named return err to record the
	// unlock latency and outcome.
	defer func(duration *spanstat.SpanStat) {
		increaseMetric(e.path, metricDelete, "Unlock", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	return e.mutex.Unlock(ctx)
}
// Comparator returns an etcd transaction comparison that succeeds only
// while this lock is still owned, allowing callers to guard operations on
// continued lock ownership.
func (e *etcdMutex) Comparator() any {
	return e.mutex.IsOwner()
}
// StatusCheckErrors returns a channel which receives status check errors
// reported by statusChecker(). The channel is closed when the status
// checker terminates.
func (e *etcdClient) StatusCheckErrors() <-chan error {
	return e.statusCheckErrors
}
// maybeWaitForInitLock blocks until a distributed lock below InitLockPath
// can be acquired, as a proxy for the etcd cluster having quorum. It is a
// no-op when the lock quorum check is disabled, and retries with
// exponential backoff until either the client or caller context ends.
func (e *etcdClient) maybeWaitForInitLock(ctx context.Context) error {
	if e.extraOptions.NoLockQuorumCheck {
		return nil
	}

	limiter := e.newExpBackoffRateLimiter("etcd-client-init-lock")
	defer limiter.Reset()
	for {
		// Bail out if either the client or the caller context ended.
		select {
		case <-e.client.Ctx().Done():
			return fmt.Errorf("client context ended: %w", e.client.Ctx().Err())
		case <-ctx.Done():
			return fmt.Errorf("caller context ended: %w", ctx.Err())
		default:
		}

		// Generate a random number so that we can acquire a lock even
		// if other agents are killed while locking this path.
		randNumber := strconv.FormatUint(rand.Uint64(), 16)
		locker, err := e.LockPath(ctx, InitLockPath+"/"+randNumber)
		if err == nil {
			// The unlock error is deliberately ignored: successfully
			// acquiring the lock already proved quorum.
			locker.Unlock(context.Background())
			e.logger.Debug("Distributed lock successful, etcd has quorum")
			return nil
		}

		limiter.Wait(ctx)
	}
}
// isConnectedAndHasQuorum returns nil when the etcd cluster is reachable
// and appears to have quorum within statusCheckTimeout, and a wrapped
// error otherwise.
func (e *etcdClient) isConnectedAndHasQuorum(ctx context.Context) error {
	ctxTimeout, cancel := context.WithTimeout(ctx, statusCheckTimeout)
	defer cancel()

	err := e.maybeWaitForInitLock(ctxTimeout)
	if err == nil {
		return nil
	}
	recordQuorumError("lock timeout")
	return fmt.Errorf("unable to acquire lock: %w", err)
}
// connectEtcdClient builds the etcd v3 client from the given options,
// assembles the etcdClient wrapper (rate limiter, lease managers) and kicks
// off the asynchronous connection logic. The returned BackendOperations is
// usable immediately; errChan is serviced by asyncConnectEtcdClient.
func connectEtcdClient(ctx context.Context, logger *slog.Logger, errChan chan error, clientOptions clientOptions, opts ExtraOptions) (BackendOperations, error) {
	config := &client.Config{
		Endpoints: []string{clientOptions.Endpoint},
	}

	// A config file, when provided, replaces the plain endpoint config.
	if cfgPath := clientOptions.ConfigPath; cfgPath != "" {
		cfg, err := clientyaml.NewConfig(cfgPath)
		if err != nil {
			return nil, err
		}
		if cfg.TLS != nil {
			// Reload client certificates from disk on handshake, to pick
			// up rotated certificates without restart.
			cfg.TLS.GetClientCertificate, err = getClientCertificateReloader(cfgPath)
			if err != nil {
				return nil, err
			}
		}
		config = cfg
	}

	// Shuffle the order of endpoints to avoid all agents connecting to the
	// same etcd endpoint and to work around etcd client library failover
	// bugs. (https://github.com/etcd-io/etcd/pull/9860)
	if config.Endpoints != nil {
		shuffleEndpoints(config.Endpoints)
	}

	// Set client context so that client can be cancelled from outside
	config.Context = ctx

	// Configure the dial options provided by the caller.
	config.DialOptions = append(config.DialOptions, opts.DialOption...)

	// Set DialTimeout to 0, otherwise the creation of a new client will
	// block until DialTimeout is reached or a connection to the server
	// is made.
	config.DialTimeout = 0

	// Ping the server to verify if the server connection is still valid
	config.DialKeepAliveTime = clientOptions.KeepAliveHeartbeat

	// Timeout if the server does not reply within 15 seconds and close the
	// connection. Ideally it should be lower than staleLockTimeout
	config.DialKeepAliveTimeout = clientOptions.KeepAliveTimeout

	// Use the shared etcd client logger to prevent unnecessary allocations.
	config.Logger = etcd3ClientLogger

	c, err := client.New(*config)
	if err != nil {
		return nil, err
	}

	ec := &etcdClient{
		client:            c,
		config:            config,
		status: models.Status{
			State: models.StatusStateWarning,
			Msg:   "Waiting for initial connection to be established",
		},
		stopStatusChecker: make(chan struct{}),
		extraOptions:      opts,
		listBatchSize:     clientOptions.ListBatchSize,
		statusCheckErrors: make(chan error, 128),
		logger: logger.With(
			logfields.Endpoints, config.Endpoints,
			logfields.Config, clientOptions.ConfigPath,
		),
	}

	initialLimit := clientOptions.RateLimit
	// If BootstrapRateLimit and BootstrapComplete are provided, set the
	// initial rate limit to BootstrapRateLimit and apply the standard rate limit
	// once the caller has signaled that bootstrap is complete by closing the channel.
	if clientOptions.BootstrapRateLimit > 0 && opts.BootstrapComplete != nil {
		ec.logger.Info(
			"Setting client QPS limit for bootstrap",
			logfields.EtcdQPSLimit, clientOptions.BootstrapRateLimit,
		)
		initialLimit = clientOptions.BootstrapRateLimit
		go func() {
			select {
			case <-ec.client.Ctx().Done():
			case <-opts.BootstrapComplete:
				ec.logger.Info(
					"Bootstrap complete, updating client QPS limit",
					logfields.EtcdQPSLimit, clientOptions.RateLimit,
				)
				// NOTE(review): ec.limiter is assigned below, after this
				// goroutine is started; this relies on neither channel
				// firing before the assignment completes — TODO confirm.
				ec.limiter.SetRateLimit(rate.Limit(clientOptions.RateLimit))
			}
		}()
	}

	ec.limiter = ciliumrate.NewAPILimiter(logger, makeSessionName("etcd", opts), ciliumrate.APILimiterParameters{
		RateLimit:        rate.Limit(initialLimit),
		RateBurst:        clientOptions.RateLimit,
		ParallelRequests: clientOptions.MaxInflight,
	}, ciliumratemetrics.APILimiterObserver())

	ec.logger.Info("Connecting to etcd server...")

	leaseTTL := cmp.Or(opts.LeaseTTL, defaults.KVstoreLeaseTTL)
	ec.leaseManager = newEtcdLeaseManager(ec.logger, c, leaseTTL, etcdMaxKeysPerLease, ec.expiredLeaseObserver)
	ec.lockLeaseManager = newEtcdLeaseManager(ec.logger, c, defaults.LockLeaseTTL, etcdMaxKeysPerLease, ec.expiredLockLeaseObserver)

	go ec.asyncConnectEtcdClient(errChan)

	return ec, nil
}
// asyncConnectEtcdClient performs the initial connection handshake in the
// background: it optionally establishes a lock session (quorum check),
// starts the heartbeat watcher, and reports success (by closing errChan) or
// failure (by sending on errChan and statusCheckErrors) to the caller.
func (e *etcdClient) asyncConnectEtcdClient(errChan chan<- error) {
	var (
		ctx      = e.client.Ctx()
		listDone = make(chan struct{})
	)

	// propagateError marks the client as failed and reports err to both
	// the caller (errChan) and the status consumers, closing both channels.
	propagateError := func(err error) {
		e.statusLock.Lock()
		e.status.State = models.StatusStateFailure
		e.status.Msg = fmt.Sprintf("Failed to establish initial connection: %s", err.Error())
		e.statusLock.Unlock()

		errChan <- err
		close(errChan)

		e.statusCheckErrors <- err
		close(e.statusCheckErrors)
	}

	wctx, wcancel := context.WithTimeout(ctx, initialConnectionTimeout)

	// Don't create a session when running with lock quorum check disabled
	// (i.e., for clustermesh clients), to not introduce unnecessary overhead
	// on the target etcd instance, considering that the session would never
	// be used again. Instead, we'll just rely on the successful synchronization
	// of the heartbeat watcher as a signal that we successfully connected.
	if !e.extraOptions.NoLockQuorumCheck {
		_, err := e.lockLeaseManager.GetSession(wctx, InitLockPath)
		if err != nil {
			wcancel()
			if errors.Is(err, context.DeadlineExceeded) {
				err = fmt.Errorf("timed out while waiting for etcd connection. Ensure that etcd is running on %s", e.config.Endpoints)
			}

			propagateError(err)
			return
		}
	}

	go func() {
		// Report connection established to the caller and start the status
		// checker only after successfully starting the heartbeat watcher, as
		// additional sanity check. This also guarantees that there's already
		// been an interaction with the target etcd instance at that point,
		// and its corresponding cluster ID has been retrieved if using the
		// "clusterLock" interceptors.
		select {
		case <-wctx.Done():
			propagateError(fmt.Errorf("timed out while starting the heartbeat watcher. Ensure that etcd is running on %s", e.config.Endpoints))
			return
		case <-listDone:
			e.logger.Info("Initial etcd connection established")
			close(errChan)
		}

		wcancel()
		e.statusChecker()
	}()

	// Block on the heartbeat watcher; each received event refreshes
	// lastHeartbeat, which statusChecker uses to detect stale connections.
	events := e.ListAndWatch(ctx, HeartbeatPath)
	for event := range events {
		switch event.Typ {
		case EventTypeDelete:
			// A deletion event is not an heartbeat signal
			continue

		case EventTypeListDone:
			// A list done event signals the initial connection, but
			// is also not an heartbeat signal.
			close(listDone)
			continue
		}

		// It is tempting to compare against the heartbeat value stored in
		// the key. However, this would require the time on all nodes to
		// be synchronized. Instead, let's just assume current time.
		e.RWMutex.Lock()
		e.lastHeartbeat = time.Now()
		e.RWMutex.Unlock()
		e.logger.Debug("Received update notification of heartbeat")
	}
}
// makeSessionName builds the session/locksession controller name from the
// given prefix, appending the cluster name when one is set. The cluster
// name is expected to be empty for the main kvstore connection.
func makeSessionName(sessionPrefix string, opts ExtraOptions) string {
	if opts.ClusterName == "" {
		return sessionPrefix
	}
	return sessionPrefix + "-" + opts.ClusterName
}
// newExpBackoffRateLimiter returns an exponential backoff helper with the
// given name, ranging from 50ms up to 1 minute and scaled according to the
// cluster size.
func (e *etcdClient) newExpBackoffRateLimiter(name string) backoff.Exponential {
	return backoff.Exponential{
		Logger: e.logger,
		Name:   name,
		Min:    50 * time.Millisecond,
		Max:    1 * time.Minute,

		NodeManager: backoff.NewNodeManager(e.extraOptions.ClusterSizeDependantInterval),
	}
}
// LockPath acquires a distributed lock on the given path, backed by a
// short-TTL lock lease. The returned KVLocker must be unlocked by the
// caller; unlocking also releases the lease association for the path.
func (e *etcdClient) LockPath(ctx context.Context, path string) (locker KVLocker, err error) {
	// Obtain (or reuse) the lease-backed session the mutex will live on.
	session, err := e.lockLeaseManager.GetSession(ctx, path)
	if err != nil {
		return nil, Hint(err)
	}

	defer func(duration *spanstat.SpanStat) {
		increaseMetric(path, metricSet, "Lock", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	mu := concurrency.NewMutex(session, path)
	err = mu.Lock(ctx)
	if err != nil {
		// Drop the session if the failure indicates the lease expired.
		e.lockLeaseManager.CancelIfExpired(err, session.Lease())
		return nil, Hint(err)
	}

	release := func() { e.lockLeaseManager.Release(path) }
	return &etcdMutex{mutex: mu, onUnlock: release, path: path}, nil
}
// DeletePrefix deletes all keys matching the given prefix and, on success,
// releases any lease associations held for keys under that prefix.
func (e *etcdClient) DeletePrefix(ctx context.Context, path string) (err error) {
	defer func() {
		Trace(e.logger, "DeletePrefix",
			logfields.Error, err,
			fieldPrefix, path,
		)
	}()

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return Hint(err)
	}

	defer func(duration *spanstat.SpanStat) {
		increaseMetric(path, metricDelete, "DeletePrefix", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	_, err = e.client.Delete(ctx, path, client.WithPrefix())
	// Using lr.Error for convenience, as it matches lr.Done() when err is nil
	lr.Error(err, -1)

	if err == nil {
		e.leaseManager.ReleasePrefix(path)
	}

	return Hint(err)
}
// watch starts watching for changes in a prefix.
//
// It runs a list-then-watch loop: a paginated list establishes the current
// state (emitting Create/Modify events and deletions detected against the
// local cache), then an etcd watcher streams subsequent changes starting at
// the revision following the list. Watch errors trigger either a watcher
// recreation at the last known revision (recreateWatcher) or, on compaction
// and other errors, a full relist (reList). The emitter is closed when the
// function returns.
func (e *etcdClient) watch(ctx context.Context, prefix string, events emitter) {
	localCache := watcherCache{}
	listSignalSent := false

	scopedLog := e.logger.With(fieldPrefix, prefix)
	scopedLog.Info("Starting watcher")

	defer func() {
		scopedLog.Info("Stopped watcher")
		events.close()
	}()

	// errLimiter is used to rate limit the retry of the first Get request in case an error
	// has occurred, to prevent overloading the etcd server due to the more aggressive
	// default rate limiter.
	errLimiter := e.newExpBackoffRateLimiter("etcd-list-before-watch-error")

reList:
	for {
		select {
		case <-e.client.Ctx().Done():
			return
		case <-ctx.Done():
			return
		default:
		}

		lr, err := e.limiter.Wait(ctx)
		if err != nil {
			continue
		}
		kvs, revision, err := e.paginatedList(ctx, scopedLog, prefix)
		if err != nil {
			lr.Error(err, -1)

			// Log at Info for the first attempts, escalating to Warn once
			// repeated failures start looking persistent.
			if attempt := errLimiter.Attempt(); attempt < 10 {
				scopedLog.Info(
					"Unable to list keys before starting watcher, will retry",
					logfields.Error, Hint(err),
					logfields.Attempt, attempt,
				)
			} else {
				scopedLog.Warn(
					"Unable to list keys before starting watcher, will retry",
					logfields.Error, Hint(err),
					logfields.Attempt, attempt,
				)
			}

			errLimiter.Wait(ctx)
			continue
		}
		lr.Done()
		errLimiter.Reset()

		scopedLog.Info(
			"Successfully listed keys before starting watcher",
			logfields.Count, len(kvs),
			fieldRev, revision,
		)

		// Emit the listed keys: Create for keys not yet in the local
		// cache, Modify for previously-seen ones.
		for _, key := range kvs {
			t := EventTypeCreate
			if localCache.Exists(key.Key) {
				t = EventTypeModify
			}

			localCache.MarkInUse(key.Key)

			if traceEnabled {
				scopedLog.Debug("Emitting list result",
					logfields.EventType, t,
					logfields.Key, key.Key,
					logfields.Value, key.Value,
				)
			}

			if !events.emit(ctx, KeyValueEvent{
				Key:   string(key.Key),
				Value: key.Value,
				Typ:   t,
			}) {
				return
			}
		}

		nextRev := revision + 1

		// Send out deletion events for all keys that were deleted
		// between our last known revision and the latest revision
		// received via Get
		if !localCache.RemoveDeleted(func(k string) bool {
			event := KeyValueEvent{
				Key: k,
				Typ: EventTypeDelete,
			}

			if traceEnabled {
				scopedLog.Debug("Emitting EventTypeDelete event",
					logfields.Key, k,
				)
			}
			return events.emit(ctx, event)
		}) {
			return
		}

		// Only send the list signal once
		if !listSignalSent {
			if !events.emit(ctx, KeyValueEvent{Typ: EventTypeListDone}) {
				return
			}
			listSignalSent = true
		}

	recreateWatcher:
		scopedLog.Info(
			"Starting to watch prefix",
			fieldRev, nextRev,
		)

		lr, err = e.limiter.Wait(ctx)
		if err != nil {
			// Rate limiter failed: recheck the contexts before retrying
			// the watcher creation.
			select {
			case <-e.client.Ctx().Done():
				return
			case <-ctx.Done():
				return
			default:
				goto recreateWatcher
			}
		}

		etcdWatch := e.client.Watch(client.WithRequireLeader(ctx), prefix,
			client.WithPrefix(), client.WithRev(nextRev))
		lr.Done()

		for {
			select {
			case <-e.client.Ctx().Done():
				return
			case <-ctx.Done():
				return
			case r, ok := <-etcdWatch:
				if !ok {
					// Watch channel closed: back off briefly and recreate
					// the watcher at the next known revision.
					time.Sleep(50 * time.Millisecond)
					goto recreateWatcher
				}

				if err := r.Err(); err != nil {
					switch {
					case errors.Is(err, ErrOperationAbortedByInterceptor):
						// Aborted on purpose by a custom interceptor.
						scopedLog.Debug("Etcd watcher aborted",
							logfields.Error, Hint(err),
							fieldRev, r.Header.Revision,
						)
					case errors.Is(err, v3rpcErrors.ErrCompacted):
						// We tried to watch on a compacted
						// revision that may no longer exist,
						// recreate the watcher and try to
						// watch on the next possible revision
						scopedLog.Info("Tried watching on compacted revision. Triggering relist of all keys",
							logfields.Error, Hint(err),
							fieldRev, r.Header.Revision,
						)
					default:
						scopedLog.Info("Etcd watcher errored. Triggering relist of all keys",
							logfields.Error, Hint(err),
							fieldRev, r.Header.Revision,
						)
					}

					// mark all local keys in state for
					// deletion unless the upcoming GET
					// marks them alive
					localCache.MarkAllForDeletion()

					goto reList
				}

				nextRev = r.Header.Revision + 1
				if traceEnabled {
					scopedLog.Debug("Received event from etcd",
						logfields.Response, r,
					)
				}

				for _, ev := range r.Events {
					event := KeyValueEvent{
						Key:   string(ev.Kv.Key),
						Value: ev.Kv.Value,
					}

					// Classify the event and keep the local cache in
					// sync for the next relist's deletion detection.
					switch {
					case ev.Type == client.EventTypeDelete:
						event.Typ = EventTypeDelete
						localCache.RemoveKey(ev.Kv.Key)
					case ev.IsCreate():
						event.Typ = EventTypeCreate
						localCache.MarkInUse(ev.Kv.Key)
					default:
						event.Typ = EventTypeModify
						localCache.MarkInUse(ev.Kv.Key)
					}

					if traceEnabled {
						scopedLog.Debug("Emitting event",
							logfields.EventType, event.Typ,
							logfields.Key, event.Key,
							logfields.Value, event.Value,
						)
					}

					if !events.emit(ctx, event) {
						return
					}
				}
			}
		}
	}
}
// paginatedList lists all keys under the given prefix in pages of
// listBatchSize entries, returning the accumulated key/values together with
// the revision the listing is consistent at.
func (e *etcdClient) paginatedList(ctx context.Context, log *slog.Logger, prefix string) (kvs []*mvccpb.KeyValue, revision int64, err error) {
	start, end := prefix, client.GetPrefixRangeEnd(prefix)

	for {
		// revision is 0 on the first iteration (server picks the current
		// revision) and pinned on all subsequent pages.
		res, err := e.client.Get(ctx, start, client.WithRange(end),
			client.WithSort(client.SortByKey, client.SortAscend),
			client.WithRev(revision), client.WithSerializable(),
			client.WithLimit(int64(e.listBatchSize)),
		)
		if err != nil {
			return nil, 0, err
		}

		log.Debug(
			"Received list response from etcd",
			fieldNumEntries, len(res.Kvs),
			fieldRemainingEntries, res.Count-int64(len(res.Kvs)),
		)

		if kvs == nil {
			kvs = make([]*mvccpb.KeyValue, 0, res.Count)
		}

		kvs = append(kvs, res.Kvs...)

		// Do not modify the revision once set, as subsequent Get queries may
		// return higher revisions in case other operations are performed in
		// parallel (regardless of whether we specify WithRev), leading to
		// possibly missing the events happened in the meantime.
		if revision == 0 {
			revision = res.Header.Revision
		}

		if !res.More || len(res.Kvs) == 0 {
			return kvs, revision, nil
		}

		// Resume immediately after the last returned key ("\x00" is the
		// smallest possible suffix).
		start = string(res.Kvs[len(res.Kvs)-1].Key) + "\x00"
	}
}
// determineEndpointStatus queries a single etcd endpoint and returns a
// human-readable summary ("<address> - <version>", suffixed with
// " (Leader)" when the endpoint is the current leader), or the error
// encountered while checking it.
func (e *etcdClient) determineEndpointStatus(ctx context.Context, endpointAddress string) (string, error) {
	ctxTimeout, cancel := context.WithTimeout(ctx, statusCheckTimeout)
	defer cancel()

	e.logger.Debug("Checking status to etcd endpoint",
		logfields.Endpoint, endpointAddress,
	)

	status, err := e.client.Status(ctxTimeout, endpointAddress)
	if err != nil {
		return fmt.Sprintf("%s - %s", endpointAddress, err), Hint(err)
	}

	summary := fmt.Sprintf("%s - %s", endpointAddress, status.Version)
	if status.Header.MemberId == status.Leader {
		summary += " (Leader)"
	}
	return summary, nil
}
// statusChecker runs until stopStatusChecker is closed, periodically
// probing endpoint reachability, quorum and (for clustermesh clients)
// heartbeat freshness. It publishes the result into e.status and forwards
// errors to statusCheckErrors, closing that channel on termination.
func (e *etcdClient) statusChecker() {
	ctx := context.Background()

	var consecutiveQuorumErrors uint
	var err error

	e.RWMutex.Lock()
	// Ensure that lastHearbeat is always set to a non-zero value when starting
	// the status checker, to guarantee that we can correctly compute the time
	// difference even in case we don't receive any heartbeat event. Indeed, we
	// want to consider that as an heartbeat failure after the usual timeout.
	if e.lastHeartbeat.IsZero() {
		e.lastHeartbeat = time.Now()
	}
	e.RWMutex.Unlock()

	for {
		newStatus := []string{}
		ok := 0

		quorumError := e.isConnectedAndHasQuorum(ctx)

		e.RWMutex.RLock()
		lastHeartbeat := e.lastHeartbeat
		e.RWMutex.RUnlock()

		// If we don't check the endpoint status of Etcd nor quorum,
		// we can check if the last heartbeat was received from operator.
		// This check is only performed for Clustermesh clients,
		// but not for the main KVStore connections.
		// For Clustermesh clients it's better to disconnect from the remote KVStore
		// in case of operator failure, as it no longer updates global service state.
		// This is not the case for the main KVStore connection, as it is not actually
		// required to have operational operator to function properly.
		if e.extraOptions.NoEndpointStatusChecks && e.extraOptions.NoLockQuorumCheck {
			if heartbeatDelta := time.Since(lastHeartbeat); heartbeatDelta > 2*HeartbeatWriteInterval {
				recordQuorumError("no event received")
				quorumError = fmt.Errorf("%s since last heartbeat update has been received", heartbeatDelta)
			}
		}

		endpoints := e.client.Endpoints()
		if e.extraOptions.NoEndpointStatusChecks {
			newStatus = append(newStatus, "endpoint status checks are disabled")

			if quorumError == nil {
				ok = len(endpoints)
			}
		} else {
			// Probe each endpoint individually and count the healthy ones.
			for _, ep := range endpoints {
				st, err := e.determineEndpointStatus(ctx, ep)
				if err == nil {
					ok++
				}

				newStatus = append(newStatus, st)
			}
		}

		allConnected := len(endpoints) == ok

		quorumString := "true"
		if quorumError != nil {
			quorumString = quorumError.Error()
			consecutiveQuorumErrors++
			quorumString += fmt.Sprintf(", consecutive-errors=%d", consecutiveQuorumErrors)
		} else {
			consecutiveQuorumErrors = 0
		}

		e.statusLock.Lock()

		switch {
		case consecutiveQuorumErrors > cmp.Or(e.extraOptions.MaxConsecutiveQuorumErrors, defaults.KVstoreMaxConsecutiveQuorumErrors):
			err = fmt.Errorf("quorum check failed %d times in a row: %w", consecutiveQuorumErrors, quorumError)
			e.status.State = models.StatusStateFailure
			e.status.Msg = fmt.Sprintf("Err: %s", err.Error())
		case len(endpoints) > 0 && ok == 0:
			err = fmt.Errorf("not able to connect to any etcd endpoints")
			e.status.State = models.StatusStateFailure
			e.status.Msg = fmt.Sprintf("Err: %s", err.Error())
		default:
			err = nil
			e.status.State = models.StatusStateOk
			e.status.Msg = fmt.Sprintf("etcd: %d/%d connected, leases=%d, lock leases=%d, has-quorum=%s: %s",
				ok, len(endpoints), e.leaseManager.TotalLeases(), e.lockLeaseManager.TotalLeases(), quorumString, strings.Join(newStatus, "; "))
		}

		e.statusLock.Unlock()
		if err != nil {
			select {
			case e.statusCheckErrors <- err:
			default:
				// Channel's buffer is full, skip sending errors to the channel but log warnings instead
				e.logger.Warn(
					"Status check error channel is full, dropping this error",
					logfields.Error, err,
				)
			}
		}

		select {
		case <-e.stopStatusChecker:
			close(e.statusCheckErrors)
			return
		case <-time.After(e.extraOptions.StatusCheckInterval(allConnected)):
		}
	}
}
// Status returns a copy of the most recent cluster status snapshot, as
// maintained by statusChecker().
func (e *etcdClient) Status() *models.Status {
	e.statusLock.RLock()
	snapshot := models.Status{
		State: e.status.State,
		Msg:   e.status.Msg,
	}
	e.statusLock.RUnlock()

	return &snapshot
}
// GetIfLocked returns value of key if the client is still holding the given lock.
// The read is wrapped in a transaction guarded by the lock's ownership
// comparison; when the lock is no longer held it fails with
// ErrLockLeaseExpired. A missing key yields (nil, nil).
func (e *etcdClient) GetIfLocked(ctx context.Context, key string, lock KVLocker) (bv []byte, err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "GetIfLocked",
				logfields.Error, err,
				fieldKey, key,
				fieldValue, string(bv),
			)
		}()
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return nil, Hint(err)
	}

	defer func(duration *spanstat.SpanStat) {
		increaseMetric(key, metricRead, "GetLocked", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	opGet := client.OpGet(key)
	cmp := lock.Comparator().(client.Cmp)
	txnReply, err := e.client.Txn(ctx).If(cmp).Then(opGet).Commit()
	// An unsuccessful transaction means the lock ownership check failed.
	if err == nil && !txnReply.Succeeded {
		err = ErrLockLeaseExpired
	}

	if err != nil {
		lr.Error(err, -1)
		return nil, Hint(err)
	}

	lr.Done()
	getR := txnReply.Responses[0].GetResponseRange()
	// RangeResponse
	if getR.Count == 0 {
		return nil, nil
	}
	return getR.Kvs[0].Value, nil
}
// Get returns value of key. A missing key yields (nil, nil) rather than an
// error.
func (e *etcdClient) Get(ctx context.Context, key string) (bv []byte, err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "Get",
				logfields.Error, err,
				fieldKey, key,
				fieldValue, string(bv),
			)
		}()
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return nil, Hint(err)
	}

	defer func(duration *spanstat.SpanStat) {
		increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	getR, err := e.client.Get(ctx, key)
	if err != nil {
		lr.Error(err, -1)
		return nil, Hint(err)
	}
	lr.Done()

	if getR.Count == 0 {
		return nil, nil
	}
	return getR.Kvs[0].Value, nil
}
// DeleteIfLocked deletes a key if the client is still holding the given lock.
// The delete is wrapped in a transaction guarded by the lock's ownership
// comparison; when the lock is no longer held it fails with
// ErrLockLeaseExpired. On success the key's lease association is released.
func (e *etcdClient) DeleteIfLocked(ctx context.Context, key string, lock KVLocker) (err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "DeleteIfLocked",
				logfields.Error, err,
				fieldKey, key,
			)
		}()
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return Hint(err)
	}

	defer func(duration *spanstat.SpanStat) {
		increaseMetric(key, metricDelete, "DeleteLocked", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	opDel := client.OpDelete(key)
	cmp := lock.Comparator().(client.Cmp)
	txnReply, err := e.client.Txn(ctx).If(cmp).Then(opDel).Commit()
	// An unsuccessful transaction means the lock ownership check failed.
	if err == nil && !txnReply.Succeeded {
		err = ErrLockLeaseExpired
	}

	if err == nil {
		e.leaseManager.Release(key)
	}

	// Using lr.Error for convenience, as it matches lr.Done() when err is nil
	lr.Error(err, -1)

	return Hint(err)
}
// Delete deletes a key. On success the key's lease association is released.
func (e *etcdClient) Delete(ctx context.Context, key string) (err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "Delete",
				logfields.Error, err,
				fieldKey, key,
			)
		}()
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return Hint(err)
	}

	defer func(duration *spanstat.SpanStat) {
		increaseMetric(key, metricDelete, "Delete", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	_, err = e.client.Delete(ctx, key)
	// Using lr.Error for convenience, as it matches lr.Done() when err is nil
	lr.Error(err, -1)

	if err == nil {
		e.leaseManager.Release(key)
	}

	return Hint(err)
}
// UpdateIfLocked updates a key if the client is still holding the given lock.
// When lease is true the value is attached to the key's managed lease. The
// put is wrapped in a transaction guarded by the lock's ownership
// comparison; when the lock is no longer held it fails with
// ErrLockLeaseExpired.
func (e *etcdClient) UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "UpdateIfLocked",
				logfields.Error, err,
				fieldKey, key,
				fieldValue, string(value),
				fieldAttachLease, lease,
			)
		}()
	}
	var leaseID client.LeaseID
	if lease {
		leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
		if err != nil {
			return Hint(err)
		}
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return Hint(err)
	}

	// NOTE(review): the metric label here is "UpdateIfLocked" while sibling
	// methods use the shorter "<Op>Locked" form (e.g. "DeleteLocked",
	// "GetLocked") — looks inconsistent, but changing it would alter the
	// emitted metric; confirm before unifying.
	defer func(duration *spanstat.SpanStat) {
		increaseMetric(key, metricSet, "UpdateIfLocked", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	var txnReply *client.TxnResponse
	opPut := client.OpPut(key, string(value), client.WithLease(leaseID))
	cmp := lock.Comparator().(client.Cmp)
	txnReply, err = e.client.Txn(ctx).If(cmp).Then(opPut).Commit()
	e.leaseManager.CancelIfExpired(err, leaseID)

	// An unsuccessful transaction means the lock ownership check failed.
	if err == nil && !txnReply.Succeeded {
		err = ErrLockLeaseExpired
	}

	// Using lr.Error for convenience, as it matches lr.Done() when err is nil
	lr.Error(err, -1)

	return Hint(err)
}
// Update creates or updates a key. When lease is true the value is attached
// to the key's managed lease, so it is automatically removed if this client
// goes away.
func (e *etcdClient) Update(ctx context.Context, key string, value []byte, lease bool) (err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "Update",
				logfields.Error, err,
				fieldKey, key,
				fieldValue, string(value),
				fieldAttachLease, lease,
			)
		}()
	}
	var leaseID client.LeaseID
	if lease {
		leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
		if err != nil {
			return Hint(err)
		}
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return Hint(err)
	}

	defer func(duration *spanstat.SpanStat) {
		increaseMetric(key, metricSet, "Update", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	_, err = e.client.Put(ctx, key, string(value), client.WithLease(leaseID))
	// Drop the cached lease if the failure indicates it expired.
	e.leaseManager.CancelIfExpired(err, leaseID)

	// Using lr.Error for convenience, as it matches lr.Done() when err is nil
	lr.Error(err, -1)

	return Hint(err)
}
// UpdateIfDifferentIfLocked updates a key if the value is different and if the client is still holding the given lock.
// It first reads the current value under the lock guard and skips the write
// (returning recreated=false) when the stored value and lease association
// already match. Any read failure falls back to a blind update.
func (e *etcdClient) UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (recreated bool, err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "UpdateIfDifferentIfLocked",
				logfields.Error, err,
				fieldKey, key,
				fieldValue, string(value),
				fieldAttachLease, lease,
				fieldRecreated, recreated,
			)
		}()
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return false, Hint(err)
	}
	duration := spanstat.Start()

	cnds := lock.Comparator().(client.Cmp)
	txnresp, err := e.client.Txn(ctx).If(cnds).Then(client.OpGet(key)).Commit()

	// Using lr.Error for convenience, as it matches lr.Done() when err is nil
	lr.Error(err, -1)
	increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)

	// On error, attempt update blindly
	if err != nil {
		return true, e.UpdateIfLocked(ctx, key, value, lease, lock)
	}

	// Transaction failure means the lock is no longer held.
	if !txnresp.Succeeded {
		return false, ErrLockLeaseExpired
	}

	getR := txnresp.Responses[0].GetResponseRange()
	if getR.Count == 0 {
		// Key does not exist yet: (re)create it.
		return true, e.UpdateIfLocked(ctx, key, value, lease, lock)
	}

	// Rewrite when the existing value is not attached to our managed lease.
	if lease && !e.leaseManager.KeyHasLease(key, client.LeaseID(getR.Kvs[0].Lease)) {
		return true, e.UpdateIfLocked(ctx, key, value, lease, lock)
	}
	// if value is not equal then update.
	if !bytes.Equal(getR.Kvs[0].Value, value) {
		return true, e.UpdateIfLocked(ctx, key, value, lease, lock)
	}

	return false, nil
}
// UpdateIfDifferent updates a key if the value is different.
// It skips the write (returning recreated=false) when the stored value and
// lease association already match; a read failure or missing key falls back
// to a blind update.
func (e *etcdClient) UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (recreated bool, err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "UpdateIfDifferent",
				logfields.Error, err,
				fieldKey, key,
				fieldValue, string(value),
				fieldAttachLease, lease,
				fieldRecreated, recreated,
			)
		}()
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return false, Hint(err)
	}

	duration := spanstat.Start()

	getR, err := e.client.Get(ctx, key)
	// Using lr.Error for convenience, as it matches lr.Done() when err is nil
	lr.Error(err, -1)

	increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)
	// On error, attempt update blindly
	if err != nil || getR.Count == 0 {
		return true, e.Update(ctx, key, value, lease)
	}
	// Rewrite when the existing value is not attached to our managed lease.
	if lease && !e.leaseManager.KeyHasLease(key, client.LeaseID(getR.Kvs[0].Lease)) {
		return true, e.Update(ctx, key, value, lease)
	}
	// if value is not equal then update.
	if !bytes.Equal(getR.Kvs[0].Value, value) {
		return true, e.Update(ctx, key, value, lease)
	}

	return false, nil
}
// CreateOnlyIfLocked atomically creates a key if the client is still holding the given lock or fails if it already exists.
// It returns (true, nil) when the key was created, (false, nil) when the
// key already existed, and (false, ErrLockLeaseExpired) when the lock is no
// longer held.
func (e *etcdClient) CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (success bool, err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "CreateOnlyIfLocked",
				logfields.Error, err,
				fieldKey, key,
				fieldValue, string(value),
				fieldAttachLease, lease,
				fieldSuccess, success,
			)
		}()
	}
	var leaseID client.LeaseID
	if lease {
		leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
		if err != nil {
			return false, Hint(err)
		}
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return false, Hint(err)
	}
	duration := spanstat.Start()

	req := client.OpPut(key, string(value), client.WithLease(leaseID))
	cnds := []client.Cmp{
		client.Compare(client.Version(key), "=", 0),
		lock.Comparator().(client.Cmp),
	}

	// We need to do a get in the else of the txn to detect if the lock is still
	// valid or not.
	opGets := []client.Op{
		client.OpGet(key),
	}

	txnresp, err := e.client.Txn(ctx).If(cnds...).Then(req).Else(opGets...).Commit()
	increaseMetric(key, metricSet, "CreateOnlyLocked", duration.EndError(err).Total(), err)
	if err != nil {
		lr.Error(err, -1)
		e.leaseManager.CancelIfExpired(err, leaseID)
		return false, Hint(err)
	}
	lr.Done()

	// The txn can fail for the following reasons:
	//  - Key version is not zero;
	//  - Lock does not exist or is expired.
	// For both of those cases, the key that we are comparing might or not
	// exist, so we have:
	//  A - Key does not exist and lock does not exist => ErrLockLeaseExpired
	//  B - Key does not exist and lock exist => txn should succeed
	//  C - Key does exist, version is == 0 and lock does not exist => ErrLockLeaseExpired
	//  D - Key does exist, version is != 0 and lock does not exist => ErrLockLeaseExpired
	//  E - Key does exist, version is == 0 and lock does exist => txn should succeed
	//  F - Key does exist, version is != 0 and lock does exist => txn fails but the returned error is nil!
	if !txnresp.Succeeded {
		// case F
		if len(txnresp.Responses[0].GetResponseRange().Kvs) != 0 &&
			txnresp.Responses[0].GetResponseRange().Kvs[0].Version != 0 {
			return false, nil
		}

		// case A, C and D
		return false, ErrLockLeaseExpired
	}

	// case B and E
	return true, nil
}
// CreateOnly creates a key with the value and will fail if the key already exists.
// It returns (true, nil) when the key was created and (false, nil) when the
// key already existed. When lease is true the value is attached to the
// key's managed lease.
func (e *etcdClient) CreateOnly(ctx context.Context, key string, value []byte, lease bool) (success bool, err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "CreateOnly",
				logfields.Error, err,
				fieldKey, key,
				fieldValue, string(value),
				fieldAttachLease, lease,
				fieldSuccess, success,
			)
		}()
	}
	var leaseID client.LeaseID
	if lease {
		leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
		if err != nil {
			return false, Hint(err)
		}
	}

	// Apply client-side rate limiting before issuing the request.
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return false, Hint(err)
	}

	defer func(duration *spanstat.SpanStat) {
		increaseMetric(key, metricSet, "CreateOnly", duration.EndError(err).Total(), err)
	}(spanstat.Start())
	req := client.OpPut(key, string(value), client.WithLease(leaseID))
	// Version == 0 means the key does not exist yet.
	cond := client.Compare(client.Version(key), "=", 0)

	txnresp, err := e.client.Txn(ctx).If(cond).Then(req).Commit()

	if err != nil {
		lr.Error(err, -1)
		e.leaseManager.CancelIfExpired(err, leaseID)
		return false, Hint(err)
	}

	lr.Done()

	return txnresp.Succeeded, nil
}
// ListPrefixIfLocked returns a list of keys matching the prefix only if the client is still holding the given lock.
//
// The GET is bundled with the lock comparator in a single transaction, so the
// result is returned only if the lock lease is still valid; otherwise
// ErrLockLeaseExpired is returned.
func (e *etcdClient) ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (v KeyValuePairs, err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "ListPrefixIfLocked",
				logfields.Error, err,
				fieldPrefix, prefix,
				fieldNumEntries, len(v),
			)
		}()
	}
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return nil, Hint(err)
	}
	defer func(duration *spanstat.SpanStat) {
		increaseMetric(prefix, metricRead, "ListPrefixLocked", duration.EndError(err).Total(), err)
	}(spanstat.Start())

	opGet := client.OpGet(prefix, client.WithPrefix())
	cmp := lock.Comparator().(client.Cmp)
	txnReply, err := e.client.Txn(ctx).If(cmp).Then(opGet).Commit()
	if err == nil && !txnReply.Succeeded {
		// The comparator failed: the lock lease is no longer valid.
		err = ErrLockLeaseExpired
	}
	if err != nil {
		lr.Error(err, -1)
		return nil, Hint(err)
	}
	lr.Done()

	getR := txnReply.Responses[0].GetResponseRange()
	pairs := KeyValuePairs(make(map[string]Value, getR.Count))
	for i := int64(0); i < getR.Count; i++ {
		pairs[string(getR.Kvs[i].Key)] = Value{
			Data:        getR.Kvs[i].Value,
			ModRevision: uint64(getR.Kvs[i].ModRevision),
			// Populate the lease ID as well, for consistency with ListPrefix.
			LeaseID: getR.Kvs[i].Lease,
		}
	}

	return pairs, nil
}
// ListPrefix returns a map of matching keys
func (e *etcdClient) ListPrefix(ctx context.Context, prefix string) (v KeyValuePairs, err error) {
	if traceEnabled {
		defer func() {
			Trace(e.logger, "ListPrefix",
				logfields.Error, err,
				fieldPrefix, prefix,
				fieldNumEntries, len(v),
			)
		}()
	}
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return nil, Hint(err)
	}
	defer func(duration *spanstat.SpanStat) {
		increaseMetric(prefix, metricRead, "ListPrefix", duration.EndError(err).Total(), err)
	}(spanstat.Start())

	getR, err := e.client.Get(ctx, prefix, client.WithPrefix())
	if err != nil {
		lr.Error(err, -1)
		return nil, Hint(err)
	}
	lr.Done()

	// Convert the raw etcd key-values into the kvstore representation.
	pairs := make(KeyValuePairs, getR.Count)
	for _, kv := range getR.Kvs {
		pairs[string(kv.Key)] = Value{
			Data:        kv.Value,
			ModRevision: uint64(kv.ModRevision),
			LeaseID:     kv.Lease,
		}
	}
	return pairs, nil
}
// Close closes the etcd session
func (e *etcdClient) Close() {
	// Stop the status checker first, then tear down the client connection.
	close(e.stopStatusChecker)
	err := e.client.Close()
	if err != nil {
		e.logger.Warn(
			"Failed to close etcd client",
			logfields.Error, err,
		)
	}

	// Wait until all child goroutines spawned by the lease managers have terminated.
	e.leaseManager.Wait()
	e.lockLeaseManager.Wait()
}
// ListAndWatch implements the BackendOperations.ListAndWatch using etcd
func (e *etcdClient) ListAndWatch(ctx context.Context, prefix string) EventChan {
	out := make(chan KeyValueEvent)
	// Derive the metrics scope from the prefix, without the trailing slash.
	scope := GetScopeFromKey(strings.TrimRight(prefix, "/"))
	go e.watch(ctx, prefix, emitter{events: out, scope: scope})
	return out
}
// RegisterLeaseExpiredObserver registers a function which is executed when
// the lease associated with a key having the given prefix is detected as expired.
// If the function is nil, the previous observer (if any) is unregistered.
func (e *etcdClient) RegisterLeaseExpiredObserver(prefix string, fn func(key string)) {
	if fn != nil {
		e.leaseExpiredObservers.Store(prefix, fn)
		return
	}
	e.leaseExpiredObservers.Delete(prefix)
}
// expiredLeaseObserver invokes every registered observer whose prefix
// matches the key whose lease just expired.
func (e *etcdClient) expiredLeaseObserver(key string) {
	e.leaseExpiredObservers.Range(func(prefix string, observer func(string)) bool {
		if strings.HasPrefix(key, prefix) {
			observer(key)
		}
		// Keep iterating: multiple registered prefixes may match.
		return true
	})
}
// RegisterLockLeaseExpiredObserver registers a function which is executed when
// the lease associated with a key having the given prefix is detected as expired.
// If the function is nil, the previous observer (if any) is unregistered.
func (e *etcdClient) RegisterLockLeaseExpiredObserver(prefix string, fn func(key string)) {
	if fn != nil {
		e.lockLeaseExpiredObservers.Store(prefix, fn)
		return
	}
	e.lockLeaseExpiredObservers.Delete(prefix)
}
// expiredLockLeaseObserver invokes every registered lock observer whose
// prefix matches the lock key whose lease just expired.
func (e *etcdClient) expiredLockLeaseObserver(key string) {
	e.lockLeaseExpiredObservers.Range(func(prefix string, observer func(string)) bool {
		if strings.HasPrefix(key, prefix) {
			observer(key)
		}
		// Keep iterating: multiple registered prefixes may match.
		return true
	})
}
// UserEnforcePresence creates a user in etcd if not already present, and grants the specified roles.
func (e *etcdClient) UserEnforcePresence(ctx context.Context, name string, roles []string) error {
	e.logger.Debug("Creating user", FieldUser, name)
	_, err := e.client.Auth.UserAddWithOptions(ctx, name, "", &client.UserAddOptions{NoPassword: true})
	switch {
	case err == nil:
		// User freshly created.
	case errors.Is(err, v3rpcErrors.ErrUserAlreadyExist):
		// Idempotent: an existing user is not an error.
		e.logger.Debug("User already exists", FieldUser, name)
	default:
		return err
	}

	for _, role := range roles {
		e.logger.Debug("Granting role to user",
			FieldRole, role,
			FieldUser, name,
		)
		if _, err := e.client.Auth.UserGrantRole(ctx, name, role); err != nil {
			return err
		}
	}
	return nil
}
// UserEnforceAbsence deletes a user from etcd, if present.
//
// A missing user is not treated as an error (the etcd "user not found"
// error is swallowed), making the operation idempotent.
func (e *etcdClient) UserEnforceAbsence(ctx context.Context, name string) error {
	e.logger.Debug("Deleting user", FieldUser, name)
	_, err := e.client.Auth.UserDelete(ctx, name)
	if err != nil {
		if errors.Is(err, v3rpcErrors.ErrUserNotFound) {
			e.logger.Debug("User not found", FieldUser, name)
		} else {
			return err
		}
	}
	return nil
}
// getClientCertificateReloader returns a TLS callback that re-reads the
// client certificate/key pair from disk on each handshake, so that rotated
// credentials are picked up without restarting. It returns (nil, nil) when
// the configuration file does not specify both a certificate and a key.
func getClientCertificateReloader(fpath string) (func(*tls.CertificateRequestInfo) (*tls.Certificate, error), error) {
	raw, err := os.ReadFile(fpath)
	if err != nil {
		return nil, err
	}

	var cfg yamlKeyPairConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	if cfg.Certfile == "" || cfg.Keyfile == "" {
		// No client keypair configured: nothing to reload.
		return nil, nil
	}

	return func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
		pair, err := tls.LoadX509KeyPair(cfg.Certfile, cfg.Keyfile)
		return &pair, err
	}, nil
}
// yamlKeyPairConfig is a copy of relevant internal structure fields in
// go.etcd.io/etcd/clientv3/yaml needed to implement certificates reload,
// not depending on the deprecated newconfig/yamlConfig.
type yamlKeyPairConfig struct {
	// Certfile is the path to the PEM-encoded client certificate.
	Certfile string `json:"cert-file"`
	// Keyfile is the path to the PEM-encoded client private key.
	Keyfile string `json:"key-file"`
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io"
"net"
"net/url"
"os"
"regexp"
"strings"
client "go.etcd.io/etcd/client/v3"
clientyaml "go.etcd.io/etcd/client/v3/yaml"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"sigs.k8s.io/yaml"
"github.com/cilium/cilium/pkg/time"
)
// etcdVersionRegexp extracts the etcd server version from the JSON body
// returned by the server's /version endpoint (e.g., {"etcdserver":"3.5.x",...}).
var etcdVersionRegexp = regexp.MustCompile(`"etcdserver":"(?P<version>.*?)"`)
// EtcdDbgDialer enables to override the LookupIP and DialContext functions,
// e.g., to support service name to IP address resolution when CoreDNS is not
// the configured DNS server --- for pods running in the host network namespace.
type EtcdDbgDialer interface {
	// LookupIP resolves the given hostname to one or more IP addresses.
	LookupIP(ctx context.Context, hostname string) ([]net.IP, error)
	// DialContext establishes a connection to the given address.
	DialContext(ctx context.Context, addr string) (net.Conn, error)
}
// DefaultEtcdDbgDialer provides a default implementation of the EtcdDbgDialer interface.
type DefaultEtcdDbgDialer struct{}

// LookupIP resolves the hostname through the default system resolver.
func (DefaultEtcdDbgDialer) LookupIP(ctx context.Context, hostname string) ([]net.IP, error) {
	return net.DefaultResolver.LookupIP(ctx, "ip", hostname)
}

// DialContext opens a plain TCP connection to the given address.
func (DefaultEtcdDbgDialer) DialContext(ctx context.Context, addr string) (net.Conn, error) {
	var dialer net.Dialer
	return dialer.DialContext(ctx, "tcp", addr)
}
// EtcdDbg performs a set of sanity checks concerning the connection to the given
// etcd cluster, and outputs the result in a user-friendly format.
//
// The checks are purely diagnostic: per-endpoint reachability (DNS, TCP, TLS),
// certificate inspection, and finally a full etcd client connection attempt.
// All findings are written to w; the function has no return value.
func EtcdDbg(ctx context.Context, cfgfile string, dialer EtcdDbgDialer, w io.Writer) {
	iw := newIndentedWriter(w, 0)
	iw.Println("📄 Configuration path: %s", cfgfile)
	cfg, err := clientyaml.NewConfig(cfgfile)
	if err != nil {
		iw.Println("❌ Cannot parse etcd configuration: %s", err)
		return
	}

	iw.NewLine()
	if len(cfg.Endpoints) == 0 {
		iw.Println("❌ No available endpoints")
	} else {
		iw.Println("🔌 Endpoints:")
		for _, ep := range cfg.Endpoints {
			iiw := iw.WithExtraIndent(3)
			iiw.Println("- %s", ep)
			// Clone the TLS config: etcdDbgEndpoint may mutate ServerName.
			etcdDbgEndpoint(ctx, ep, cfg.TLS.Clone(), dialer, iiw.WithExtraIndent(2))
		}
	}

	iw.NewLine()
	iw.Println("🔑 Digital certificates:")
	etcdDbgCerts(cfgfile, cfg, iw.WithExtraIndent(3))

	iw.NewLine()
	iw.Println("⚙️ Etcd client:")
	iiw := iw.WithExtraIndent(3)
	cfg.Context = ctx
	// Silence the etcd client's own logging; we report failures ourselves.
	cfg.Logger = zap.NewNop()
	// Route dialing through the (possibly overridden) debug dialer.
	cfg.DialOptions = append(cfg.DialOptions, grpc.WithContextDialer(dialer.DialContext))
	cl, err := client.New(*cfg)
	if err != nil {
		iiw.Println("❌ Failed to create etcd client: %s", err)
		return
	}
	defer cl.Close()

	// Try to retrieve the heartbeat key, as a basic authorization check.
	// It doesn't really matter whether the heartbeat key exists or not.
	// Client.New() does not block on connection failure, and hence
	// we need to check the connection state to determine the type of failure.
	ctxGet, cancelGet := context.WithTimeout(ctx, 1*time.Second)
	defer cancelGet()
	out, err := cl.Get(ctxGet, HeartbeatPath)
	if err != nil {
		if cl.ActiveConnection().GetState() == connectivity.TransientFailure {
			iiw.Println("❌ Failed to establish connection: %s", err)
		} else {
			iiw.Println("❌ Failed to retrieve key from etcd: %s", err)
		}
		return
	}

	iiw.Println("✅ Etcd connection successfully established")
	if out.Header != nil {
		iiw.Println("ℹ️ Etcd cluster ID: %x", out.Header.GetClusterId())
	}
}
// etcdDbgEndpoint probes a single etcd endpoint: hostname resolution, raw TCP
// connectivity, TLS handshake (for https endpoints) and finally a plain
// GET /version request to detect TLS 1.3 client-auth failures and report the
// server version. All findings are written to iw.
func etcdDbgEndpoint(ctx context.Context, ep string, tlscfg *tls.Config, dialer EtcdDbgDialer, iw *indentedWriter) {
	u, err := url.Parse(ep)
	if err != nil {
		iw.Println("❌ Cannot parse endpoint: %s", err)
		return
	}

	// Hostname resolution (skipped when the endpoint is already an IP literal).
	hostname := u.Hostname()
	if net.ParseIP(hostname) == nil {
		ips, err := dialer.LookupIP(ctx, hostname)
		if err != nil {
			iw.Println("❌ Cannot resolve hostname: %s", err)
		} else {
			iw.Println("✅ Hostname resolved to: %s", etcdDbgOutputIPs(ips))
		}
	}

	// TCP Connection
	conn, err := dialer.DialContext(ctx, u.Host)
	if err != nil {
		iw.Println("❌ Cannot establish TCP connection to %s: %s", u.Host, err)
		return
	}

	iw.Println("✅ TCP connection successfully established to %s", conn.RemoteAddr())
	if u.Scheme != "https" {
		conn.Close()
		return
	}

	// TLS Connection (tconn.Close() below also closes the underlying conn).
	if tlscfg.ServerName == "" {
		tlscfg.ServerName = hostname
	}

	// We use GetClientCertificate rather than Certificates to return an error
	// in case the certificate does not match any of the requested CAs. One
	// limitation, though, is that the match appears to be performed based on
	// the distinguished name only, and it doesn't fail if two CAs have the same
	// DN (which is typically the case with the default CA generated by Cilium).
	var acceptableCAs [][]byte
	tlscfg.GetClientCertificate = func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
		for _, chain := range tlscfg.Certificates {
			if err := cri.SupportsCertificate(&chain); err == nil {
				return &chain, nil
			}
		}

		// Remember the CAs the server asked for, to report them on failure.
		acceptableCAs = cri.AcceptableCAs
		return nil, fmt.Errorf("client certificate is not signed by any acceptable CA")
	}

	tconn := tls.Client(conn, tlscfg)
	defer tconn.Close()

	err = tconn.HandshakeContext(ctx)
	if err != nil {
		iw.Println("❌ Cannot establish TLS connection to %s: %s", u.Host, err)
		if len(acceptableCAs) > 0 {
			// The output is suboptimal being DER-encoded, but there doesn't
			// seem to be any easy way to parse it (the utility used by
			// ParseCertificate is not exported). Better than nothing though.
			var buf bytes.Buffer
			for i, ca := range acceptableCAs {
				if i != 0 {
					buf.WriteString(", ")
				}
				buf.WriteRune('"')
				buf.WriteString(string(ca))
				buf.WriteRune('"')
			}
			iw.Println("ℹ️ Acceptable CAs: %s", buf.String())
		}
		return
	}

	iw.Println("✅ TLS connection successfully established to %s", tconn.RemoteAddr())
	iw.Println("ℹ️ Negotiated TLS version: %s, ciphersuite %s",
		tls.VersionName(tconn.ConnectionState().Version),
		tls.CipherSuiteName(tconn.ConnectionState().CipherSuite))

	// With TLS 1.3, the server doesn't acknowledge whether client authentication
	// succeeded, and a possible error is returned only when reading some data.
	// Hence, let's trigger a request, so that we see if it failed.
	tconn.SetDeadline(time.Now().Add(1 * time.Second))
	data := fmt.Sprintf("GET /version HTTP/1.1\r\nHost: %s\r\n\r\n", u.Host)
	_, err = tconn.Write([]byte(data))
	if err != nil {
		iw.Println("❌ Failed to perform a GET /version request: %s", err)
		return
	}

	buf := make([]byte, 1000)
	_, err = tconn.Read(buf)
	if err != nil {
		opErr := &net.OpError{}
		// A "remote error" indicates the server rejected our client certificate.
		if errors.As(err, &opErr) && opErr.Op == "remote error" {
			iw.Println("❌ TLS client authentication failed: %s", err)
		} else {
			iw.Println("❌ Failed to retrieve GET /version answer: %s", err)
		}
		return
	}

	matches := etcdVersionRegexp.FindAllStringSubmatch(string(buf), 1)
	if len(matches) != 1 {
		iw.Println("⚠️ Could not retrieve etcd server version")
		return
	}

	iw.Println("ℹ️ Etcd server version: %s", matches[0][etcdVersionRegexp.SubexpIndex("version")])
}
// etcdDbgCerts inspects and pretty-prints the TLS material referenced by the
// etcd configuration: the root CA certificates, the client certificate chain
// (with a best-effort verification against the configured roots), and whether
// username/password authentication is configured.
func etcdDbgCerts(cfgfile string, cfg *client.Config, iw *indentedWriter) {
	if cfg.TLS.RootCAs == nil {
		iw.Println("⚠️ Root CA unset: using system pool")
	} else {
		// Retrieve the RootCA path from the configuration, as it appears
		// that we cannot introspect cfg.TLS.RootCAs.
		certs, err := etcdDbgRetrieveRootCAFile(cfgfile)
		if err != nil {
			iw.Println("❌ Failed to retrieve Root CA path: %s", err)
		} else {
			iw.Println("✅ TLS Root CA certificates:")
			for _, cert := range certs {
				parsed, err := x509.ParseCertificate(cert)
				if err != nil {
					iw.Println("❌ Failed to parse certificate: %s", err)
					continue
				}

				etcdDbgOutputCert(parsed, iw.WithExtraIndent(3))
			}
		}
	}

	if len(cfg.TLS.Certificates) == 0 {
		iw.Println("⚠️ No available TLS client certificates")
	} else {
		iw.Println("✅ TLS client certificates:")
		for _, cert := range cfg.TLS.Certificates {
			if len(cert.Certificate) == 0 {
				iw.Println("❌ The certificate looks invalid")
				continue
			}

			leaf, err := x509.ParseCertificate(cert.Certificate[0])
			if err != nil {
				iw.Println("❌ Failed to parse certificate: %s", err)
				continue
			}

			iiw := iw.WithExtraIndent(3)
			etcdDbgOutputCert(leaf, iiw)

			iiw = iiw.WithExtraIndent(2)

			// Print intermediate certificates, if any.
			intermediates := x509.NewCertPool()
			if len(cert.Certificate) > 1 {
				// Print the section header once, not once per intermediate
				// (the original emitted it inside the loop).
				iiw.Println("Intermediates:")
			}
			for _, der := range cert.Certificate[1:] {
				intermediate, err := x509.ParseCertificate(der)
				if err != nil {
					// Use the inner writer for consistent indentation with
					// the rest of this section.
					iiw.Println("❌ Failed to parse intermediate certificate: %s", err)
					continue
				}

				etcdDbgOutputCert(intermediate, iiw)
				intermediates.AddCert(intermediate)
			}

			// Attempt to verify whether the given certificate can be validated
			// using the configured root CAs. Although a failure is not necessarily
			// an error, as the remote etcd server may be configured with a different
			// root CA, it still signals a misconfiguration in most cases.
			opts := x509.VerifyOptions{
				Roots:         cfg.TLS.RootCAs,
				Intermediates: intermediates,
			}

			if _, err := leaf.Verify(opts); err != nil {
				iiw.Println("⚠️ Cannot verify certificate with the configured root CAs")
			}
		}
	}

	if cfg.Username != "" {
		passwd := "unset"
		if cfg.Password != "" {
			passwd = "set"
		}

		iw.Println("✅ Username set to %s, password is %s", cfg.Username, passwd)
	}
}
func etcdDbgOutputIPs(ips []net.IP) string {
var buf bytes.Buffer
for i, ip := range ips {
if i > 0 {
buf.WriteString(", ")
}
if i == 4 {
buf.WriteString("...")
break
}
buf.WriteString(ip.String())
}
return buf.String()
}
// etcdDbgRetrieveRootCAFile reads the trusted CA file referenced by the etcd
// configuration and returns the DER bytes of every CERTIFICATE PEM block it
// contains. It returns an error when the path is absent, unreadable, or the
// file holds no certificate.
func etcdDbgRetrieveRootCAFile(cfgfile string) (certs [][]byte, err error) {
	var yc struct {
		TrustedCAfile string `json:"trusted-ca-file"`
	}

	b, err := os.ReadFile(cfgfile)
	if err != nil {
		return nil, err
	}
	if err := yaml.Unmarshal(b, &yc); err != nil {
		return nil, err
	}
	if yc.TrustedCAfile == "" {
		return nil, errors.New("not provided")
	}

	data, err := os.ReadFile(yc.TrustedCAfile)
	if err != nil {
		return nil, err
	}

	// Walk all PEM blocks, collecting only the certificates.
	for {
		var block *pem.Block
		block, data = pem.Decode(data)
		if block == nil {
			break
		}
		if block.Type == "CERTIFICATE" {
			certs = append(certs, block.Bytes)
		}
	}

	if len(certs) == 0 {
		return nil, errors.New("no certificate found")
	}
	return certs, nil
}
// etcdDbgOutputCert pretty-prints the identifying fields of the given
// certificate: serial number, subject, issuer and validity window.
func etcdDbgOutputCert(cert *x509.Certificate, iw *indentedWriter) {
	// Format the serial number as colon-separated pairs of hex digits.
	sn := cert.SerialNumber.Text(16)
	for i := 2; i < len(sn); i += 3 {
		sn = sn[:i] + ":" + sn[i:]
	}

	// sn is already a string: the redundant string(sn) conversion was dropped.
	iw.Println("- Serial number: %s", sn)
	iw.Println(" Subject: %s", cert.Subject)
	iw.Println(" Issuer: %s", cert.Issuer)
	iw.Println(" Validity:")
	iw.Println(" Not before: %s", cert.NotBefore)
	iw.Println(" Not after: %s", cert.NotAfter)
}
type indentedWriter struct {
w io.Writer
indent []byte
}
func newIndentedWriter(w io.Writer, indent int) *indentedWriter {
return &indentedWriter{w: w, indent: []byte(strings.Repeat(" ", indent))}
}
func (iw *indentedWriter) NewLine() { iw.w.Write([]byte("\n")) }
func (iw *indentedWriter) Println(format string, a ...any) {
iw.w.Write(iw.indent)
fmt.Fprintf(iw.w, format, a...)
iw.NewLine()
}
func (iw *indentedWriter) WithExtraIndent(indent int) *indentedWriter {
return newIndentedWriter(iw.w, len(iw.indent)+indent)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"context"
"errors"
"log/slog"
"strings"
"sync"
v3rpcErrors "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
client "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/spanstat"
"github.com/cilium/cilium/pkg/time"
)
// leaseInfo tracks how many keys are attached to a lease, together with the
// concurrency session keeping that lease alive.
type leaseInfo struct {
	// count is the number of keys currently attached to this lease.
	count   uint32
	session *concurrency.Session
}

// etcdLeaseManager manages the acquisition of the leases, and keeps track of
// which lease is attached to which etcd key.
type etcdLeaseManager struct {
	client *client.Client
	log    *slog.Logger

	// ttl is the lease time-to-live requested from etcd.
	ttl time.Duration
	// limit is the maximum number of keys attached to a single lease.
	limit uint32
	// expired, if non-nil, is invoked for every key whose lease expired.
	expired func(key string)

	// mu guards leases, keys, current and acquiring.
	mu lock.RWMutex
	// leases maps each managed lease to its bookkeeping info.
	leases map[client.LeaseID]*leaseInfo
	// keys maps each key to the lease it is attached to.
	keys map[string]client.LeaseID
	// current is the lease new keys are preferentially attached to.
	current client.LeaseID
	// acquiring is non-nil while a new lease acquisition is in flight;
	// it is closed to wake up the waiters once the acquisition completes.
	acquiring chan struct{}

	// wg tracks the per-session expiration watcher goroutines.
	wg sync.WaitGroup
}
// newEtcdLeaseManager builds and returns a new lease manager instance.
func newEtcdLeaseManager(logger *slog.Logger, cl *client.Client, ttl time.Duration, limit uint32, expired func(key string)) *etcdLeaseManager {
	mgr := &etcdLeaseManager{
		log:    logger,
		client: cl,

		ttl:     ttl,
		limit:   limit,
		expired: expired,

		// No lease acquired yet: maps start empty and current unset.
		current: client.NoLease,
		leases:  make(map[client.LeaseID]*leaseInfo),
		keys:    make(map[string]client.LeaseID),
	}
	return mgr
}
// GetLeaseID returns a lease ID, and associates it to the given key. It leverages
// one of the already acquired leases if they are not already attached to too many
// keys, otherwise a new one is acquired.
//
// There's a small possibility that the returned lease is already expired, or gets
// expired immediately before use (due the time window between the lease expiration
// on the etcd server and the subsequent client side detection and garbage collection).
// As we cannot completely remove this uncertainty period, let's adopt the easiest
// approach here, without explicitly checking if the lease is expired before returning
// it (given that it would be a client-side check only). Instead, let's just rely on
// the fact that the operation will fail (as the lease is no longer valid), triggering
// a retry. At that point, a new (hopefully valid) lease will be retrieved again.
func (elm *etcdLeaseManager) GetLeaseID(ctx context.Context, key string) (client.LeaseID, error) {
	// Delegate to GetSession, which holds all the lease bookkeeping logic.
	sess, err := elm.GetSession(ctx, key)
	if err != nil {
		return client.NoLease, err
	}
	return sess.Lease(), nil
}
// GetSession returns a session, and associates it to the given key. It leverages
// one of the already acquired leases if they are not already attached to too many
// keys, otherwise a new one is acquired.
//
// There's a small possibility that the returned session is already expired, or gets
// expired immediately before use (due the time window between the lease expiration
// on the etcd server and the subsequent client side detection and garbage collection).
// As we cannot completely remove this uncertainty period, let's adopt the easiest
// approach here, without explicitly checking if the session is expired before returning
// it (given that it would be a client-side check only). Instead, let's just rely on
// the fact that the operation will fail (as the lease is no longer valid), triggering
// a retry. At that point, a new (hopefully valid) session will be retrieved again.
//
// NOTE: the mutex is deliberately released before every return and before the
// (potentially slow) lease acquisition; the statement order here is significant.
func (elm *etcdLeaseManager) GetSession(ctx context.Context, key string) (*concurrency.Session, error) {
	elm.mu.Lock()

	// This key is already attached to a lease, hence just return it.
	if leaseID := elm.keys[key]; leaseID != client.NoLease {
		// The entry is guaranteed to exist if the lease is associated with a key
		info := elm.leases[leaseID]
		elm.mu.Unlock()
		return info.session, nil
	}

	// Return the current lease if it has not been used more than limit times
	if info := elm.leases[elm.current]; info != nil && info.count < elm.limit {
		info.count++
		elm.keys[key] = elm.current
		elm.mu.Unlock()

		return info.session, nil
	}

	// Otherwise, loop through the other known leases to see if any has been released
	for lease, info := range elm.leases {
		if info.count < elm.limit {
			elm.current = lease
			info.count++
			elm.keys[key] = elm.current
			elm.mu.Unlock()

			return info.session, nil
		}
	}

	// If none is found, we need to acquire a new lease. acquiring is a channel
	// used to detect whether we are already in the process of acquiring a new
	// lease, to prevent multiple acquisitions in parallel.
	acquiring := elm.acquiring
	if acquiring == nil {
		elm.acquiring = make(chan struct{})
	}

	// Unlock, so that we don't block other parallel operations (e.g., releases)
	// while acquiring a new lease, since it might be a slow operation.
	elm.mu.Unlock()

	// Someone else is already acquiring a new lease. Wait until
	// it completes, and then retry again.
	if acquiring != nil {
		select {
		case <-acquiring:
			// Acquisition completed (channel closed): retry from scratch.
			return elm.GetSession(ctx, key)
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-elm.client.Ctx().Done():
			return nil, elm.client.Ctx().Err()
		}
	}

	// Otherwise, we can proceed to acquire a new lease.
	session, err := elm.newSession(ctx)

	elm.mu.Lock()

	// Signal that the acquisition process has completed.
	close(elm.acquiring)
	elm.acquiring = nil

	if err != nil {
		elm.mu.Unlock()
		return nil, err
	}

	elm.current = session.Lease()
	elm.leases[session.Lease()] = &leaseInfo{session: session}
	elm.mu.Unlock()

	// Recurse so the new lease is picked up through the normal paths above.
	return elm.GetSession(ctx, key)
}
// Release decrements the counter of the lease attached to the given key.
func (elm *etcdLeaseManager) Release(key string) {
	elm.mu.Lock()
	elm.releaseUnlocked(key)
	elm.mu.Unlock()
}
// ReleasePrefix decrements the counter of the leases attached to the keys
// starting with the given prefix.
func (elm *etcdLeaseManager) ReleasePrefix(prefix string) {
	elm.mu.Lock()
	defer elm.mu.Unlock()

	for key, id := range elm.keys {
		if !strings.HasPrefix(key, prefix) {
			continue
		}
		if info := elm.leases[id]; info != nil && info.count > 0 {
			info.count--
		}
		delete(elm.keys, key)
	}
}
// KeyHasLease returns whether the given key is associated with the specified lease.
func (elm *etcdLeaseManager) KeyHasLease(key string, leaseID client.LeaseID) bool {
	elm.mu.RLock()
	attached := elm.keys[key]
	elm.mu.RUnlock()
	return attached == leaseID
}
// CancelIfExpired verifies whether the error reports that the given lease has
// expired, and in that case aborts the corresponding keepalive process.
func (elm *etcdLeaseManager) CancelIfExpired(err error, leaseID client.LeaseID) {
	if !errors.Is(err, v3rpcErrors.ErrLeaseNotFound) {
		return
	}

	elm.mu.Lock()
	defer elm.mu.Unlock()
	if info := elm.leases[leaseID]; info != nil {
		// Orphan the session so its keepalive goroutine terminates.
		info.session.Orphan()
	}
}
// TotalLeases returns the number of managed leases.
func (elm *etcdLeaseManager) TotalLeases() uint32 {
	elm.mu.RLock()
	total := len(elm.leases)
	elm.mu.RUnlock()
	return uint32(total)
}
// Wait waits until all child goroutines terminated.
//
// This includes the per-session expiration watchers spawned by newSession.
func (elm *etcdLeaseManager) Wait() {
	elm.wg.Wait()
}
// newSession acquires a fresh lease from etcd and wraps it in a concurrency
// session whose keepalive outlives the request context. A watcher goroutine
// is spawned to react to the session's eventual expiration.
func (elm *etcdLeaseManager) newSession(ctx context.Context) (session *concurrency.Session, err error) {
	defer func(duration *spanstat.SpanStat) {
		increaseMetric("lease", metricSet, "AcquireLease", duration.EndError(err).Total(), err)
	}(spanstat.Start())

	grant, err := elm.client.Grant(ctx, int64(elm.ttl.Seconds()))
	if err != nil {
		return nil, err
	}

	// Construct the session specifying the lease just acquired. This allows to
	// split the possibly blocking operation (i.e., lease acquisition), from the
	// non-blocking one (i.e., the setup of the keepalive logic), so that we can use
	// different contexts. We want the lease acquisition to be controlled by the
	// context associated with the given request, while the keepalive process should
	// continue until either the etcd client is closed or the session is orphaned.
	session, err = concurrency.NewSession(elm.client,
		concurrency.WithLease(grant.ID),
		concurrency.WithTTL(int(elm.ttl.Seconds())),
	)
	if err != nil {
		return nil, err
	}

	elm.wg.Add(1)
	go elm.waitForExpiration(session)

	elm.log.Info(
		"New lease successfully acquired",
		logfields.LeaseID, grant.ID,
		logfields.TTL, elm.ttl,
	)
	return session, nil
}
// waitForExpiration blocks until the session expires, then removes the lease
// and all keys attached to it from the manager's bookkeeping, and notifies the
// expired observer for each removed key.
//
// NOTE: the observer callbacks are deliberately invoked after releasing the
// mutex, so they may call back into the manager without deadlocking.
func (elm *etcdLeaseManager) waitForExpiration(session *concurrency.Session) {
	defer elm.wg.Done()

	// Block until the session gets orphaned, either because it fails to be
	// renewed or the etcd client is closed.
	<-session.Done()

	select {
	case <-elm.client.Ctx().Done():
		// The context of the etcd client was closed
		return
	default:
	}

	elm.log.Warn(
		"Lease expired",
		logfields.LeaseID, session.Lease(),
	)

	elm.mu.Lock()
	delete(elm.leases, session.Lease())

	// Collect the keys attached to the expired lease while holding the lock.
	var keys []string
	for key, id := range elm.keys {
		if id == session.Lease() {
			keys = append(keys, key)
			delete(elm.keys, key)
		}
	}
	elm.mu.Unlock()

	if elm.expired != nil {
		for _, key := range keys {
			elm.expired(key)
		}
	}
}
// releaseUnlocked detaches the key from its lease and decrements the lease's
// usage counter. The caller must hold elm.mu.
func (elm *etcdLeaseManager) releaseUnlocked(key string) {
	id := elm.keys[key]
	if id == client.NoLease {
		// Key not attached to any lease: nothing to do.
		return
	}

	if info := elm.leases[id]; info != nil && info.count > 0 {
		info.count--
	}
	delete(elm.keys, key)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"context"
"github.com/cilium/cilium/pkg/spanstat"
)
// EventType defines the type of watch event that occurred
type EventType int

const (
	// EventTypeCreate represents a newly created key
	EventTypeCreate EventType = iota
	// EventTypeModify represents a modified key
	EventTypeModify
	// EventTypeDelete represents a deleted key
	EventTypeDelete
	// EventTypeListDone signals that the initial list operation has completed
	EventTypeListDone
)

// String() returns the human readable format of an event type
func (t EventType) String() string {
	// Table indexed by the iota values above; anything else is "unknown".
	names := [...]string{"create", "modify", "delete", "listDone"}
	if t >= 0 && int(t) < len(names) {
		return names[t]
	}
	return "unknown"
}
// KeyValueEvent is a change event for a Key/Value pair
type KeyValueEvent struct {
	// Typ is the type of event { EventTypeCreate | EventTypeModify | EventTypeDelete | EventTypeListDone }
	Typ EventType

	// Key is the kvstore key that changed
	Key string

	// Value is the kvstore value associated with the key
	Value []byte
}

// EventChan is a channel to receive events on
type EventChan <-chan KeyValueEvent
// emitter wraps the channel to send events to, to ensure it is accessed
// via the proper helper methods.
type emitter struct {
	// events is the send side of the channel handed out as EventChan.
	events chan<- KeyValueEvent
	// scope labels the metrics recorded when queueing events.
	scope string
}
// emit attempts to notify the watcher of an event within the given context.
// returning false if the context is done before the event is emitted.
func (e emitter) emit(ctx context.Context, event KeyValueEvent) bool {
	start := spanstat.Start()
	delivered := false
	select {
	case e.events <- event:
		delivered = true
	case <-ctx.Done():
		// Context cancelled before the receiver picked up the event.
	}
	// Record how long the event waited to be queued (or dropped).
	trackEventQueued(e.scope, event.Typ, start.End(delivered).Total())
	return delivered
}
// close closes the events channel, signaling to the receiver that no
// further events will be emitted.
func (e emitter) close() {
	close(e.events)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"strings"
"github.com/cilium/cilium/pkg/time"
)
// Value is an abstraction of the data stored in the kvstore as well as the
// mod revision of that data.
type Value struct {
	// Data is the raw value stored at the key.
	Data []byte
	// ModRevision is the etcd mod revision of the key.
	ModRevision uint64
	// LeaseID is the ID of the lease attached to the key, if any (0 otherwise).
	LeaseID int64
}

// KeyValuePairs is a map of key=value pairs
type KeyValuePairs map[string]Value
const (
	// BaseKeyPrefix is the base prefix that should be used for all keys
	BaseKeyPrefix = "cilium"

	// StatePrefix is the kvstore prefix used to store the Cilium's state.
	StatePrefix = BaseKeyPrefix + "/state"

	// CachePrefix is the kvstore prefix used to store the information retrieved
	// from a remote cluster and cached locally by KVStoreMesh.
	CachePrefix = BaseKeyPrefix + "/cache"

	// InitLockPath is the path to the init lock to test quorum
	InitLockPath = BaseKeyPrefix + "/.initlock"

	// HeartbeatPath is the path to the key at which the operator updates
	// the heartbeat
	HeartbeatPath = BaseKeyPrefix + "/.heartbeat"

	// ClusterConfigPrefix is the kvstore prefix to cluster configuration
	ClusterConfigPrefix = BaseKeyPrefix + "/cluster-config"

	// SyncedPrefix is the kvstore prefix used to convey whether
	// synchronization from an external source has completed for a given prefix
	SyncedPrefix = BaseKeyPrefix + "/synced"

	// HeartbeatWriteInterval is the interval in which the heartbeat key at
	// HeartbeatPath is updated
	HeartbeatWriteInterval = time.Minute
)

// StateToCachePrefix converts a kvstore prefix starting with "cilium/state"
// (holding the cilium state) to the corresponding one holding cached information
// from another kvstore (that is, "cilium/cache").
func StateToCachePrefix(prefix string) string {
	if !strings.HasPrefix(prefix, StatePrefix) {
		// Not a state prefix: return it unchanged.
		return prefix
	}
	return strings.Replace(prefix, StatePrefix, CachePrefix, 1)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"context"
"fmt"
"log/slog"
"github.com/davecgh/go-spew/spew"
"github.com/google/uuid"
"github.com/cilium/cilium/pkg/debug"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
var (
	// kvstoreLocks serializes local access to each kvstore lock path,
	// so that only one local consumer contends for a given etcd lock.
	kvstoreLocks = pathLocks{lockPaths: map[string]lockOwner{}}

	// staleLockTimeout is the timeout after which waiting for a believed
	// other local lock user for the same key is given up on and etcd is
	// asked directly. It is still highly unlikely that concurrent access
	// occurs as only one consumer will manage to acquire the newly
	// released lock. The only possibility of concurrent access is if a
	// consumer is *still* holding the lock but this is highly unlikely
	// given the duration of this timeout.
	staleLockTimeout = defaults.KVStoreStaleLockTimeout
)
// KVLocker represents a held kvstore lock that can be released.
type KVLocker interface {
	// Unlock releases the lock.
	Unlock(ctx context.Context) error
	// Comparator returns an object that should be used by the KVStore to make
	// sure if the lock is still valid for its client or nil if no such
	// verification exists.
	Comparator() any
}
// getLockPath returns the lock path representation of the given path.
func getLockPath(path string) string {
	const lockSuffix = ".lock"
	return path + lockSuffix
}
// lockOwner identifies who currently holds a local lock path and when the
// lock was taken (used by the GC to detect stale locks).
type lockOwner struct {
	// created is the time the lock was acquired.
	created time.Time
	// id uniquely identifies the lock holder.
	id uuid.UUID
}

// pathLocks is a mutex-guarded registry of locally-held lock paths.
type pathLocks struct {
	mutex     lock.RWMutex
	lockPaths map[string]lockOwner
}
// init registers the local lock registry with the debug subsystem, so its
// state can be dumped for troubleshooting.
func init() {
	debug.RegisterStatusObject("kvstore-locks", &kvstoreLocks)
}
// DebugStatus implements debug.StatusObject to provide debug status collection
// ability
func (pl *pathLocks) DebugStatus() string {
	pl.mutex.RLock()
	defer pl.mutex.RUnlock()
	return spew.Sdump(pl.lockPaths)
}
// runGC force-releases any local lock held longer than staleLockTimeout,
// logging each forced unlock.
func (pl *pathLocks) runGC(logger *slog.Logger) {
	pl.mutex.Lock()
	defer pl.mutex.Unlock()

	for path, owner := range pl.lockPaths {
		if time.Since(owner.created) <= staleLockTimeout {
			continue
		}
		logger.Error("Forcefully unlocking local kvstore lock", fieldKey, path)
		delete(pl.lockPaths, path)
	}
}
// lock acquires the local lock for path, polling every 10ms until the path
// becomes free or the context is cancelled. On success it returns the unique
// owner ID required to unlock.
func (pl *pathLocks) lock(ctx context.Context, path string) (id uuid.UUID, err error) {
	for {
		pl.mutex.Lock()
		if _, held := pl.lockPaths[path]; !held {
			newID := uuid.New()
			pl.lockPaths[path] = lockOwner{
				created: time.Now(),
				id:      newID,
			}
			pl.mutex.Unlock()
			return newID, nil
		}
		pl.mutex.Unlock()

		// Path is held by someone else: back off briefly before retrying.
		select {
		case <-time.After(10 * time.Millisecond):
		case <-ctx.Done():
			return uuid.UUID{}, fmt.Errorf("lock was cancelled: %w", ctx.Err())
		}
	}
}
// unlock releases the local lock for path, but only when it is still owned
// by the given ID (a stale owner cannot release a re-acquired lock).
func (pl *pathLocks) unlock(path string, id uuid.UUID) {
	pl.mutex.Lock()
	defer pl.mutex.Unlock()

	owner, ok := pl.lockPaths[path]
	if ok && owner.id == id {
		delete(pl.lockPaths, path)
	}
}
// Lock is a lock return by LockPath
type Lock struct {
	// path is the (unsuffixed) path that was locked.
	path string
	// id identifies the local lock owner within kvstoreLocks.
	id uuid.UUID
	// kvLock is the backend (kvstore-side) lock.
	kvLock KVLocker
	logger *slog.Logger
}
// LockPath locks the specified path. The key for the lock is not the path
// provided itself but the path with a suffix of ".lock" appended. The lock
// returned also contains a patch specific local Mutex which will be held.
//
// It is required to call Unlock() on the returned Lock to unlock
func LockPath(ctx context.Context, logger *slog.Logger, backend BackendOperations, path string) (l *Lock, err error) {
	// Acquire the local lock first, to serialize local consumers.
	id, err := kvstoreLocks.lock(ctx, path)
	if err != nil {
		return nil, err
	}

	// Bound the backend lock acquisition to one minute.
	toCtx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	kvLock, err := backend.LockPath(toCtx, path)
	if err != nil {
		// Roll back the local lock when the backend lock cannot be taken.
		kvstoreLocks.unlock(path, id)
		Trace(logger, "Failed to lock", fieldKey, path)
		return nil, fmt.Errorf("error while locking path %s: %w", path, err)
	}

	Trace(logger, "Successful lock", fieldKey, path)
	return &Lock{kvLock: kvLock, path: path, id: id, logger: logger}, nil
}
// RunLockGC inspects all local kvstore locks to determine whether they have
// been held longer than the stale lock timeout, and if so, unlocks them
// forcibly.
func RunLockGC(logger *slog.Logger) {
	kvstoreLocks.runGC(logger)
}
// Unlock unlocks a lock
func (l *Lock) Unlock(ctx context.Context) error {
	// Calling Unlock on a nil Lock is a no-op.
	if l == nil {
		return nil
	}

	// Unlock kvstore mutex first
	err := l.kvLock.Unlock(ctx)
	if err != nil {
		l.logger.Error("Unable to unlock kvstore lock",
			logfields.Error, err,
			fieldKey, l.path,
		)
	}

	// unlock local lock even if kvstore cannot be unlocked
	kvstoreLocks.unlock(l.path, l.id)
	Trace(l.logger, "Unlocked", fieldKey, l.path)

	// The kvstore error (if any) is still reported to the caller.
	return err
}
// Comparator delegates to the backend lock's Comparator, allowing the kvstore
// to verify that this lock is still valid for its client.
func (l *Lock) Comparator() any {
	return l.kvLock.Comparator()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"bytes"
"context"
"fmt"
"strings"
"unicode"
"github.com/cilium/statedb"
"github.com/cilium/statedb/index"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/cilium/cilium/api/v1/models"
)
// NewInMemoryClient returns a kvstore client backed by an in-memory statedb
// table (one table per cluster name). It panics if the table cannot be
// registered, e.g. on duplicate registration for the same cluster.
func NewInMemoryClient(db *statedb.DB, clusterName string) Client {
	tbl, err := statedb.NewTable(db, "kvstore-"+clusterName, inMemoryKeyIndex)
	if err != nil {
		panic(err)
	}
	return &inMemoryClient{
		db:          db,
		table:       tbl,
		clusterName: clusterName,
	}
}
// inMemoryObject is a single key/value pair stored in the in-memory table.
type inMemoryObject struct {
	key   string
	value []byte
}

// TableHeader implements statedb.TableWritable.
func (i inMemoryObject) TableHeader() []string {
	return []string{"Key", "Value"}
}

// TableRow implements statedb.TableWritable. The value is rendered verbatim
// when it consists solely of ASCII bytes, and as a "0x"-prefixed hex string
// otherwise.
func (i inMemoryObject) TableRow() []string {
	ascii := true
	for _, b := range i.value {
		if b > unicode.MaxASCII {
			ascii = false
			break
		}
	}

	var rendered string
	if ascii {
		rendered = string(i.value)
	} else {
		rendered = fmt.Sprintf("0x%x", i.value)
	}
	return []string{i.key, rendered}
}
// Compile-time assertion that inMemoryObject can be rendered as a table.
var _ statedb.TableWritable = inMemoryObject{}

var (
	// inMemoryKeyIndex is the unique primary index over the object key,
	// enabling exact and prefix queries on the in-memory table.
	inMemoryKeyIndex = statedb.Index[inMemoryObject, string]{
		Name: "key",
		FromObject: func(obj inMemoryObject) index.KeySet {
			return index.NewKeySet(index.String(obj.key))
		},
		FromKey:    index.String,
		FromString: index.FromString,
		Unique:     true,
	}
)
// inMemoryClient implements BackendOperations on top of a statedb table,
// for use in tests and environments without a real kvstore.
type inMemoryClient struct {
	db          *statedb.DB
	table       statedb.RWTable[inMemoryObject]
	clusterName string
}
// IsEnabled reports that the in-memory backend is always available.
func (c *inMemoryClient) IsEnabled() bool { return true }

// Close implements BackendOperations. There are no resources to release.
func (c *inMemoryClient) Close() {
}
// CreateOnly implements BackendOperations. The key is created only if it did
// not yet exist; an existing key yields (false, error). The lease flag is
// ignored by this backend.
func (c *inMemoryClient) CreateOnly(ctx context.Context, key string, value []byte, lease bool) (bool, error) {
	wtxn := c.db.WriteTxn(c.table)
	// Discards the insert on the early-return path below; presumably a
	// no-op once the transaction has been committed — same pattern as the
	// other write methods.
	defer wtxn.Abort()
	_, hadOld, _ := c.table.Insert(wtxn, inMemoryObject{
		key:   key,
		value: value,
	})
	if hadOld {
		return false, fmt.Errorf("key %q existed", key)
	}
	wtxn.Commit()
	return true, nil
}
// CreateOnlyIfLocked implements BackendOperations. The lock is ignored since
// the in-memory backend has no cross-client contention.
func (c *inMemoryClient) CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (bool, error) {
	return c.CreateOnly(ctx, key, value, lease)
}
// Delete implements BackendOperations. Deleting a non-existent key is not an
// error; the transaction is only committed when something was removed.
func (c *inMemoryClient) Delete(ctx context.Context, key string) error {
	txn := c.db.WriteTxn(c.table)
	defer txn.Abort()
	if _, existed, _ := c.table.Delete(txn, inMemoryObject{key: key}); existed {
		txn.Commit()
	}
	return nil
}
// DeleteIfLocked implements BackendOperations. The lock is ignored since the
// in-memory backend has no cross-client contention.
func (c *inMemoryClient) DeleteIfLocked(ctx context.Context, key string, lock KVLocker) error {
	return c.Delete(ctx, key)
}
// DeletePrefix implements BackendOperations, removing every key starting with
// the given path prefix in a single write transaction.
func (c *inMemoryClient) DeletePrefix(ctx context.Context, path string) error {
	wtxn := c.db.WriteTxn(c.table)
	defer wtxn.Commit()
	// NOTE(review): objects are deleted while iterating the prefix query on
	// the same transaction; this relies on statedb iterators tolerating
	// deletion during iteration — confirm against statedb documentation.
	for obj := range c.table.Prefix(wtxn, inMemoryKeyIndex.Query(path)) {
		c.table.Delete(wtxn, obj)
	}
	return nil
}
// Get implements BackendOperations. A missing key yields (nil, nil).
func (c *inMemoryClient) Get(ctx context.Context, key string) ([]byte, error) {
	if obj, _, ok := c.table.Get(c.db.ReadTxn(), inMemoryKeyIndex.Query(key)); ok {
		return obj.value, nil
	}
	return nil, nil
}
// GetIfLocked implements BackendOperations. The lock is ignored since the
// in-memory backend has no cross-client contention.
func (c *inMemoryClient) GetIfLocked(ctx context.Context, key string, lock KVLocker) ([]byte, error) {
	return c.Get(ctx, key)
}
// ListAndWatch implements BackendOperations. It registers a change iterator
// on the table and streams events for keys under the given prefix on an
// unbuffered channel. After draining the first batch of changes an
// EventTypeListDone marker is emitted; the channel is closed once ctx is
// cancelled.
// NOTE(review): sends into the events channel do not select on ctx, so the
// goroutine blocks until the consumer reads or the table changes — confirm
// consumers always drain the channel until cancellation.
func (c *inMemoryClient) ListAndWatch(ctx context.Context, prefix string) EventChan {
	// A write transaction is needed to register the change iterator; no
	// data is modified.
	wtxn := c.db.WriteTxn(c.table)
	changeIter, err := c.table.Changes(wtxn)
	wtxn.Commit()
	if err != nil {
		panic(fmt.Sprintf("BUG: Changes() returned error: %s", err))
	}

	events := make(chan KeyValueEvent)

	go func() {
		defer close(events)

		initDone := false
		// exists tracks which keys have already been announced, so later
		// changes can be classified as modify rather than create.
		exists := sets.New[string]()
		for {
			changes, watch := changeIter.Next(c.db.ReadTxn())
			for change := range changes {
				obj := change.Object
				// Only keys under the requested prefix are reported.
				if !strings.HasPrefix(obj.key, prefix) {
					continue
				}

				var typ EventType
				switch {
				case change.Deleted:
					typ = EventTypeDelete
					exists.Delete(obj.key)
				case exists.Has(obj.key):
					typ = EventTypeModify
				default:
					typ = EventTypeCreate
					exists.Insert(obj.key)
				}

				events <- KeyValueEvent{
					Typ:   typ,
					Key:   obj.key,
					Value: obj.value,
				}
			}

			// Signal completion of the initial listing exactly once.
			if !initDone {
				events <- KeyValueEvent{Typ: EventTypeListDone}
				initDone = true
			}

			// Wait for further table changes or cancellation.
			select {
			case <-watch:
			case <-ctx.Done():
				return
			}
		}
	}()

	return events
}
// ListPrefix implements BackendOperations, returning all key/value pairs
// whose key starts with the given prefix. The statedb revision is reported
// as the ModRevision; leases are not supported by this backend.
func (c *inMemoryClient) ListPrefix(ctx context.Context, prefix string) (KeyValuePairs, error) {
	pairs := KeyValuePairs{}
	txn := c.db.ReadTxn()
	for obj, rev := range c.table.Prefix(txn, inMemoryKeyIndex.Query(prefix)) {
		pairs[obj.key] = Value{
			Data:        obj.value,
			ModRevision: rev,
			LeaseID:     0,
		}
	}
	return pairs, nil
}
// ListPrefixIfLocked implements BackendOperations. The lock is ignored since
// the in-memory backend has no cross-client contention.
func (c *inMemoryClient) ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (KeyValuePairs, error) {
	return c.ListPrefix(ctx, prefix)
}
// LockPath implements BackendOperations. Distributed locking is not
// supported by the in-memory backend.
func (c *inMemoryClient) LockPath(ctx context.Context, path string) (KVLocker, error) {
	panic("unimplemented")
}
// RegisterLeaseExpiredObserver implements BackendOperations. Leases never
// expire in the in-memory backend, so the observer is never invoked.
func (c *inMemoryClient) RegisterLeaseExpiredObserver(prefix string, fn func(key string)) {
}

// RegisterLockLeaseExpiredObserver implements BackendOperations. Lock leases
// never expire in the in-memory backend, so the observer is never invoked.
func (c *inMemoryClient) RegisterLockLeaseExpiredObserver(prefix string, fn func(key string)) {}
// Status implements BackendOperations. The in-memory backend is always
// healthy, hence an empty status.
func (c *inMemoryClient) Status() *models.Status {
	return &models.Status{}
}
// StatusCheckErrors implements BackendOperations. A nil channel is returned
// since this backend never reports errors; receiving from it blocks forever.
func (c *inMemoryClient) StatusCheckErrors() <-chan error {
	return nil
}
// Update implements BackendOperations, unconditionally creating or
// overwriting the key. The lease flag is ignored by this backend.
func (c *inMemoryClient) Update(ctx context.Context, key string, value []byte, lease bool) error {
	wtxn := c.db.WriteTxn(c.table)
	// Use the deferred-Abort + explicit-Commit pattern of the other write
	// methods (CreateOnly, UpdateIfDifferent). The previous code both
	// deferred Commit AND called Commit explicitly, committing the
	// transaction twice.
	defer wtxn.Abort()
	c.table.Insert(wtxn, inMemoryObject{key, value})
	wtxn.Commit()
	return nil
}
// UpdateIfDifferent implements BackendOperations. The key is rewritten only
// when the stored value differs from the given one; the boolean reports
// whether an update was actually performed.
func (c *inMemoryClient) UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (bool, error) {
	wtxn := c.db.WriteTxn(c.table)
	// Discards the transaction on the unchanged path; presumably a no-op
	// after Commit — same pattern as the other write methods.
	defer wtxn.Abort()
	obj, _, found := c.table.Get(wtxn, inMemoryKeyIndex.Query(key))
	if found && bytes.Equal(obj.value, value) {
		return false, nil
	}
	c.table.Insert(wtxn, inMemoryObject{key, value})
	wtxn.Commit()
	return true, nil
}
// UpdateIfDifferentIfLocked implements BackendOperations. The lock is ignored
// since the in-memory backend has no cross-client contention.
func (c *inMemoryClient) UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (bool, error) {
	return c.UpdateIfDifferent(ctx, key, value, lease)
}

// UpdateIfLocked implements BackendOperations. The lock is ignored since the
// in-memory backend has no cross-client contention.
func (c *inMemoryClient) UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) error {
	return c.Update(ctx, key, value, lease)
}
// UserEnforceAbsence implements BackendOperations. User management is not
// supported by the in-memory backend.
func (c *inMemoryClient) UserEnforceAbsence(ctx context.Context, name string) error {
	panic("unimplemented")
}

// UserEnforcePresence implements BackendOperations. User management is not
// supported by the in-memory backend.
func (c *inMemoryClient) UserEnforcePresence(ctx context.Context, name string, roles []string) error {
	panic("unimplemented")
}
var _ BackendOperations = &inMemoryClient{}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"fmt"
"strings"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/time"
)
// Action labels used when recording kvstore operation metrics.
const (
	metricDelete = "delete"
	metricRead   = "read"
	metricSet    = "set"
)
// GetScopeFromKey derives a metrics scope label from a kvstore key. Keys with
// at least four slash-separated segments (e.g. "cilium/state/<scope>/v1/...")
// yield the third and fourth segments joined ("<scope>/v1"); shorter keys are
// returned as-is, truncated to their first 12 bytes.
func GetScopeFromKey(key string) string {
	parts := strings.SplitN(key, "/", 5)
	if len(parts) >= 4 {
		return parts[2] + "/" + parts[3]
	}
	if len(key) < 12 {
		return key
	}
	return key[:12]
}
// increaseMetric records the duration and outcome of a kvstore operation in
// the KVStoreOperationsDuration histogram, scoped by the key's namespace.
func increaseMetric(key, kind, action string, duration time.Duration, err error) {
	if !metrics.KVStoreOperationsDuration.IsEnabled() {
		return
	}
	metrics.KVStoreOperationsDuration.
		WithLabelValues(GetScopeFromKey(key), kind, action, metrics.Error2Outcome(err)).
		Observe(duration.Seconds())
}
// trackEventQueued records how long a kvstore event spent queued before being
// handled, labeled by scope and event type.
func trackEventQueued(scope string, typ EventType, duration time.Duration) {
	if !metrics.KVStoreEventsQueueDuration.IsEnabled() {
		return
	}
	metrics.KVStoreEventsQueueDuration.WithLabelValues(scope, typ.String()).Observe(duration.Seconds())
}
// recordQuorumError increments the quorum error counter for the given error
// string.
func recordQuorumError(err string) {
	if !metrics.KVStoreQuorumErrors.IsEnabled() {
		return
	}
	metrics.KVStoreQuorumErrors.WithLabelValues(err).Inc()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package store
import (
"log/slog"
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/metrics"
)
// Cell provides the Factory for kvstore synchronizers together with its
// metrics.
var Cell = cell.Module(
	"kvstore-utils",
	"Provides factory for kvstore related synchronizers",

	cell.Provide(NewFactory),

	metrics.Metric(MetricsProvider),
)
// Factory creates the store synchronizers (sync stores, watch stores and
// watch store managers) used to mirror state to and from the kvstore.
type Factory interface {
	// NewSyncStore returns a SyncStore pushing keys under prefix to the backend.
	NewSyncStore(clusterName string, backend SyncStoreBackend, prefix string, opts ...WSSOpt) SyncStore
	// NewWatchStore returns a WatchStore notifying observer of remote key events.
	NewWatchStore(clusterName string, keyCreator KeyCreator, observer Observer, opts ...RWSOpt) WatchStore
	// NewWatchStoreManager returns a WatchStoreManager for the given backend.
	NewWatchStoreManager(backend WatchStoreBackend, clusterName string) WatchStoreManager
}
// factoryImpl is the default Factory implementation, sharing a logger and
// metrics instance across all created synchronizers.
type factoryImpl struct {
	logger  *slog.Logger
	metrics *Metrics
}

// NewSyncStore implements Factory.
func (w *factoryImpl) NewSyncStore(clusterName string, backend SyncStoreBackend, prefix string, opts ...WSSOpt) SyncStore {
	return newWorkqueueSyncStore(w.logger, clusterName, backend, prefix, w.metrics, opts...)
}

// NewWatchStore implements Factory.
func (w *factoryImpl) NewWatchStore(clusterName string, keyCreator KeyCreator, observer Observer, opts ...RWSOpt) WatchStore {
	return newRestartableWatchStore(w.logger, clusterName, keyCreator, observer, w.metrics, opts...)
}

// NewWatchStoreManager implements Factory.
func (w *factoryImpl) NewWatchStoreManager(backend WatchStoreBackend, clusterName string) WatchStoreManager {
	return newWatchStoreManagerSync(w.logger, backend, clusterName, w)
}

// NewFactory constructs the default Factory; invoked by the Cell.
func NewFactory(logger *slog.Logger, storeMetrics *Metrics) Factory {
	return &factoryImpl{
		logger:  logger,
		metrics: storeMetrics,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package store
import (
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/metrics/metric"
)
// Metrics holds the metric vectors tracking kvstore synchronization state.
type Metrics struct {
	// KVStoreSyncQueueSize gauges the number of queued sync operations.
	KVStoreSyncQueueSize metric.Vec[metric.Gauge]
	// KVStoreSyncErrors counts failed synchronization attempts.
	KVStoreSyncErrors metric.Vec[metric.Counter]
	// KVStoreInitialSyncCompleted flags completion of the initial sync.
	KVStoreInitialSyncCompleted metric.Vec[metric.Gauge]
}
// MetricsProvider instantiates the kvstore store metrics; registered via the
// Cell's metrics.Metric() call.
func MetricsProvider() *Metrics {
	return &Metrics{
		KVStoreSyncQueueSize: metric.NewGaugeVec(metric.GaugeOpts{
			Namespace: metrics.Namespace,
			Subsystem: metrics.SubsystemKVStore,
			Name:      "sync_queue_size",
			Help:      "Number of elements queued for synchronization in the kvstore",
		}, []string{metrics.LabelScope, metrics.LabelSourceCluster}),
		KVStoreSyncErrors: metric.NewCounterVec(metric.CounterOpts{
			Namespace: metrics.Namespace,
			Subsystem: metrics.SubsystemKVStore,
			Name:      "sync_errors_total",
			Help:      "Number of times synchronization to the kvstore failed",
		}, []string{metrics.LabelScope, metrics.LabelSourceCluster}),
		KVStoreInitialSyncCompleted: metric.NewGaugeVec(metric.GaugeOpts{
			Namespace: metrics.Namespace,
			Subsystem: metrics.SubsystemKVStore,
			Name:      "initial_sync_completed",
			Help:      "Whether the initial synchronization from/to the kvstore has completed",
		}, []string{metrics.LabelScope, metrics.LabelSourceCluster, metrics.LabelAction}),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package store
import (
"context"
"fmt"
"log/slog"
"maps"
"path"
"slices"
"strings"
"sync"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
const (
	// listTimeoutDefault is the default timeout to wait while performing
	// the initial list operation of objects from the kvstore
	listTimeoutDefault = 3 * time.Minute
)

var (
	// controllers manages the periodic kvstore synchronization controllers
	// started by JoinSharedStore.
	controllers controller.Manager

	// kvstoreSyncControllerGroup groups all kvstore sync controllers.
	kvstoreSyncControllerGroup = controller.NewGroup("kvstore-sync")
)
// KeyCreator is the function to create a new empty Key instances. Store
// collaborators must implement this interface and provide the implementation
// in the Configuration structure.
type KeyCreator func() Key
// Configuration is the set of configuration parameters of a shared store.
type Configuration struct {
	// Prefix is the key prefix of the store shared by all keys. The prefix
	// is the unique identification of the store. Multiple collaborators
	// connected to the same kvstore cluster configuring stores with
	// matching prefixes will automatically form a shared store. This
	// parameter is required.
	Prefix string

	// SynchronizationInterval is the interval in which locally owned keys
	// are synchronized with the kvstore. Defaults to 0 (i.e., no periodic
	// synchronization is performed) if unset.
	SynchronizationInterval time.Duration

	// SharedKeyDeleteDelay is the delay before a shared key delete is
	// handled. This parameter is optional, and defaults to 0 if unset.
	SharedKeyDeleteDelay time.Duration

	// KeyCreator is called to allocate a Key instance when a new shared
	// key is discovered. This parameter is required.
	KeyCreator KeyCreator

	// Backend is the kvstore to use as a backend. This parameter is required.
	Backend kvstore.BackendOperations

	// Observer is the observer that will receive events on key mutations
	Observer Observer

	// Context is the base context for the store's kvstore watcher. It is
	// optional and defaults to context.Background() if unset.
	Context context.Context
}
// validate is invoked by JoinSharedStore to validate and complete the
// configuration. It returns nil when the configuration is valid, defaulting
// the context to context.Background() when unset.
func (c *Configuration) validate() error {
	switch {
	case c.Prefix == "":
		return fmt.Errorf("prefix must be specified")
	case c.KeyCreator == nil:
		return fmt.Errorf("KeyCreator must be specified")
	case c.Backend == nil:
		return fmt.Errorf("backend must be specified")
	}
	if c.Context == nil {
		c.Context = context.Background()
	}
	return nil
}
// SharedStore is an instance of a shared store. It is created with
// JoinSharedStore() and released with the SharedStore.Close() function.
type SharedStore struct {
	logger *slog.Logger
	// conf is a copy of the store configuration. This field is never
	// mutated after JoinSharedStore() so it is safe to access this without
	// a lock.
	conf Configuration

	// name is the name of the shared store. It is derived from the kvstore
	// prefix.
	name string

	// controllerName is the name of the controller used to synchronize
	// with the kvstore. It is derived from the name.
	controllerName string

	// backend is the backend as configured via Configuration
	backend kvstore.BackendOperations

	// mutex protects mutations to localKeys and sharedKeys
	mutex lock.RWMutex

	// localKeys is a map of keys that are owned by the local instance. All
	// local keys are synchronized with the kvstore. This map can be
	// modified with UpdateLocalKey() and DeleteLocalKey().
	localKeys map[string]LocalKey

	// sharedKeys is a map of all keys that either have been discovered
	// from remote collaborators or successfully shared local keys. This
	// map represents the state in the kvstore and is updated based on
	// kvstore events.
	sharedKeys map[string]Key

	// stop stops the kvstore watcher.
	stop context.CancelFunc

	// wg waits for the watcher goroutine to terminate on Release.
	wg sync.WaitGroup
}
// Observer receives events when objects in the store mutate
type Observer interface {
	// OnDelete is called when the key has been deleted from the shared store
	OnDelete(k NamedKey)

	// OnUpdate is called whenever a change has occurred in the data
	// structure represented by the key
	OnUpdate(k Key)
}
// NamedKey is an interface that a data structure must implement in order to
// be deleted from a SharedStore.
type NamedKey interface {
	// GetKeyName must return the name of the key. The name of the key must
	// be unique within the store and stable for a particular key. The name
	// of the key must be identical across agent restarts as the keys
	// remain in the kvstore.
	GetKeyName() string
}

// Key is the interface that a data structure must implement in order to be
// stored and shared as a key in a SharedStore.
type Key interface {
	NamedKey

	// Marshal is called to retrieve the byte slice representation of the
	// data represented by the key to store it in the kvstore. The function
	// must ensure that the underlying datatype is properly locked. It is
	// typically a good idea to use json.Marshal to implement this
	// function.
	Marshal() ([]byte, error)

	// Unmarshal is called when an update from the kvstore is received. The
	// prefix configured for the store is removed from the key, and the
	// byte slice passed to the function is coming from the Marshal
	// function from another collaborator. The function must unmarshal and
	// update the underlying data type. It is typically a good idea to use
	// json.Unmarshal to implement this function.
	Unmarshal(key string, data []byte) error
}

// LocalKey is a Key owned by the local store instance
type LocalKey interface {
	Key

	// DeepKeyCopy must return a deep copy of the key
	DeepKeyCopy() LocalKey
}

// KVPair is a minimal Key implementation carrying an opaque value under a
// given key name.
type KVPair struct {
	Key   string
	Value []byte
}

// NewKVPair returns a KVPair holding the given key/value pair.
func NewKVPair(key, value string) *KVPair {
	return &KVPair{Key: key, Value: []byte(value)}
}

// KVPairCreator implements KeyCreator for KVPair objects.
func KVPairCreator() Key {
	return &KVPair{}
}

// GetKeyName implements NamedKey.
func (kv *KVPair) GetKeyName() string { return kv.Key }

// Marshal implements Key; the stored value is returned verbatim.
func (kv *KVPair) Marshal() ([]byte, error) { return kv.Value, nil }

// Unmarshal implements Key, overwriting both the key name and the value.
func (kv *KVPair) Unmarshal(key string, data []byte) error {
	kv.Key, kv.Value = key, data
	return nil
}
// JoinSharedStore creates a new shared store based on the provided
// configuration. An error is returned if the configuration is invalid. The
// store is initialized with the contents of the kvstore. An error is returned
// if the contents cannot be retrieved synchronously from the kvstore. Starts a
// controller to continuously synchronize the store with the kvstore.
func JoinSharedStore(logger *slog.Logger, c Configuration) (*SharedStore, error) {
	if err := c.validate(); err != nil {
		return nil, err
	}

	s := &SharedStore{
		logger:     logger,
		conf:       c,
		localKeys:  map[string]LocalKey{},
		sharedKeys: map[string]Key{},
		backend:    c.Backend,
	}

	// Wrap the context, so that we can subsequently stop the kvstore watcher.
	s.conf.Context, s.stop = context.WithCancel(s.conf.Context)

	s.name = "store-" + s.conf.Prefix
	s.logger = s.logger.With(logfields.Name, s.name)
	s.controllerName = "kvstore-sync-" + s.name

	// Performs the initial listing and starts the watcher goroutine.
	if err := s.listAndStartWatcher(); err != nil {
		return nil, err
	}

	// Optionally start a controller to periodically re-push all locally
	// owned keys to the kvstore.
	if s.conf.SynchronizationInterval > 0 {
		controllers.UpdateController(s.controllerName,
			controller.ControllerParams{
				Group: kvstoreSyncControllerGroup,
				DoFunc: func(ctx context.Context) error {
					return s.syncLocalKeys(ctx, true)
				},
				RunInterval: s.conf.SynchronizationInterval,
			},
		)
	}

	return s, nil
}
// onDelete notifies the configured observer (if any) of a key deletion.
func (s *SharedStore) onDelete(k NamedKey) {
	if s.conf.Observer != nil {
		s.conf.Observer.OnDelete(k)
	}
}

// onUpdate notifies the configured observer (if any) of a key update.
func (s *SharedStore) onUpdate(k Key) {
	if s.conf.Observer != nil {
		s.conf.Observer.OnUpdate(k)
	}
}
// Release frees all resources own by the store but leaves all keys in the
// kvstore intact
func (s *SharedStore) Release() {
	// Stop the kvstore watcher and wait for its goroutine to terminate.
	s.stop()
	s.wg.Wait()
	controllers.RemoveController(s.controllerName)
}
// Close stops participation with a shared store and removes all keys owned by
// this node in the kvstore. This stops the controller started by
// JoinSharedStore().
//
// NOTE(review): localKeys/sharedKeys are mutated here without holding s.mutex.
// The watcher has been stopped by Release(), but concurrent callers of other
// store methods would race — confirm callers serialize Close with other use.
func (s *SharedStore) Close(ctx context.Context) {
	s.Release()

	for name, key := range s.localKeys {
		if err := s.backend.Delete(ctx, s.keyPath(key)); err != nil {
			s.logger.Warn("Unable to delete key in kvstore", logfields.Error, err)
		}

		delete(s.localKeys, name)
		// Since we have received our own notification we also need to remove
		// it from the shared keys.
		delete(s.sharedKeys, name)

		s.onDelete(key)
	}
}
// keyPath returns the absolute kvstore path of a key
func (s *SharedStore) keyPath(key NamedKey) string {
	// WARNING - STABLE API: The composition of the absolute key path
	// cannot be changed without breaking up and downgrades.
	return path.Join(s.conf.Prefix, key.GetKeyName())
}
// syncLocalKey synchronizes a single local key to the kvstore, overwriting
// any existing entry. When lease is true the entry expires automatically if
// this agent dies and never comes back.
func (s *SharedStore) syncLocalKey(ctx context.Context, key LocalKey, lease bool) error {
	value, err := key.Marshal()
	if err != nil {
		return err
	}

	// The write is skipped by the backend when the stored value is already
	// identical.
	_, err = s.backend.UpdateIfDifferent(ctx, s.keyPath(key), value, lease)
	return err
}
// syncLocalKeys synchronizes all local keys with the kvstore, stopping at the
// first failure.
func (s *SharedStore) syncLocalKeys(ctx context.Context, lease bool) error {
	// Snapshot the local keys so the kvstore round-trips happen without
	// holding the store mutex.
	s.mutex.RLock()
	snapshot := slices.Collect(maps.Values(s.localKeys))
	s.mutex.RUnlock()

	for _, k := range snapshot {
		if err := s.syncLocalKey(ctx, k, lease); err != nil {
			return err
		}
	}
	return nil
}
// lookupLocalKey returns the locally owned key with the given name, or nil if
// this instance does not own such a key.
func (s *SharedStore) lookupLocalKey(name string) LocalKey {
	s.mutex.RLock()
	defer s.mutex.RUnlock()

	for _, k := range s.localKeys {
		if k.GetKeyName() == name {
			return k
		}
	}
	return nil
}
// NumEntries returns the number of entries in the store
func (s *SharedStore) NumEntries() int {
	if s == nil {
		return 0
	}

	s.mutex.RLock()
	n := len(s.sharedKeys)
	s.mutex.RUnlock()
	return n
}
// SharedKeysMap returns a copy of the SharedKeysMap, the returned map can
// be safely modified but the values of the map represent the actual data
// stored in the internal SharedStore SharedKeys map.
func (s *SharedStore) SharedKeysMap() map[string]Key {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	return maps.Clone(s.sharedKeys)
}
// UpdateLocalKeySync synchronously synchronizes a local key with the kvstore
// and adds it to the list of local keys to be synchronized if the initial
// synchronous synchronization was successful
func (s *SharedStore) UpdateLocalKeySync(ctx context.Context, key LocalKey) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if err := s.syncLocalKey(ctx, key, true); err != nil {
		return err
	}
	// Store a deep copy so later mutations of the caller's object do not
	// affect the tracked state.
	s.localKeys[key.GetKeyName()] = key.DeepKeyCopy()
	return nil
}
// UpdateKeySync synchronously synchronizes a key with the kvstore. Unlike
// UpdateLocalKeySync, the key is not tracked for periodic resynchronization.
func (s *SharedStore) UpdateKeySync(ctx context.Context, key LocalKey, lease bool) error {
	return s.syncLocalKey(ctx, key, lease)
}
// DeleteLocalKey removes a key from being synchronized with the kvstore
func (s *SharedStore) DeleteLocalKey(ctx context.Context, key NamedKey) {
	name := key.GetKeyName()

	s.mutex.Lock()
	_, ok := s.localKeys[name]
	delete(s.localKeys, name)
	s.mutex.Unlock()

	// The kvstore entry is deleted even if the key was not tracked locally.
	err := s.backend.Delete(ctx, s.keyPath(key))

	// Only notify the observer (and report failures) when the key was
	// actually owned by this instance.
	if ok {
		if err != nil {
			s.logger.Warn("Unable to delete key in kvstore", logfields.Error, err)
		}

		s.onDelete(key)
	}
}
// updateKey unmarshals a kvstore update into a freshly created Key, records
// it in sharedKeys and notifies the observer. An unmarshal failure leaves the
// store untouched.
func (s *SharedStore) updateKey(name string, value []byte) error {
	k := s.conf.KeyCreator()
	if err := k.Unmarshal(name, value); err != nil {
		return err
	}

	s.mutex.Lock()
	s.sharedKeys[name] = k
	s.mutex.Unlock()

	s.onUpdate(k)
	return nil
}
// deleteSharedKey removes a remotely deleted key from sharedKeys. The
// observer's OnDelete is deferred by SharedKeyDeleteDelay and suppressed if
// the key re-appears within that window (e.g. after a kvstore flap).
func (s *SharedStore) deleteSharedKey(name string) {
	s.mutex.Lock()
	existingKey, ok := s.sharedKeys[name]
	delete(s.sharedKeys, name)
	s.mutex.Unlock()

	if ok {
		go func() {
			// Wait out the grace period before notifying the observer.
			time.Sleep(s.conf.SharedKeyDeleteDelay)
			s.mutex.RLock()
			_, ok := s.sharedKeys[name]
			s.mutex.RUnlock()
			if ok {
				// The key was re-created in the meantime; skip the
				// delete notification.
				s.logger.Warn(
					"Received delete event for key which re-appeared within delay time window",
					logfields.Key, name,
					logfields.TimeWindow, s.conf.SharedKeyDeleteDelay,
				)
				return
			}

			s.onDelete(existingKey)
		}()
	} else {
		s.logger.Warn(
			"Unable to find deleted key in local state",
			logfields.Key, name,
		)
	}
}
// listAndStartWatcher starts the kvstore watcher goroutine and waits until
// the initial listing has completed or listTimeoutDefault elapses.
// NOTE(review): on timeout the watcher goroutine keeps running until the
// store context is cancelled; JoinSharedStore returns the error without
// calling Release — confirm this leak is acceptable for the error path.
func (s *SharedStore) listAndStartWatcher() error {
	listDone := make(chan struct{})

	s.wg.Add(1)
	go func() {
		s.watcher(listDone)
		s.wg.Done()
	}()

	select {
	case <-listDone:
	case <-time.After(listTimeoutDefault):
		return fmt.Errorf("timeout while retrieving initial list of objects from kvstore")
	}

	return nil
}
// watcher consumes kvstore events for the store prefix until the event
// channel is closed (i.e. the store context is cancelled). It closes listDone
// once the initial listing marker has been received, mirrors create/modify
// events into sharedKeys, and re-creates locally owned keys that were deleted
// remotely.
func (s *SharedStore) watcher(listDone chan struct{}) {
	events := s.backend.ListAndWatch(s.conf.Context, s.conf.Prefix)

	logger := s.logger
	for event := range events {
		if event.Typ == kvstore.EventTypeListDone {
			logger.Debug("Initial list of objects received from kvstore")
			close(listDone)
			continue
		}

		logger.Debug("Received key update via kvstore",
			logfields.Value, string(event.Value),
			logfields.Key, event.Key,
			logfields.EventType, event.Typ,
		)

		// Strip the store prefix and a leading '/' to obtain the bare
		// key name.
		// NOTE(review): keyName[0] assumes event.Key is strictly longer
		// than the prefix; an event for exactly the prefix would panic on
		// the empty string — confirm backends never emit such a key.
		keyName := strings.TrimPrefix(event.Key, s.conf.Prefix)
		if keyName[0] == '/' {
			keyName = keyName[1:]
		}

		switch event.Typ {
		case kvstore.EventTypeCreate, kvstore.EventTypeModify:
			if err := s.updateKey(keyName, event.Value); err != nil {
				logger.Warn(
					"Unable to unmarshal store value",
					logfields.Error, err,
					logfields.Value, string(event.Value),
					logfields.Key, event.Key,
					logfields.EventType, event.Typ,
				)
			}

		case kvstore.EventTypeDelete:
			if localKey := s.lookupLocalKey(keyName); localKey != nil {
				logger.Warn(
					"Received delete event for local key. Re-creating the key in the kvstore",
					logfields.Key, event.Key,
					logfields.EventType, event.Typ,
				)

				// Best effort: a failure here is presumably retried by
				// the periodic sync controller — error intentionally
				// not checked.
				s.syncLocalKey(s.conf.Context, localKey, true)
			} else {
				s.deleteSharedKey(keyName)
			}
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package store
import (
"bytes"
"context"
"fmt"
"log/slog"
"path"
"strings"
"sync"
"sync/atomic"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/client-go/util/workqueue"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/time"
)
// SyncStore abstracts the operations allowing to synchronize key/value pairs
// into a kvstore.
type SyncStore interface {
	// Run starts the SyncStore logic, blocking until the context is closed.
	Run(ctx context.Context)

	// UpsertKey upserts a key/value pair into the kvstore.
	UpsertKey(ctx context.Context, key Key) error

	// DeleteKey removes a key from the kvstore.
	DeleteKey(ctx context.Context, key NamedKey) error

	// Synced triggers the insertion of the "synced" key associated with this
	// store into the kvstore once all upsertions already issued have completed
	// successfully, eventually executing all specified callbacks (if any).
	// Only the first invocation takes effect.
	Synced(ctx context.Context, callbacks ...func(ctx context.Context)) error
}
// SyncStoreBackend represents the subset of kvstore.BackendOperations
// leveraged by SyncStore implementations.
type SyncStoreBackend interface {
	// Update creates or updates a key.
	Update(ctx context.Context, key string, value []byte, lease bool) error
	// Delete deletes a key.
	Delete(ctx context.Context, key string) error

	// RegisterLeaseExpiredObserver registers a function which is executed when
	// the lease associated with a key having the given prefix is detected as expired.
	RegisterLeaseExpiredObserver(prefix string, fn func(key string))
}
// wqSyncStore implements the SyncStore interface leveraging a workqueue to
// coalesce update/delete requests and handle retries in case of errors.
type wqSyncStore struct {
	backend SyncStoreBackend
	// prefix under which all keys of this store are written.
	prefix string
	// source is the local cluster name, used as a metrics label.
	source string

	// workers is the number of goroutines draining the workqueue.
	workers uint
	// withLease controls whether upserted keys get a lease attached.
	withLease bool

	limiter   workqueue.TypedRateLimiter[workqueueKey]
	workqueue workqueue.TypedRateLimitingInterface[workqueueKey]
	state     lock.Map[string, []byte] // map[NamedKey.GetKeyName()]Key.Marshal()

	synced      atomic.Bool                // Synced() has been triggered
	pendingSync lock.Map[string, struct{}] // the set of keys still to sync
	// syncedKey is the key written to mark initial sync completion.
	syncedKey string
	// syncedCallbacks run once the synced key has been written.
	syncedCallbacks []func(context.Context)

	log          *slog.Logger
	queuedMetric prometheus.Gauge
	errorsMetric prometheus.Counter
	syncedMetric prometheus.Gauge
}

// workqueueKey is the workqueue item type: either a plain key name or a
// sync canary signalling that the synced marker should be written.
type workqueueKey struct {
	value      string
	syncCanary *struct{ skipCallbacks bool }
}
// WSSOpt customizes a workqueue-based SyncStore at construction time.
type WSSOpt func(*wqSyncStore)

// WSSWithRateLimiter sets the rate limiting algorithm to be used when requeueing failed events.
func WSSWithRateLimiter(limiter workqueue.TypedRateLimiter[workqueueKey]) WSSOpt {
	return func(wss *wqSyncStore) {
		wss.limiter = limiter
	}
}

// WSSWithWorkers configures the number of workers spawned by Run() to handle update/delete operations.
func WSSWithWorkers(workers uint) WSSOpt {
	return func(wss *wqSyncStore) {
		wss.workers = workers
	}
}

// WSSWithoutLease disables attaching the lease to upserted keys.
func WSSWithoutLease() WSSOpt {
	return func(wss *wqSyncStore) {
		wss.withLease = false
	}
}

// WSSWithSyncedKeyOverride overrides the "synced" key inserted into the kvstore
// when initial synchronization completed (by default it corresponds to the prefix).
func WSSWithSyncedKeyOverride(key string) WSSOpt {
	return func(wss *wqSyncStore) {
		wss.syncedKey = key
	}
}
// newWorkqueueSyncStore returns a SyncStore instance which leverages a
// workqueue to coalesce update/delete requests and handle retries in case of
// errors. Defaults (1 worker, lease enabled, default rate limiter, synced key
// equal to the prefix) can be tuned through the provided options.
func newWorkqueueSyncStore(logger *slog.Logger, clusterName string, backend SyncStoreBackend, prefix string, m *Metrics, opts ...WSSOpt) SyncStore {
	wss := &wqSyncStore{
		backend:   backend,
		prefix:    prefix,
		source:    clusterName,
		workers:   1,
		withLease: true,
		limiter:   workqueue.DefaultTypedControllerRateLimiter[workqueueKey](),
		syncedKey: prefix,
		log:       logger.With(logfields.Prefix, prefix),
	}

	// Apply caller overrides before constructing the workqueue, since the
	// limiter may have been replaced.
	for _, opt := range opts {
		opt(wss)
	}

	wss.log = wss.log.With(logfields.ClusterName, wss.source)
	wss.workqueue = workqueue.NewTypedRateLimitingQueue(wss.limiter)
	wss.queuedMetric = m.KVStoreSyncQueueSize.WithLabelValues(kvstore.GetScopeFromKey(prefix), wss.source)
	wss.errorsMetric = m.KVStoreSyncErrors.WithLabelValues(kvstore.GetScopeFromKey(prefix), wss.source)
	wss.syncedMetric = m.KVStoreInitialSyncCompleted.WithLabelValues(kvstore.GetScopeFromKey(prefix), wss.source, "write")
	return wss
}
// Run starts the SyncStore logic, blocking until the context is closed.
// It spawns the configured number of worker goroutines to drain the
// workqueue and shuts them down cleanly on cancellation.
func (wss *wqSyncStore) Run(ctx context.Context) {
	var wg sync.WaitGroup

	// The initial-sync gauge is false while not running.
	wss.syncedMetric.Set(metrics.BoolToFloat64(false))
	defer wss.syncedMetric.Set(metrics.BoolToFloat64(false))

	// Re-queue keys whose kvstore lease expired so they get rewritten.
	wss.backend.RegisterLeaseExpiredObserver(wss.prefix, wss.handleExpiredLease)
	wss.backend.RegisterLeaseExpiredObserver(wss.getSyncedKey(), wss.handleExpiredLease)

	wss.log.Info("Starting workqueue-based sync store", logfields.Workers, wss.workers)
	wg.Add(int(wss.workers))
	for i := uint(0); i < wss.workers; i++ {
		go func() {
			defer wg.Done()
			for wss.processNextItem(ctx) {
			}
		}()
	}

	<-ctx.Done()

	// Deregister the observers before shutting down the queue.
	wss.backend.RegisterLeaseExpiredObserver(wss.prefix, nil)
	wss.backend.RegisterLeaseExpiredObserver(wss.getSyncedKey(), nil)

	wss.log.Info("Shutting down workqueue-based sync store")
	wss.workqueue.ShutDown()
	wg.Wait()
}
// UpsertKey registers the key for asynchronous upsertion in the kvstore, if the
// corresponding value has changed. It returns an error in case it is impossible
// to marshal the value, while kvstore failures are automatically handled through
// a retry mechanism.
func (wss *wqSyncStore) UpsertKey(_ context.Context, k Key) error {
	key := k.GetKeyName()
	value, err := k.Marshal()
	if err != nil {
		return fmt.Errorf("failed marshaling key %q: %w", key, err)
	}

	prevValue, loaded := wss.state.Swap(key, value)
	if loaded && bytes.Equal(prevValue, value) {
		// Nothing changed; skip the queue entirely.
		wss.log.Debug("ignoring upsert request for already up-to-date key", logfields.Key, key)
	} else {
		// Before the initial sync completes, track the key so the synced
		// marker is only written once all pending keys made it through.
		if !wss.synced.Load() {
			wss.pendingSync.Store(key, struct{}{})
		}

		wss.workqueue.Add(workqueueKey{value: key})
		wss.queuedMetric.Set(float64(wss.workqueue.Len()))
	}

	return nil
}
// DeleteKey registers the key for asynchronous deletion from the kvstore, if it
// was known to be present. It never returns an error, because kvstore failures
// are automatically handled through a retry mechanism.
func (wss *wqSyncStore) DeleteKey(_ context.Context, k NamedKey) error {
	name := k.GetKeyName()
	_, existed := wss.state.LoadAndDelete(name)
	if !existed {
		wss.log.Debug("ignoring delete request for non-existing key", logfields.Key, name)
		return nil
	}
	wss.workqueue.Add(workqueueKey{value: name})
	wss.queuedMetric.Set(float64(wss.workqueue.Len()))
	return nil
}
// Synced records that the external source has been fully enumerated, storing
// the given callbacks and enqueuing the sync canary. It is a no-op if already
// invoked once.
func (wss *wqSyncStore) Synced(_ context.Context, callbacks ...func(ctx context.Context)) error {
	alreadySynced := wss.synced.Swap(true)
	if alreadySynced {
		return nil
	}
	wss.syncedCallbacks = callbacks
	wss.workqueue.Add(workqueueKey{syncCanary: &struct{ skipCallbacks bool }{}})
	return nil
}
// processNextItem pulls a single element from the workqueue and handles it,
// returning false only when the workqueue has been shut down (terminating
// the calling worker). Failed items are re-added with rate limiting so that
// transient kvstore errors are retried; successful ones are forgotten.
func (wss *wqSyncStore) processNextItem(ctx context.Context) bool {
	// Retrieve the next key to process from the workqueue.
	key, shutdown := wss.workqueue.Get()
	wss.queuedMetric.Set(float64(wss.workqueue.Len()))
	if shutdown {
		return false
	}
	// We call Done here so the workqueue knows we have finished
	// processing this item.
	defer func() {
		wss.workqueue.Done(key)
		// This ensures that the metric is correctly updated in case of requeues.
		wss.queuedMetric.Set(float64(wss.workqueue.Len()))
	}()
	// Run the handler, passing it the key to be processed as parameter.
	if err := wss.handle(ctx, key); err != nil {
		// Put the item back on the workqueue to handle any transient errors.
		wss.errorsMetric.Inc()
		wss.workqueue.AddRateLimited(key)
		return true
	}
	// Since no error occurred, forget this item so it does not get queued again
	// until another change happens.
	wss.workqueue.Forget(key)
	wss.pendingSync.Delete(key.value)
	return true
}
// handle dispatches a dequeued item: sync canaries go to handleSync, while
// regular keys are upserted or deleted depending on whether they are still
// present in the local state.
func (wss *wqSyncStore) handle(ctx context.Context, item workqueueKey) error {
	if canary := item.syncCanary; canary != nil {
		return wss.handleSync(ctx, canary.skipCallbacks)
	}
	// A key missing from the state means a deletion was requested meanwhile.
	value, present := wss.state.Load(item.value)
	if !present {
		return wss.handleDelete(ctx, item.value)
	}
	return wss.handleUpsert(ctx, item.value, value)
}
// handleUpsert writes the given key/value pair to the kvstore under the
// configured prefix, returning the error (to trigger a retry) on failure.
func (wss *wqSyncStore) handleUpsert(ctx context.Context, key string, value []byte) error {
	if err := wss.backend.Update(ctx, wss.keyPath(key), value, wss.withLease); err != nil {
		wss.log.Warn("Failed upserting key in kvstore. Retrying...",
			logfields.Error, err,
			logfields.Key, key,
		)
		return err
	}
	wss.log.Debug("Upserted key in kvstore", logfields.Key, key)
	return nil
}
// handleDelete removes the given key from the kvstore, returning the error
// (to trigger a retry) on failure.
func (wss *wqSyncStore) handleDelete(ctx context.Context, key string) error {
	err := wss.backend.Delete(ctx, wss.keyPath(key))
	if err != nil {
		wss.log.Warn("Failed deleting key from kvstore. Retrying...",
			logfields.Error, err,
			logfields.Key, key,
		)
		return err
	}
	wss.log.Debug("Deleted key from kvstore", logfields.Key, key)
	return nil
}
// handleSync writes the sync canary key once all keys that were pending at
// the time Synced() was invoked have been successfully upserted, and then
// executes the registered callbacks — unless skipCallbacks is set, which is
// the case when the canary is re-created after a lease expiration. Returning
// an error causes the canary to be retried through the rate-limited queue.
func (wss *wqSyncStore) handleSync(ctx context.Context, skipCallbacks bool) error {
	// This could be replaced by wss.toSync.Len() == 0 if it only existed...
	// Range stops at the first element, so this is just an emptiness check.
	syncCompleted := true
	wss.pendingSync.Range(func(string, struct{}) bool {
		syncCompleted = false
		return false
	})
	if !syncCompleted {
		return fmt.Errorf("there are still keys to be synchronized")
	}
	key := wss.getSyncedKey()
	// The canary value is the current timestamp (informational only).
	err := wss.backend.Update(ctx, key, []byte(time.Now().Format(time.RFC3339)), wss.withLease)
	if err != nil {
		wss.log.Warn("Failed upserting synced key in kvstore. Retrying...",
			logfields.Error, err,
			logfields.Key, key,
		)
		return err
	}
	wss.log.Info("Initial synchronization from the external source completed",
		logfields.Key, key,
	)
	wss.syncedMetric.Set(metrics.BoolToFloat64(true))
	// Execute any callback that might have been registered.
	if !skipCallbacks {
		for _, callback := range wss.syncedCallbacks {
			callback(ctx)
		}
	}
	return nil
}
// handleExpiredLease gets executed when the lease attached to a given key expired,
// and is responsible for enqueuing the given key to recreate it.
func (wss *wqSyncStore) handleExpiredLease(key string) {
	defer wss.queuedMetric.Set(float64(wss.workqueue.Len()))
	if key == wss.getSyncedKey() {
		// Re-enqueue the creation of the sync canary, but make sure that
		// the registered callbacks are not executed a second time.
		wss.workqueue.Add(workqueueKey{syncCanary: &struct{ skipCallbacks bool }{true}})
		return
	}
	// Strip the prefix (and a possible leading slash) to obtain the relative key.
	name := strings.TrimPrefix(strings.TrimPrefix(key, wss.prefix), "/")
	if _, known := wss.state.Load(name); !known {
		return
	}
	wss.log.Debug("enqueuing upsert request for key as the attached lease expired", logfields.Key, name)
	if !wss.synced.Load() {
		wss.pendingSync.Store(name, struct{}{})
	}
	wss.workqueue.Add(workqueueKey{value: name})
}
// keyPath returns the absolute kvstore path of a key
func (wss *wqSyncStore) keyPath(key string) string {
	// WARNING - STABLE API: The composition of the absolute key path
	// cannot be changed without breaking up and downgrades.
	return path.Join(wss.prefix, key)
}
// getSyncedKey returns the absolute kvstore path of the sync canary key
// for this source cluster.
func (wss *wqSyncStore) getSyncedKey() string {
	return path.Join(kvstore.SyncedPrefix, wss.source, wss.syncedKey)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package store
import (
"context"
"log/slog"
"strings"
"sync/atomic"
"github.com/prometheus/client_golang/prometheus"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/metrics/metric"
)
// WatchStore abstracts the operations allowing to synchronize key/value pairs
// from a kvstore, emitting the corresponding events.
type WatchStore interface {
	// Watch starts watching the specified kvstore prefix, blocking until the context is closed.
	// Depending on the implementation, it might be executed multiple times.
	Watch(ctx context.Context, backend WatchStoreBackend, prefix string)
	// NumEntries returns the number of entries synchronized from the store.
	NumEntries() uint64
	// Synced returns whether the initial list of entries has been retrieved from
	// the kvstore, and new events are currently being watched.
	Synced() bool
	// Drain emits a deletion event for each known key. It shall be called only
	// when no watch operation is in progress.
	Drain()
}
// WatchStoreBackend represents the subset of kvstore.BackendOperations leveraged
// by WatchStore implementations.
type WatchStoreBackend interface {
	// ListAndWatch creates a new watcher for the given prefix after listing the existing keys.
	ListAndWatch(ctx context.Context, prefix string) kvstore.EventChan
}
// RWSOpt is a functional option customizing a restartableWatchStore.
type RWSOpt func(*restartableWatchStore)
// RWSWithOnSyncCallback registers a function to be executed after
// listing all keys from the kvstore for the first time. Multiple
// callback functions can be registered.
func RWSWithOnSyncCallback(callback func(ctx context.Context)) RWSOpt {
	return func(rws *restartableWatchStore) {
		rws.onSyncCallbacks = append(rws.onSyncCallbacks, callback)
	}
}
// RWSWithEntriesMetric registers a Prometheus gauge metric that is kept
// in sync with the number of entries synchronized from the kvstore.
func RWSWithEntriesMetric(gauge prometheus.Gauge) RWSOpt {
	return func(rws *restartableWatchStore) {
		rws.entriesMetric = gauge
	}
}
// rwsEntry is the internal representation of a synchronized key. The stale
// flag marks entries not (yet) refreshed since the last Watch() restart.
type rwsEntry struct {
	key   Key
	stale bool
}
// restartableWatchStore implements the WatchStore interface, supporting
// multiple executions of the Watch() operation (granted that the previous one
// already terminated). This allows to transparently handle the case in which
// we had to create a new etcd connection (for instance following a failure)
// which refers to the same remote cluster.
type restartableWatchStore struct {
	source     string     // cluster name, used to enrich logs and metrics
	keyCreator KeyCreator // creates a fresh Key object for each upsert event
	observer   Observer   // receives the OnUpdate/OnDelete notifications
	watching        atomic.Bool // guards against concurrent Watch()/Drain() executions
	synced          atomic.Bool
	onSyncCallbacks []func(ctx context.Context)
	// Using a separate entries counter avoids the need for synchronizing the
	// access to the state map, since the only concurrent reader is represented
	// by the NumEntries() function.
	state      map[string]*rwsEntry
	numEntries atomic.Uint64
	baseLogger *slog.Logger
	log        *slog.Logger // baseLogger enriched with the currently watched prefix
	entriesMetric prometheus.Gauge
	syncMetric    metric.Vec[metric.Gauge]
}
// newRestartableWatchStore returns a WatchStore instance which supports
// restarting the watch operation multiple times, automatically handling
// the emission of deletion events for all stale entries (if enabled). It
// shall be restarted only once the previous Watch execution terminated.
func newRestartableWatchStore(logger *slog.Logger, clusterName string, keyCreator KeyCreator, observer Observer, m *Metrics, opts ...RWSOpt) WatchStore {
	rws := &restartableWatchStore{
		source:     clusterName,
		keyCreator: keyCreator,
		observer:   observer,
		state:      make(map[string]*rwsEntry),
		log:        logger,
		baseLogger: logger,
		// Default to a no-op gauge, possibly overridden via RWSWithEntriesMetric.
		entriesMetric: metrics.NoOpGauge,
		syncMetric:    m.KVStoreInitialSyncCompleted,
	}
	for _, opt := range opts {
		opt(rws)
	}
	// Enrich the loggers with the cluster name only after applying the options.
	rws.baseLogger = rws.baseLogger.With(logfields.ClusterName, rws.source)
	rws.log = rws.baseLogger
	return rws
}
// Watch starts watching the specified kvstore prefix, blocking until the context is closed.
// It might be executed multiple times, granted that the previous execution already terminated.
func (rws *restartableWatchStore) Watch(ctx context.Context, backend WatchStoreBackend, prefix string) {
	// Append a trailing "/" to the prefix, to make sure that we watch only
	// sub-elements belonging to that prefix, and not to sibling prefixes
	// (for instance in case the last part of the prefix is the cluster name,
	// and one is the substring of another).
	if !strings.HasSuffix(prefix, "/") {
		prefix = prefix + "/"
	}
	rws.log = rws.baseLogger.With(logfields.Prefix, prefix)
	syncedMetric := rws.syncMetric.WithLabelValues(
		kvstore.GetScopeFromKey(prefix), rws.source, "read")
	rws.log.Info("Starting restartable watch store")
	syncedMetric.Set(metrics.BoolToFloat64(false))
	// Only one Watch() (or Drain()) execution may be running at a time.
	if rws.watching.Swap(true) {
		logging.Panic(rws.log, "Cannot start the watch store while still running")
	}
	defer func() {
		rws.log.Info("Stopped restartable watch store")
		syncedMetric.Set(metrics.BoolToFloat64(false))
		rws.watching.Store(false)
		rws.synced.Store(false)
	}()
	// Mark all known keys as stale. Keys refreshed by the initial listing are
	// replaced with non-stale entries in handleUpsert; the remainder receive
	// deletion events when the ListDone event is processed below.
	for _, entry := range rws.state {
		entry.stale = true
	}
	// The events channel is closed when the context is closed.
	events := backend.ListAndWatch(ctx, prefix)
	for event := range events {
		if event.Typ == kvstore.EventTypeListDone {
			rws.log.Debug("Initial synchronization completed")
			// Emit deletion events for the keys not refreshed by the listing.
			rws.drainKeys(true)
			syncedMetric.Set(metrics.BoolToFloat64(true))
			rws.synced.Store(true)
			for _, callback := range rws.onSyncCallbacks {
				callback(ctx)
			}
			// Clear the list of callbacks so that they don't get executed
			// a second time in case of reconnections.
			rws.onSyncCallbacks = nil
			continue
		}
		// Strip the watched prefix, so that observers see relative key names.
		key := strings.TrimPrefix(event.Key, prefix)
		rws.log.Debug(
			"Received event from kvstore",
			logfields.Key, key,
			logfields.Event, event.Typ,
		)
		switch event.Typ {
		case kvstore.EventTypeCreate, kvstore.EventTypeModify:
			rws.handleUpsert(key, event.Value)
		case kvstore.EventTypeDelete:
			rws.handleDelete(key)
		}
	}
}
// NumEntries returns the number of entries synchronized from the store.
// It is safe to call concurrently with a running Watch(), as the counter
// is maintained atomically.
func (rws *restartableWatchStore) NumEntries() uint64 {
	return rws.numEntries.Load()
}
// Synced returns whether the initial list of entries has been retrieved from
// the kvstore, and new events are currently being watched.
func (rws *restartableWatchStore) Synced() bool {
	return rws.synced.Load()
}
// Drain emits a deletion event for each known key. It shall be called only
// when no watch operation is in progress.
func (rws *restartableWatchStore) Drain() {
	// Reuse the watching flag to also guard against draining concurrently
	// with a running Watch() (or another Drain()).
	if rws.watching.Swap(true) {
		logging.Panic(rws.log, "Cannot drain the watch store while still running")
	}
	defer rws.watching.Store(false)
	rws.log.Info("Draining restartable watch store")
	rws.drainKeys(false)
	rws.log.Info("Drained restartable watch store")
}
// drainKeys emits synthetic deletion events:
// * staleOnly == true: for all keys marked as stale;
// * staleOnly == false: for all known keys;
func (rws *restartableWatchStore) drainKeys(staleOnly bool) {
	for key, entry := range rws.state {
		if staleOnly && !entry.stale {
			continue
		}
		rws.log.Debug(
			"Emitting deletion event for stale key",
			logfields.Key, key,
		)
		rws.handleDelete(key)
	}
}
// handleUpsert unmarshals the received value into a fresh Key, records it in
// the local state and notifies the observer. Malformed values are logged and
// dropped.
func (rws *restartableWatchStore) handleUpsert(key string, value []byte) {
	entry := &rwsEntry{key: rws.keyCreator()}
	err := entry.key.Unmarshal(key, value)
	if err != nil {
		rws.log.Warn(
			"Unable to unmarshal value",
			logfields.Error, err,
			logfields.Key, key,
			logfields.Value, string(value),
		)
		return
	}
	rws.state[key] = entry
	size := len(rws.state)
	rws.numEntries.Store(uint64(size))
	rws.entriesMetric.Set(float64(size))
	rws.observer.OnUpdate(entry.key)
}
// handleDelete removes the given key from the local state and notifies the
// observer, warning about (and ignoring) events for unknown keys.
func (rws *restartableWatchStore) handleDelete(key string) {
	entry, known := rws.state[key]
	if !known {
		rws.log.Warn(
			"Received deletion event for unknown key",
			logfields.Key, key,
		)
		return
	}
	delete(rws.state, key)
	size := len(rws.state)
	rws.numEntries.Store(uint64(size))
	rws.entriesMetric.Set(float64(size))
	rws.observer.OnDelete(entry.key)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package store
import (
"context"
"log/slog"
"path"
"sync"
"sync/atomic"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// WSMFunc is a function which can be registered in the WatchStoreManager.
type WSMFunc func(context.Context)
// WatchStoreManager enables to register a set of functions to be asynchronously
// executed when the corresponding kvstore prefixes are synchronized (based on
// the implementation).
type WatchStoreManager interface {
	// Register registers a function associated with a given kvstore prefix.
	// It cannot be called once Run() has started.
	Register(prefix string, function WSMFunc)
	// Run starts the manager, blocking until the context is closed and all
	// started functions terminated.
	Run(ctx context.Context)
}
// wsmCommon implements the common logic shared by WatchStoreManager implementations.
type wsmCommon struct {
	wg        sync.WaitGroup     // tracks the started functions until termination
	functions map[string]WSMFunc // registered functions, keyed by prefix
	running   atomic.Bool        // set once Run() has started
	log       *slog.Logger
}
// newWSMCommon returns a wsmCommon instance ready to accept registrations.
func newWSMCommon(logger *slog.Logger) wsmCommon {
	common := wsmCommon{
		functions: make(map[string]WSMFunc),
		log:       logger,
	}
	return common
}
// Register registers a function associated with a given kvstore prefix.
// It cannot be called once Run() has started.
func (mgr *wsmCommon) Register(prefix string, function WSMFunc) {
	if started := mgr.running.Load(); started {
		logging.Panic(mgr.log, "Cannot call Register while the watch store manager is running")
	}
	mgr.functions[prefix] = function
}
// ready starts (in a dedicated goroutine) the function registered for the
// given prefix, if any, removing it from the registration map so that it
// runs at most once.
func (mgr *wsmCommon) ready(ctx context.Context, prefix string) {
	fn := mgr.functions[prefix]
	if fn == nil {
		mgr.log.Debug("Received sync event for unregistered prefix", logfields.Prefix, prefix)
		return
	}
	mgr.log.Debug("Starting function for kvstore prefix", logfields.Prefix, prefix)
	delete(mgr.functions, prefix)
	mgr.wg.Add(1)
	go func() {
		defer mgr.wg.Done()
		fn(ctx)
		mgr.log.Debug("Function terminated for kvstore prefix", logfields.Prefix, prefix)
	}()
}
// run marks the manager as started, panicking if Run() is invoked twice.
func (mgr *wsmCommon) run() {
	mgr.log.Info("Starting watch store manager")
	if mgr.running.Swap(true) {
		logging.Panic(mgr.log, "Cannot start the watch store manager twice")
	}
}
// wait blocks until all functions started via ready() have terminated.
func (mgr *wsmCommon) wait() {
	mgr.wg.Wait()
	mgr.log.Info("Stopped watch store manager")
}
// wsmSync starts registered functions only once the corresponding sync
// canary has been observed in the kvstore.
type wsmSync struct {
	wsmCommon
	clusterName string
	backend     WatchStoreBackend
	store       WatchStore           // watches the sync canaries prefix
	onUpdate    func(prefix string)  // set by Run(); invoked for each canary upsert
}
// newWatchStoreManagerSync implements the WatchStoreManager interface, starting the
// registered functions only once the corresponding prefix sync canary has been received.
// This ensures that the synchronization of the keys hosted under the given prefix
// have been successfully synchronized from the external source, even in case an
// ephemeral kvstore is used.
func newWatchStoreManagerSync(logger *slog.Logger, backend WatchStoreBackend, clusterName string, factory Factory) WatchStoreManager {
	mgr := wsmSync{
		wsmCommon:   newWSMCommon(logger.With(logfields.ClusterName, clusterName)),
		clusterName: clusterName,
		backend:     backend,
	}
	// The manager itself observes the canary events (OnUpdate/OnDelete below).
	mgr.store = factory.NewWatchStore(clusterName, KVPairCreator, &mgr)
	return &mgr
}
// Run starts the manager, blocking until the context is closed and all
// started functions terminated.
func (mgr *wsmSync) Run(ctx context.Context) {
	mgr.run()
	mgr.onUpdate = func(prefix string) { mgr.ready(ctx, prefix) }
	// Watch the sync canaries of the given cluster: each canary upsert
	// triggers OnUpdate, which in turn starts the registered function.
	mgr.store.Watch(ctx, mgr.backend, path.Join(kvstore.SyncedPrefix, mgr.clusterName))
	mgr.wait()
}
// OnUpdate starts the function registered for the received sync canary (if any).
func (mgr *wsmSync) OnUpdate(k Key) { mgr.onUpdate(k.GetKeyName()) }
// OnDelete is a no-op: sync canary removals are not acted upon.
func (mgr *wsmSync) OnDelete(k NamedKey) {}
// wsmImmediate starts all registered functions as soon as Run() is executed.
type wsmImmediate struct {
	wsmCommon
}
// NewWatchStoreManagerImmediate implements the WatchStoreManager interface,
// immediately starting the registered functions once Run() is executed.
func NewWatchStoreManagerImmediate(logger *slog.Logger) WatchStoreManager {
	return &wsmImmediate{
		wsmCommon: newWSMCommon(logger),
	}
}
// Run starts the manager, blocking until the context is closed and all
// started functions terminated.
func (mgr *wsmImmediate) Run(ctx context.Context) {
	mgr.run()
	// Start every registered function right away; ready() removes each
	// entry from the map as it starts it.
	for prefix := range mgr.functions {
		mgr.ready(ctx, prefix)
	}
	mgr.wait()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
import (
"log/slog"
)
var (
	// traceEnabled gates the emission of kvstore trace messages.
	// NOTE(review): written by EnableTracing without synchronization —
	// presumably tracing is enabled before concurrent readers start; confirm.
	traceEnabled bool
)
// EnableTracing enables kvstore tracing
func EnableTracing() {
	traceEnabled = true
}
// Trace logs the given message and fields at debug level, but only when
// kvstore tracing has been turned on via EnableTracing.
func Trace(logger *slog.Logger, msg string, fields ...any) {
	if !traceEnabled {
		return
	}
	logger.Debug(msg, fields...)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package kvstore
// watchState holds the per-key bookkeeping of a watcherCache.
type watchState struct {
	deletionMark bool // true when the key is scheduled for removal
}
// watcherCache tracks the keys observed by a watcher, allowing stale keys
// to be marked and purged across watch restarts.
type watcherCache map[string]watchState
// Exists returns whether the given key is currently tracked by the cache.
func (wc watcherCache) Exists(key []byte) bool {
	// Return the lookup result directly rather than branching on it.
	// The string(key) conversion inside the map index is elided by the
	// compiler, so no allocation takes place.
	_, ok := wc[string(key)]
	return ok
}
// RemoveDeleted removes keys marked for deletion from the local cache exiting
// early if the given function returns false.
func (wc watcherCache) RemoveDeleted(f func(string) bool) bool {
	for key, state := range wc {
		if !state.deletionMark {
			continue
		}
		if !f(key) {
			// Abort, leaving the remaining marked keys in place.
			return false
		}
		delete(wc, key)
	}
	return true
}
// MarkAllForDeletion flags every cached key for removal; keys observed
// afterwards can be unflagged via MarkInUse, and the remainder purged
// through RemoveDeleted.
func (wc watcherCache) MarkAllForDeletion() {
	for k := range wc {
		wc[k] = watchState{deletionMark: true}
	}
}
// MarkInUse records the given key as present, clearing any deletion mark.
func (wc watcherCache) MarkInUse(key []byte) {
	wc[string(key)] = watchState{deletionMark: false}
}
// RemoveKey drops the given key from the cache.
func (wc watcherCache) RemoveKey(key []byte) {
	delete(wc, string(key))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"bytes"
"sort"
"strings"
)
// LabelArray is an array of labels forming a set
type LabelArray []Label
// Sort is an internal utility to return all LabelArrays in sorted
// order, when the source material may be unsorted. 'ls' is sorted
// in-place, but also returns the sorted array for convenience.
// Note: ordering is by Key only, and the sort is not stable for
// labels sharing the same key.
func (ls LabelArray) Sort() LabelArray {
	sort.Slice(ls, func(i, j int) bool {
		return ls[i].Key < ls[j].Key
	})
	return ls
}
// ParseLabelArray parses a list of labels and returns a LabelArray
func ParseLabelArray(labels ...string) LabelArray {
	array := make(LabelArray, 0, len(labels))
	for _, lbl := range labels {
		array = append(array, ParseLabel(lbl))
	}
	return array.Sort()
}
// ParseSelectLabelArray parses a list of select labels and returns a LabelArray
func ParseSelectLabelArray(labels ...string) LabelArray {
	array := make(LabelArray, 0, len(labels))
	for _, lbl := range labels {
		array = append(array, ParseSelectLabel(lbl))
	}
	return array.Sort()
}
// ParseLabelArrayFromArray converts an array of strings as labels and returns a LabelArray
func ParseLabelArrayFromArray(base []string) LabelArray {
	array := make(LabelArray, 0, len(base))
	for _, v := range base {
		array = append(array, ParseLabel(v))
	}
	return array.Sort()
}
// NewLabelArrayFromSortedList returns labels based on the output of SortedList()
// Trailing ';' will result in an empty key that must be filtered out.
func NewLabelArrayFromSortedList(list string) LabelArray {
	parts := strings.Split(list, ";")
	array := make(LabelArray, 0, len(parts))
	for _, part := range parts {
		lbl := ParseLabel(part)
		if lbl.Key == "" {
			// Skip empty keys, e.g. produced by a trailing ';'.
			continue
		}
		array = append(array, lbl)
	}
	return array
}
// ParseSelectLabelArrayFromArray converts an array of strings as select labels and returns a LabelArray
func ParseSelectLabelArrayFromArray(base []string) LabelArray {
	array := make(LabelArray, 0, len(base))
	for _, v := range base {
		array = append(array, ParseSelectLabel(v))
	}
	return array.Sort()
}
// Labels returns the LabelArray as Labels
func (ls LabelArray) Labels() Labels {
	lbls := make(Labels, len(ls))
	for _, lbl := range ls {
		lbls[lbl.Key] = lbl
	}
	return lbls
}
// Contains returns true if all ls contains all the labels in needed. If
// needed contains no labels, Contains() will always return true
func (ls LabelArray) Contains(needed LabelArray) bool {
	for i := range needed {
		found := false
		for l := range ls {
			if ls[l].Has(&needed[i]) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// Intersects returns true if ls contains at least one label in needed.
//
// This has the same matching semantics as Has, namely,
// ["k8s:foo=bar"].Intersects(["any:foo=bar"]) == true
// ["any:foo=bar"].Intersects(["k8s:foo=bar"]) == false
func (ls LabelArray) Intersects(needed LabelArray) bool {
	for i := range ls {
		for j := range needed {
			if ls[i].Has(&needed[j]) {
				return true
			}
		}
	}
	return false
}
// Lacks is identical to Contains but returns all missing labels
func (ls LabelArray) Lacks(needed LabelArray) LabelArray {
	missing := LabelArray{}
nextLabel:
	for i := range needed {
		for l := range ls {
			// BUGFIX: previously this tested ls[l].Has(&needed[l]),
			// indexing 'needed' with the ls index 'l' — producing wrong
			// results and an out-of-range panic when len(ls) > len(needed).
			// Mirror Contains() and compare against needed[i].
			if ls[l].Has(&needed[i]) {
				continue nextLabel
			}
		}
		missing = append(missing, needed[i])
	}
	return missing
}
// Has returns whether the provided key exists in the label array.
// Implementation of the
// github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels.Labels interface.
//
// The key can be of source "any", in which case the source is
// ignored. The inverse, however, is not true.
// ["k8s.foo=bar"].Has("any.foo") => true
// ["any.foo=bar"].Has("k8s.foo") => false
//
// If the key is of source "cidr", this will also match
// broader keys.
// ["cidr:1.1.1.1/32"].Has("cidr.1.0.0.0/8") => true
// ["cidr:1.0.0.0/8"].Has("cidr.1.1.1.1/32") => false
func (ls LabelArray) Has(key string) bool {
	// The key is submitted in the form of `source.key=value`
	keyLabel := parseSelectLabel(key, '.')
	// Linear scan: label arrays are typically small.
	for _, l := range ls {
		if l.HasKey(&keyLabel) {
			return true
		}
	}
	return false
}
// Get returns the value for the provided key, or "" when no label matches.
// Implementation of the
// github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels.Labels interface.
//
// The key can be of source "any", in which case the source is
// ignored. The inverse, however, is not true.
// ["k8s.foo=bar"].Get("any.foo") => "bar"
// ["any.foo=bar"].Get("k8s.foo") => ""
//
// If the key is of source "cidr", this will also match
// broader keys (same matching semantics as Has):
// ["cidr:1.1.1.1/32"].Get("cidr.1.0.0.0/8") => the matching label's value
// ["cidr:1.0.0.0/8"].Get("cidr.1.1.1.1/32") => ""
func (ls LabelArray) Get(key string) string {
	keyLabel := parseSelectLabel(key, '.')
	for _, l := range ls {
		if l.HasKey(&keyLabel) {
			return l.Value
		}
	}
	return ""
}
// DeepCopy returns a deep copy of the labels.
func (ls LabelArray) DeepCopy() LabelArray {
	if ls == nil {
		return nil
	}
	dup := make(LabelArray, len(ls))
	copy(dup, ls)
	return dup
}
// GetModel returns the LabelArray as a string array with fully-qualified labels.
// The output is parseable by ParseLabelArrayFromArray
func (ls LabelArray) GetModel() []string {
	res := make([]string, len(ls))
	for i := range ls {
		res[i] = ls[i].String()
	}
	return res
}
// LabelArrayFromString parses the bracketed, space-separated serialization
// produced by String(), returning nil when str is not of that form.
func LabelArrayFromString(str string) LabelArray {
	// each LabelArray starts with '[' and ends with ']'
	if len(str) <= 2 || str[0] != '[' || str[len(str)-1] != ']' {
		return nil
	}
	fields := strings.Split(str[1:len(str)-1], " ")
	la := make(LabelArray, 0, len(fields))
	for _, f := range fields {
		la = append(la, ParseLabel(f))
	}
	if len(la) == 0 {
		return nil
	}
	return la
}
// BuildString writes the bracketed, space-separated representation of the
// LabelArray into sb.
func (ls LabelArray) BuildString(sb *strings.Builder) {
	sb.WriteString("[")
	for l := range ls {
		if l > 0 {
			sb.WriteString(" ")
		}
		ls[l].BuildString(sb)
	}
	sb.WriteString("]")
}
// String returns the bracketed string representation of the LabelArray.
func (ls LabelArray) String() string {
	var sb strings.Builder
	ls.BuildString(&sb)
	return sb.String()
}
// BuildBytes writes the bracketed, space-separated representation of the
// LabelArray into buf.
func (ls LabelArray) BuildBytes(buf *bytes.Buffer) {
	buf.WriteString("[")
	for i := range ls {
		if i > 0 {
			buf.WriteString(" ")
		}
		ls[i].BuildBytes(buf)
	}
	buf.WriteString("]")
}
// StringMap converts LabelArray into map[string]string
// Note: The source is included in the keys with a ':' separator.
// Note: LabelArray does not deduplicate entries, as it is an array. It is
// possible for the output to contain fewer entries when the source and key are
// repeated in a LabelArray, as that is the key of the output. This scenario is
// not expected.
func (ls LabelArray) StringMap() map[string]string {
	o := make(map[string]string, len(ls))
	for _, lbl := range ls {
		o[lbl.Source+":"+lbl.Key] = lbl.Value
	}
	return o
}
// Equals returns true if the label arrays are the same, i.e., have the same labels in the same order.
func (ls LabelArray) Equals(b LabelArray) bool {
	if len(ls) != len(b) {
		return false
	}
	// Element-wise comparison; ordering is significant.
	for l := range ls {
		if !ls[l].Equals(&b[l]) {
			return false
		}
	}
	return true
}
// Less returns true if ls comes before b in the lexicographical order.
// Assumes both ls and b are already sorted.
// Elements are compared by Key, then Value, then Source; when the common
// prefix is equal, the shorter array sorts first.
func (ls LabelArray) Less(b LabelArray) bool {
	lsLen, bLen := len(ls), len(b)
	minLen := min(bLen, lsLen)
	for i := range minLen {
		switch {
		// Key
		case ls[i].Key < b[i].Key:
			return true
		case ls[i].Key > b[i].Key:
			return false
		// Value
		case ls[i].Value < b[i].Value:
			return true
		case ls[i].Value > b[i].Value:
			return false
		// Source
		case ls[i].Source < b[i].Source:
			return true
		case ls[i].Source > b[i].Source:
			return false
		}
	}
	return lsLen < bLen
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"bytes"
"iter"
"sort"
"strings"
)
// LabelArrayList is an array of LabelArrays. It is primarily intended as a
// simple collection
type LabelArrayList []LabelArray
// DeepCopy returns a deep copy of the LabelArray, with each element also copied.
func (ls LabelArrayList) DeepCopy() LabelArrayList {
	if ls == nil {
		return nil
	}
	dup := make(LabelArrayList, len(ls))
	for i, v := range ls {
		dup[i] = v.DeepCopy()
	}
	return dup
}
// GetModel returns the LabelArrayList as a [][]string. Each member LabelArray
// becomes a []string.
func (ls LabelArrayList) GetModel() [][]string {
	res := make([][]string, len(ls))
	for i, v := range ls {
		res[i] = v.GetModel()
	}
	return res
}
// Equals returns true if the label arrays lists have the same label arrays in the same order.
func (ls LabelArrayList) Equals(b LabelArrayList) bool {
	if len(ls) != len(b) {
		return false
	}
	// Element-wise comparison; ordering is significant.
	for l := range ls {
		if !ls[l].Equals(b[l]) {
			return false
		}
	}
	return true
}
// Diff returns the string of differences between 'ls' and 'expected' LabelArrayList with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging. Assumes sorted LabelArrayLists.
//
// BUGFIX: the previous implementation incremented 'i' in the Less branch and
// then evaluated expected[j].Less(ls[i]) without re-checking i < len(ls),
// panicking with an index out of range when 'ls' was exhausted (e.g.
// ls=[A], expected=[B] with A < B). The merge is now a single guarded loop.
func (ls LabelArrayList) Diff(expected LabelArrayList) (res string) {
	i := 0
	j := 0
	for i < len(ls) && j < len(expected) {
		switch {
		case ls[i].Equals(expected[j]):
			i++
			j++
		case ls[i].Less(expected[j]):
			// obtained has an unexpected labelArray
			res += " + " + ls[i].String() + "\n"
			i++
		default:
			// expected has a missing labelArray
			res += " - " + expected[j].String() + "\n"
			j++
		}
	}
	for ; i < len(ls); i++ {
		// obtained has an unexpected labelArray
		res += " + " + ls[i].String() + "\n"
	}
	for ; j < len(expected); j++ {
		// expected has a missing labelArray
		res += " - " + expected[j].String() + "\n"
	}
	return res
}
// LabelArrayListString is the string representation of a list of lists
// of labels. It must always be sorted.
//
// e.g. "[foo:a=b foo:c=d], [any:x=y]"
type LabelArrayListString string
// ArrayListString returns the LabelArrayList as a structured string,
// with the member LabelArrays separated by ", ".
func (ls LabelArrayList) ArrayListString() LabelArrayListString {
	var sb strings.Builder
	for i := range ls {
		if i > 0 {
			sb.WriteString(", ")
		}
		ls[i].BuildString(&sb)
	}
	return LabelArrayListString(sb.String())
}
// String returns the structured string representation of the list,
// identical to ArrayListString but as a plain string.
func (ls LabelArrayList) String() string {
	return string(ls.ArrayListString())
}
// LabelArrayListFromString parses the serialization produced by
// ArrayListString(), returning a nil list when str is not of that form.
func LabelArrayListFromString(str LabelArrayListString) (ls LabelArrayList) {
	// each LabelArray starts with '[' and ends with ']'
	if len(str) <= 2 || str[0] != '[' || str[len(str)-1] != ']' {
		return ls
	}
	trimmed := string(str[1 : len(str)-1]) // remove first and last bracket
	for _, arr := range strings.Split(trimmed, "], [") {
		var la LabelArray
		for _, lbl := range strings.Split(arr, " ") {
			la = append(la, ParseLabel(lbl))
		}
		ls = append(ls, la)
	}
	return ls
}
// ModelsFromLabelArrayListString returns an iterator yielding, for each
// LabelArray in the serialized list, its labels as a []string.
//
// BUGFIX: the previous implementation trimmed and consumed the captured
// 'str' variable in place inside the closure, so iterating the returned
// iter.Seq a second time operated on the leftovers of the first pass and
// yielded corrupted data. The closure now works on a local copy, making
// the sequence safely re-iterable.
func ModelsFromLabelArrayListString(las LabelArrayListString) iter.Seq[[]string] {
	str := string(las)
	return func(yield func(labelArray []string) bool) {
		// each LabelArray starts with '[' and ends with ']'
		if len(str) > 2 && str[0] == '[' && str[len(str)-1] == ']' {
			rest := str[1 : len(str)-1] // remove first and last bracket
			for {
				arr, tail, found := strings.Cut(rest, "], [")
				if !found {
					break
				}
				if !yield(strings.Split(arr, " ")) {
					return
				}
				rest = tail
			}
			// last label array
			yield(strings.Split(rest, " "))
		}
	}
}
// BuildBytes writes the ", "-separated representation of the member
// LabelArrays into buf.
func (ls LabelArrayList) BuildBytes(buf *bytes.Buffer) {
	for l, v := range ls {
		if l > 0 {
			buf.WriteString(", ")
		}
		v.BuildBytes(buf)
	}
}
// Sort sorts the LabelArrayList in-place, but also returns the sorted list
// for convenience. The LabelArrays themselves must already be sorted. This is
// true for all constructors of LabelArray.
func (ls LabelArrayList) Sort() LabelArrayList {
	// Ordering follows LabelArray.Less; the sort is not stable.
	sort.Slice(ls, func(i, j int) bool {
		return ls[i].Less(ls[j])
	})
	return ls
}
// Merge incorporates new LabelArrays into an existing LabelArrayList, without
// introducing duplicates, returning the result for convenience. Existing
// duplication in either list is not removed.
func (lsp *LabelArrayList) Merge(include ...LabelArray) LabelArrayList {
	// MergeSorted requires both sides to be sorted.
	lsp.Sort()
	sorted := LabelArrayList(include).Sort()
	return lsp.MergeSorted(sorted)
}
// MergeSorted incorporates new labels from 'include' to the receiver,
// both of which must be already sorted.
// LabelArrays are inserted from 'include' to the receiver as needed.
func (lsp *LabelArrayList) MergeSorted(include LabelArrayList) LabelArrayList {
	merged := *lsp
	i := 0
	for j := 0; i < len(include) && j < len(merged); j++ {
		if include[i].Less(merged[j]) {
			// Insert include[i] at position j, shifting the tail one
			// slot to the right.
			merged = append(merged[:j+1], merged[j:]...) // make space at merged[j]
			merged[j] = include[i]
			i++
		} else if include[i].Equals(merged[j]) {
			// Already present: skip to avoid introducing a duplicate.
			i++
		}
	}
	// 'include' may have more entries after original labels have been exhausted
	if i < len(include) {
		merged = append(merged, include[i:]...)
	}
	*lsp = merged
	return *lsp
}
// nextArray locates the next bracketed span "[...]" in str at or after
// offset end, returning the index of '[' and the index one past the
// matching ']'. start is -1 when no further '[' exists; end is -1 when
// the '[' has no closing ']'.
func nextArray(str string, end int) (int, int) {
	from := end
	start := strings.IndexByte(str[from:], '[')
	if start < 0 {
		return start, end
	}
	start += from
	if rel := strings.IndexByte(str[start:], ']'); rel >= 0 {
		return start, start + rel + 1
	}
	return start, -1
}
func writeRemainder(str string, start, end int, sb *strings.Builder) {
if start >= 0 && start < end {
if sb.Len() > 0 {
sb.WriteString(", ")
}
sb.WriteString(str[start:])
}
}
// MergeSortedLabelArrayListStrings merges 'lb' into 'la', assuming both are
// sorted serialized LabelArrayLists, producing a sorted result without
// duplicating arrays present in both. Comparison is performed
// lexicographically on the raw "[...]" substrings.
func MergeSortedLabelArrayListStrings(la, lb LabelArrayListString) LabelArrayListString {
	var sb strings.Builder
	var aStart, aEnd, bStart, bEnd int
	a := string(la)
	b := string(lb)
Loop:
	for {
		// get the next label array on 'a'
		aStart, aEnd = nextArray(a, aEnd)
		if aStart < 0 || aEnd < 0 || aStart >= aEnd {
			// no more label arrays in a, concat the rest of 'b'
			// next item from 'b' has not been parsed yet
			bStart, bEnd = nextArray(b, bEnd)
			writeRemainder(b, bStart, bEnd, &sb)
			break
		}
		// get the next label array on 'b'
		bStart, bEnd = nextArray(b, bEnd)
		if bStart < 0 || bEnd < 0 || bStart >= bEnd {
			// no more label arrays in b, concat the rest of 'a'
			writeRemainder(a, aStart, aEnd, &sb)
			break
		}
		// Add lesser label arrays from 'a'
		for a[aStart:aEnd] < b[bStart:bEnd] {
			if sb.Len() > 0 {
				sb.WriteString(", ")
			}
			sb.WriteString(a[aStart:aEnd])
			// get the next label array on 'a'
			aStart, aEnd = nextArray(a, aEnd)
			if aStart < 0 || aEnd < 0 || aStart >= aEnd {
				// no more label arrays in 'a', concat the rest of 'b'
				writeRemainder(b, bStart, bEnd, &sb)
				break Loop
			}
		}
		// Add lesser values from 'b'
		for a[aStart:aEnd] > b[bStart:bEnd] {
			if sb.Len() > 0 {
				sb.WriteString(", ")
			}
			sb.WriteString(b[bStart:bEnd])
			// get the next label array on 'b'
			bStart, bEnd = nextArray(b, bEnd)
			if bStart < 0 || bEnd < 0 || bStart >= bEnd {
				// no more label arrays in 'b', concat the rest of 'a'
				writeRemainder(a, aStart, aEnd, &sb)
				break Loop
			}
		}
		// Identical arrays: emit once (deduplication across the two inputs).
		if a[aStart:aEnd] == b[bStart:bEnd] {
			if sb.Len() > 0 {
				sb.WriteString(", ")
			}
			sb.WriteString(b[bStart:bEnd])
		}
	}
	return LabelArrayListString(sb.String())
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"fmt"
"net/netip"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/option"
)
var (
	// worldLabelNonDualStack is the single reserved:world label used when
	// dual-stack mode is disabled.
	worldLabelNonDualStack = Label{Source: LabelSourceReserved, Key: IDNameWorld}
	// worldLabelV4 is the reserved:world-ipv4 label used for IPv4 addresses
	// in dual-stack mode.
	worldLabelV4 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv4}
	// worldLabelV6 is the reserved:world-ipv6 label used for IPv6 addresses
	// in dual-stack mode.
	worldLabelV6 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv6}
)
// maskedIPToLabel serializes an IP + prefix length into a CIDR Label that
// can be used for creating Labels and EndpointSelector objects.
//
// EndpointSelectors don't support colons inside the name section of a
// label, so for IPv6 addresses every ":" becomes "-". Keys also must not
// start or end with "-", so a leading or trailing ":" additionally gets a
// "0" inserted on the outside (e.g. "::" serializes as "0--0").
func maskedIPToLabel(ipStr string, prefix int) Label {
	var key strings.Builder
	// Worst case: a '0' at each end, plus '/' and a two-digit prefix.
	key.Grow(len(ipStr) + 5)
	last := len(ipStr) - 1
	for i := 0; i < len(ipStr); i++ {
		c := ipStr[i]
		if c != ':' {
			key.WriteByte(c)
			continue
		}
		switch i {
		case 0:
			key.WriteString("0-")
		case last:
			key.WriteString("-0")
		default:
			key.WriteByte('-')
		}
	}
	key.WriteByte('/')
	key.WriteString(strconv.Itoa(prefix))
	return Label{Key: key.String(), Source: LabelSourceCIDR}
}
// IPStringToLabel parses a string and returns it as a CIDR label.
//
// The input may be a bare IP address, which is treated as a full-length
// prefix, or a CIDR, which is masked to its network address first.
// If ip is not a valid IP address or CIDR Prefix, returns an error.
func IPStringToLabel(ip string) (Label, error) {
	// Factored out of netip.ParsePrefix to avoid allocating an empty
	// netip.Prefix in case it's an IP and not a CIDR.
	i := strings.LastIndexByte(ip, '/')
	if i < 0 {
		parsedIP, err := netip.ParseAddr(ip)
		if err != nil {
			return Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err)
		}
		// A bare address is a host prefix (/32 or /128).
		return maskedIPToLabel(ip, parsedIP.BitLen()), nil
	}
	parsedPrefix, err := netip.ParsePrefix(ip)
	if err != nil {
		return Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err)
	}
	// Mask to the network address so equivalent CIDRs yield the same label.
	return maskedIPToLabel(parsedPrefix.Masked().Addr().String(), parsedPrefix.Bits()), nil
}
// GetCIDRLabels turns a CIDR in to a specially formatted label, and returns
// a Labels including the CIDR-specific label and the appropriate world label.
// e.g. "10.0.0.0/8" => ["cidr:10.0.0.0/8", "reserved:world-ipv4"]
//
// IPv6 requires some special treatment, since ":" is special in the label
// selector grammar. For example, "::/0" becomes "cidr:0--0/0".
func GetCIDRLabels(prefix netip.Prefix) Labels {
	lbls := make(Labels, 2)
	// A zero-length prefix matches everything; it gets only the world label.
	if bits := prefix.Bits(); bits > 0 {
		lbl := maskedIPToLabel(prefix.Addr().String(), bits)
		// Cache the parsed prefix on the label to skip later re-parsing.
		lbl.cidr = &prefix
		lbls[lbl.Key] = lbl
	}
	lbls.AddWorldLabel(prefix.Addr())
	return lbls
}
// AddWorldLabel inserts the reserved world label matching 'addr' into lbls:
// the plain world label when dual-stack is disabled, otherwise the
// address-family specific world-ipv4 / world-ipv6 label.
func (lbls Labels) AddWorldLabel(addr netip.Addr) {
	if !option.Config.IsDualStack() {
		lbls[worldLabelNonDualStack.Key] = worldLabelNonDualStack
		return
	}
	if addr.Is4() {
		lbls[worldLabelV4.Key] = worldLabelV4
		return
	}
	lbls[worldLabelV6.Key] = worldLabelV6
}
// LabelToPrefix converts a CIDR label key back into a netip.Prefix by
// undoing the ":" => "-" rewrite applied by maskedIPToLabel. An error is
// returned when the restored string is not a valid prefix.
func LabelToPrefix(key string) (netip.Prefix, error) {
	pfx, err := netip.ParsePrefix(strings.ReplaceAll(key, "-", ":"))
	if err != nil {
		return netip.Prefix{}, fmt.Errorf("failed to parse label prefix %s: %w", key, err)
	}
	return pfx, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"bytes"
"encoding/json"
"fmt"
"maps"
"net/netip"
"slices"
"strings"
"github.com/cilium/cilium/pkg/container/cache"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
)
const (
	// PathDelimiter is the delimiter used in the labels paths.
	PathDelimiter = "."

	// IDNameHost is the label used for the hostname ID.
	IDNameHost = "host"

	// IDNameRemoteNode is the label used to describe the
	// ReservedIdentityRemoteNode
	IDNameRemoteNode = "remote-node"

	// IDNameWorld is the label used for the world ID.
	IDNameWorld = "world"

	// IDNameWorldIPv4 is the label used for the world-ipv4 ID, to distinguish
	// it from world-ipv6 in dual-stack mode.
	IDNameWorldIPv4 = "world-ipv4"

	// IDNameWorldIPv6 is the label used for the world-ipv6 ID, to distinguish
	// it from world-ipv4 in dual-stack mode.
	IDNameWorldIPv6 = "world-ipv6"

	// IDNameCluster is the label used to identify an unspecified endpoint
	// inside the cluster
	IDNameCluster = "cluster"

	// IDNameHealth is the label used for the local cilium-health endpoint
	IDNameHealth = "health"

	// IDNameInit is the label used to identify any endpoint that has not
	// received any labels yet.
	IDNameInit = "init"

	// IDNameKubeAPIServer is the label used to identify the kube-apiserver. It
	// is part of the reserved identity 7 and it is also used in conjunction
	// with IDNameHost if the kube-apiserver is running on the local host.
	IDNameKubeAPIServer = "kube-apiserver"

	// IDNameEncryptedOverlay is the label used to identify encrypted overlay
	// traffic.
	//
	// It is part of the reserved identity 11 and signals that overlay traffic
	// with this identity must be IPSec encrypted before leaving the host.
	//
	// This identity should never be seen on the wire and is used only on the
	// local host.
	IDNameEncryptedOverlay = "overlay-to-encrypt"

	// IDNameIngress is the label used to identify Ingress proxies. It is part
	// of the reserved identity 8.
	IDNameIngress = "ingress"

	// IDNameNone is the label used to identify no endpoint or other L3 entity.
	// It will never be assigned and this "label" is here for consistency with
	// other Entities.
	IDNameNone = "none"

	// IDNameUnmanaged is the label used to identify unmanaged endpoints
	IDNameUnmanaged = "unmanaged"

	// IDNameUnknown is the label used to identify an endpoint with an
	// unknown identity.
	IDNameUnknown = "unknown"
)
var (
	// LabelHealth is the label used for health.
	LabelHealth = Labels{IDNameHealth: NewLabel(IDNameHealth, "", LabelSourceReserved)}

	// LabelHost is the label used for the host endpoint.
	LabelHost = Labels{IDNameHost: NewLabel(IDNameHost, "", LabelSourceReserved)}

	// LabelWorld is the label used for world.
	LabelWorld = Labels{IDNameWorld: NewLabel(IDNameWorld, "", LabelSourceReserved)}

	// LabelWorldIPv4 is the label used for world-ipv4.
	LabelWorldIPv4 = Labels{IDNameWorldIPv4: NewLabel(IDNameWorldIPv4, "", LabelSourceReserved)}

	// LabelWorldIPv6 is the label used for world-ipv6.
	LabelWorldIPv6 = Labels{IDNameWorldIPv6: NewLabel(IDNameWorldIPv6, "", LabelSourceReserved)}

	// LabelRemoteNode is the label used for remote nodes.
	LabelRemoteNode = Labels{IDNameRemoteNode: NewLabel(IDNameRemoteNode, "", LabelSourceReserved)}

	// LabelKubeAPIServer is the label used for the kube-apiserver. See comment
	// on IDNameKubeAPIServer.
	LabelKubeAPIServer = Labels{IDNameKubeAPIServer: NewLabel(IDNameKubeAPIServer, "", LabelSourceReserved)}

	// LabelKubeAPIServerExt combines the kube-apiserver label with the
	// reserved world label.
	LabelKubeAPIServerExt = Labels{
		IDNameKubeAPIServer: NewLabel(IDNameKubeAPIServer, "", LabelSourceReserved),
		IDNameWorld:         NewLabel(IDNameWorld, "", LabelSourceReserved),
	}

	// LabelIngress is the label used for Ingress proxies. See comment
	// on IDNameIngress.
	LabelIngress = Labels{IDNameIngress: NewLabel(IDNameIngress, "", LabelSourceReserved)}

	// LabelKeyFixedIdentity is the label that can be used to define a fixed
	// identity.
	LabelKeyFixedIdentity = "io.cilium.fixed-identity"
)
const (
	// LabelSourceUnspec is a label with unspecified source.
	LabelSourceUnspec = "unspec"

	// LabelSourceAny is a label that matches any source.
	LabelSourceAny = "any"

	// LabelSourceAnyKeyPrefix is the prefix of an "any" label.
	LabelSourceAnyKeyPrefix = LabelSourceAny + "."

	// LabelSourceK8s is a label imported from Kubernetes.
	LabelSourceK8s = "k8s"

	// LabelSourceK8sKeyPrefix is the prefix of a Kubernetes label.
	LabelSourceK8sKeyPrefix = LabelSourceK8s + "."

	// LabelSourceContainer is a label imported from the container runtime.
	LabelSourceContainer = "container"

	// LabelSourceCNI is a label imported from the CNI plugin.
	LabelSourceCNI = "cni"

	// LabelSourceReserved is the label source for reserved types.
	LabelSourceReserved = "reserved"

	// LabelSourceCIDR is the label source for generated CIDRs.
	LabelSourceCIDR = "cidr"

	// LabelSourceCIDRGroup is the label source used for labels from CIDRGroups.
	LabelSourceCIDRGroup = "cidrgroup"

	// LabelSourceCIDRGroupKeyPrefix is the source as a k8s selector key prefix.
	LabelSourceCIDRGroupKeyPrefix = LabelSourceCIDRGroup + "."

	// LabelSourceNode is the label source for remote-nodes.
	LabelSourceNode = "node"

	// LabelSourceFQDN is the label source for IPs resolved by fqdn lookups.
	LabelSourceFQDN = "fqdn"

	// LabelSourceReservedKeyPrefix is the prefix of a reserved label.
	LabelSourceReservedKeyPrefix = LabelSourceReserved + "."

	// LabelSourceDirectory is the label source for policies read from files.
	LabelSourceDirectory = "directory"
)
// Label is the Cilium's representation of a container label.
type Label struct {
	// Key is the label's key; within a Labels map it doubles as the map key.
	Key string `json:"key"`
	// Value is the label's optional value.
	Value string `json:"value,omitempty"`
	// Source can be one of the above values (e.g.: LabelSourceContainer).
	//
	// +kubebuilder:validation:Optional
	Source string `json:"source"`

	// cidr caches the parsed prefix for labels with Source ==
	// LabelSourceCIDR, avoiding re-parsing Key on CIDR-aware comparisons.
	// It is excluded from JSON serialization and deep-equality generation.
	// +deepequal-gen=false
	cidr *netip.Prefix `json:"-"`
}
// Labels is a map of labels where the map's key is the same as the label's key.
type Labels map[string]Label

//
// Convenience functions to use instead of Has(), which iterates through the labels
//

// HasLabelWithKey reports whether lbls contains a label with the given key.
func (l Labels) HasLabelWithKey(key string) bool {
	_, exists := l[key]
	return exists
}

// HasFixedIdentityLabel reports whether the fixed-identity label is present.
func (l Labels) HasFixedIdentityLabel() bool {
	return l.HasLabelWithKey(LabelKeyFixedIdentity)
}

// HasInitLabel reports whether the reserved init label is present.
func (l Labels) HasInitLabel() bool {
	return l.HasLabelWithKey(IDNameInit)
}

// HasHealthLabel reports whether the health label is present.
func (l Labels) HasHealthLabel() bool {
	return l.HasLabelWithKey(IDNameHealth)
}

// HasIngressLabel reports whether the ingress label is present.
func (l Labels) HasIngressLabel() bool {
	return l.HasLabelWithKey(IDNameIngress)
}

// HasHostLabel reports whether the host label is present.
func (l Labels) HasHostLabel() bool {
	return l.HasLabelWithKey(IDNameHost)
}

// HasKubeAPIServerLabel reports whether the kube-apiserver label is present.
func (l Labels) HasKubeAPIServerLabel() bool {
	return l.HasLabelWithKey(IDNameKubeAPIServer)
}

// HasRemoteNodeLabel reports whether the remote-node label is present.
func (l Labels) HasRemoteNodeLabel() bool {
	return l.HasLabelWithKey(IDNameRemoteNode)
}

// HasWorldIPv6Label reports whether the world-ipv6 label is present.
func (l Labels) HasWorldIPv6Label() bool {
	return l.HasLabelWithKey(IDNameWorldIPv6)
}

// HasWorldIPv4Label reports whether the world-ipv4 label is present.
func (l Labels) HasWorldIPv4Label() bool {
	return l.HasLabelWithKey(IDNameWorldIPv4)
}

// HasNonDualstackWorldLabel reports whether the plain world label is present.
func (l Labels) HasNonDualstackWorldLabel() bool {
	return l.HasLabelWithKey(IDNameWorld)
}

// HasWorldLabel reports whether any world variant (plain, IPv4 or IPv6)
// is present.
func (l Labels) HasWorldLabel() bool {
	return l.HasWorldIPv4Label() || l.HasWorldIPv6Label() || l.HasNonDualstackWorldLabel()
}
// GetPrintableModel returns the labels as a lexicographically sorted slice
// of human-readable strings. CIDR labels whose key parses as a prefix are
// rendered in canonical prefix notation (e.g. "cidr:10.0.0.0/8"); anything
// else uses the plain "source:key=value" form.
func (l Labels) GetPrintableModel() (res []string) {
	res = make([]string, 0, len(l))
	for _, lbl := range l {
		if lbl.Source != LabelSourceCIDR {
			// Not a CIDR label, no canonicalization needed.
			res = append(res, lbl.String())
			continue
		}
		if prefix, err := LabelToPrefix(lbl.Key); err == nil {
			res = append(res, LabelSourceCIDR+":"+prefix.String())
		} else {
			// Unparsable CIDR key; fall back to the raw representation.
			res = append(res, lbl.String())
		}
	}
	slices.Sort(res)
	return res
}

// String returns the map of labels as a single comma-separated, sorted,
// human-readable string.
func (l Labels) String() string {
	return strings.Join(l.GetPrintableModel(), ",")
}
// Equals returns true if the two Labels contain the same set of labels,
// comparing Source, Key and Value of every entry.
func (l Labels) Equals(other Labels) bool {
	if len(l) != len(other) {
		return false
	}
	for key, a := range l {
		b, ok := other[key]
		if !ok || a.Source != b.Source || a.Key != b.Key || a.Value != b.Value {
			return false
		}
	}
	return true
}
// GetFromSource returns a new Labels holding only the labels whose Source
// matches the given source.
func (l Labels) GetFromSource(source string) Labels {
	result := Labels{}
	for key, lbl := range l {
		if lbl.Source == source {
			result[key] = lbl
		}
	}
	return result
}

// RemoveFromSource deletes, in place, every label with the given source.
func (l Labels) RemoveFromSource(source string) {
	maps.DeleteFunc(l, func(_ string, lbl Label) bool {
		return lbl.Source == source
	})
}
// NewLabel returns a new label from the given key, value and source. If source is empty,
// the default value will be LabelSourceUnspec. If key starts with '$', the source
// will be overwritten with LabelSourceReserved. If key contains ':', the value
// before ':' will be used as source if given source is empty, otherwise the value before
// ':' will be deleted and unused.
func NewLabel(key string, value string, source string) Label {
	var src string
	src, key = parseSource(key, ':')
	if source == "" {
		if src == "" {
			source = LabelSourceUnspec
		} else {
			source = src
		}
	}
	// Special case for reserved labels given as "$=name" style input: the
	// value takes the place of the (empty) key.
	if src == LabelSourceReserved && key == "" {
		key = value
		value = ""
	}
	// Deduplicate the strings through the shared string cache to reduce
	// memory held by many identical labels.
	l := Label{
		Key:    cache.Strings.Get(key),
		Value:  cache.Strings.Get(value),
		Source: cache.Strings.Get(source),
	}
	// For CIDR labels, pre-parse and cache the prefix; a parse failure is
	// logged but still yields a label (without the cached prefix).
	if l.Source == LabelSourceCIDR {
		c, err := LabelToPrefix(l.Key)
		if err != nil {
			// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
			logging.DefaultSlogLogger.Error("Failed to parse CIDR label: invalid prefix.",
				logfields.Error, err,
				logfields.Key, l.Key,
			)
		} else {
			l.cidr = &c
		}
	}
	return l
}
// Equals returns true if Key and Value match, and — unless l carries the
// "any" source — Source matches as well.
func (l *Label) Equals(b *Label) bool {
	if l.Key != b.Key || l.Value != b.Value {
		return false
	}
	return l.IsAnySource() || l.Source == b.Source
}

// IsAnySource reports whether the label was set with source "any".
func (l *Label) IsAnySource() bool {
	return l.Source == LabelSourceAny
}

// IsReservedSource reports whether the label was set with source "reserved".
func (l *Label) IsReservedSource() bool {
	return l.Source == LabelSourceReserved
}
// Has returns true if label l contains target.
// target may be "looser" w.r.t source or cidr, i.e.
// "k8s:foo=bar".Has("any:foo=bar") is true
// "any:foo=bar".Has("k8s:foo=bar") is false
// "cidr:10.0.0.1/32".Has("cidr:10.0.0.0/24") is true
func (l *Label) Has(target *Label) bool {
	if l.Value != target.Value {
		return false
	}
	return l.HasKey(target)
}
// HasKey returns true if l has target's key.
// target may be "looser" w.r.t source or cidr, i.e.
// "k8s:foo=bar".HasKey("any:foo") is true
// "any:foo=bar".HasKey("k8s:foo") is false
// "cidr:10.0.0.1/32".HasKey("cidr:10.0.0.0/24") is true
// "cidr:10.0.0.0/24".HasKey("cidr:10.0.0.1/32") is false
func (l *Label) HasKey(target *Label) bool {
	if !target.IsAnySource() && l.Source != target.Source {
		return false
	}
	// Do cidr-aware matching if both sources are "cidr".
	if target.Source == LabelSourceCIDR && l.Source == LabelSourceCIDR {
		// Use the cached prefix when available; otherwise parse the key.
		// BUGFIX: the fallback must be taken on *successful* parse
		// (err == nil) — previously the zero prefix was used only when
		// parsing failed, discarding valid parses.
		tc := target.cidr
		if tc == nil {
			v, err := LabelToPrefix(target.Key)
			if err == nil {
				tc = &v
			}
		}
		lc := l.cidr
		if lc == nil {
			v, err := LabelToPrefix(l.Key)
			if err == nil {
				lc = &v
			}
		}
		// target matches when it is the same size or wider than l and
		// covers l's network address.
		if tc != nil && lc != nil && tc.Bits() <= lc.Bits() && tc.Contains(lc.Addr()) {
			return true
		}
	}
	// Fall back to exact key equality (also covers unparsable CIDR keys).
	return l.Key == target.Key
}
// String returns the label as "source:key=value", or "source:key" when the
// value is empty.
func (l *Label) String() string {
	if l.Value == "" {
		return l.Source + ":" + l.Key
	}
	return l.Source + ":" + l.Key + "=" + l.Value
}

// BuildString writes the label's String() form into the given builder.
func (l *Label) BuildString(sb *strings.Builder) {
	sb.WriteString(l.Source)
	sb.WriteByte(':')
	sb.WriteString(l.Key)
	if l.Value != "" {
		sb.WriteByte('=')
		sb.WriteString(l.Value)
	}
}

// BuildBytes writes the label's String() form into the given buffer.
func (l *Label) BuildBytes(buf *bytes.Buffer) {
	buf.WriteString(l.Source)
	buf.WriteByte(':')
	buf.WriteString(l.Key)
	if l.Value != "" {
		buf.WriteByte('=')
		buf.WriteString(l.Value)
	}
}

// IsValid reports whether the label has a non-empty key.
func (l *Label) IsValid() bool {
	return len(l.Key) > 0
}
// UnmarshalJSON decodes a Label from either its full object form
// ({"source": ..., "key": ..., "value": ...}) or, as a fallback, its short
// string form "[SOURCE:]KEY[=VALUE]". For CIDR labels the prefix is parsed
// and cached on the label after decoding.
func (l *Label) UnmarshalJSON(data []byte) error {
	if l == nil {
		return fmt.Errorf("cannot unmarshal to nil pointer")
	}
	if len(data) == 0 {
		return fmt.Errorf("invalid Label: empty data")
	}
	var aux struct {
		Source string `json:"source"`
		Key    string `json:"key"`
		Value  string `json:"value,omitempty"`
	}
	err := json.Unmarshal(data, &aux)
	if err != nil {
		// If parsing of the full representation failed then try the short
		// form in the format:
		//
		// [SOURCE:]KEY[=VALUE]
		var aux string
		if err := json.Unmarshal(data, &aux); err != nil {
			return fmt.Errorf("decode of Label as string failed: %w", err)
		}
		if aux == "" {
			return fmt.Errorf("invalid Label: Failed to parse %s as a string", data)
		}
		*l = ParseLabel(aux)
	} else {
		// The object form decoded; a missing key makes the label invalid.
		if aux.Key == "" {
			return fmt.Errorf("invalid Label: '%s' does not contain label key", data)
		}
		l.Source = aux.Source
		l.Key = aux.Key
		l.Value = aux.Value
	}
	// Cache the parsed prefix for CIDR labels; a parse failure is logged
	// but does not fail the unmarshal.
	if l.Source == LabelSourceCIDR {
		c, err := LabelToPrefix(l.Key)
		if err == nil {
			l.cidr = &c
		} else {
			// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
			logging.DefaultSlogLogger.Error("Failed to parse CIDR label: invalid prefix.",
				logfields.Error, err,
				logfields.Key, l.Key,
			)
		}
	}
	return nil
}
// GetExtendedKey returns the key of a label with the source encoded,
// i.e. "source.key".
func (l *Label) GetExtendedKey() string {
	return l.Source + PathDelimiter + l.Key
}

// GetCiliumKeyFrom returns the label's source and key from an extended key
// in the format SOURCE:KEY. A key without a path delimiter is given the
// "any" source.
func GetCiliumKeyFrom(extKey string) string {
	if src, key, found := strings.Cut(extKey, PathDelimiter); found {
		return src + ":" + key
	}
	return LabelSourceAny + ":" + extKey
}
// GetExtendedKeyFrom returns the extended key of a label string.
// For example:
// `k8s:foo=bar` returns `k8s.foo`
// `container:foo=bar` returns `container.foo`
// `foo=bar` returns `any.foo=bar`
func GetExtendedKeyFrom(str string) string {
	src, next := parseSource(str, ':')
	if src == "" {
		src = LabelSourceAny
	}
	// Strip any value part after '='; Cut leaves 'key' untouched when
	// there is no '='.
	key, _, _ := strings.Cut(next, "=")
	return src + PathDelimiter + key
}
// KeyExtender maps a label key to its extended, source-qualified form.
type KeyExtender func(string) string

// DefaultKeyExtender converts label keys from the Cilium representation to
// the Kubernetes representation (`<source>.<key>`). It is not idempotent;
// callers must apply it at most once per key.
var DefaultKeyExtender KeyExtender = GetExtendedKeyFrom

// GetSourcePrefixKeyExtender returns an extender that prepends the given
// source prefix to every key it is applied to.
func GetSourcePrefixKeyExtender(srcPrefix string) KeyExtender {
	return func(key string) string {
		return srcPrefix + key
	}
}
// Map2Labels transforms a map[key]value into Labels. The source argument
// overwrites any source encoded in the keys of the given map.
// Example:
// l := Map2Labels(map[string]string{"k8s:foo": "bar"}, "cilium")
// fmt.Printf("%+v\n", l)
//
// map[string]Label{"foo":Label{Key:"foo", Value:"bar", Source:"cilium"}}
func Map2Labels(m map[string]string, source string) Labels {
	result := make(Labels, len(m))
	for key, value := range m {
		lbl := NewLabel(key, value, source)
		result[lbl.Key] = lbl
	}
	return result
}
// StringMap converts Labels into a map[string]string keyed by
// "source:key".
func (l Labels) StringMap() map[string]string {
	out := make(map[string]string, len(l))
	for _, lbl := range l {
		out[lbl.Source+":"+lbl.Key] = lbl.Value
	}
	return out
}

// K8sStringMap converts Labels into a Kubernetes-style map[string]string:
// k8s/any/unspec-sourced labels are keyed by their bare key, all other
// sources are encoded as "source.key".
func (l Labels) K8sStringMap() map[string]string {
	out := make(map[string]string, len(l))
	for _, lbl := range l {
		switch lbl.Source {
		case LabelSourceK8s, LabelSourceAny, LabelSourceUnspec:
			out[lbl.Key] = lbl.Value
		default:
			out[lbl.Source+"."+lbl.Key] = lbl.Value
		}
	}
	return out
}
// NewLabelsFromModel creates labels from a string slice, parsing each entry
// with ParseLabel. Entries that parse to an empty key are skipped.
func NewLabelsFromModel(base []string) Labels {
	lbls := make(Labels, len(base))
	for _, s := range base {
		lbl := ParseLabel(s)
		if lbl.Key == "" {
			continue
		}
		lbls[lbl.Key] = lbl
	}
	return lbls
}

// FromSlice creates Labels from a slice of labels, keyed by label key.
func FromSlice(labels []Label) Labels {
	result := make(Labels, len(labels))
	for _, lbl := range labels {
		result[lbl.Key] = lbl
	}
	return result
}
// NewLabelsFromSortedList returns labels based on the semicolon-separated
// output of SortedList().
func NewLabelsFromSortedList(list string) Labels {
	return NewLabelsFromModel(strings.Split(list, ";"))
}

// NewFrom returns a fresh copy of the given Labels.
func NewFrom(l Labels) Labels {
	duplicate := make(Labels, len(l))
	duplicate.MergeLabels(l)
	return duplicate
}
// GetModel returns the string form of every label, in map iteration
// (i.e. unspecified) order.
func (l Labels) GetModel() []string {
	model := make([]string, 0, len(l))
	for _, lbl := range l {
		model = append(model, lbl.String())
	}
	return model
}
// MergeLabels copies every label of 'from' into l, overwriting entries that
// share a key.
// Example:
// to := Labels{Label{key1, value1, source1}, Label{key2, value3, source4}}
// from := Labels{Label{key1, value3, source4}}
// to.MergeLabels(from)
// fmt.Printf("%+v\n", to)
//
// Labels{Label{key1, value3, source4}, Label{key2, value3, source4}}
func (l Labels) MergeLabels(from Labels) {
	maps.Copy(l, from)
}

// Remove deletes from l every key present in 'from'. The received Labels
// is not modified.
func (l Labels) Remove(from Labels) {
	maps.DeleteFunc(l, func(key string, _ Label) bool {
		_, found := from[key]
		return found
	})
}
// FormatForKVStore returns the label as a formatted string, ending in
// a semicolon
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS
// PART OF THE KEY IN THE KEY-VALUE STORE.
//
// Non-pointer receiver allows this to be called on a value in a map.
func (l Label) FormatForKVStore() []byte {
	// We don't care if the values already have a '='.
	//
	// We absolutely care that the final character is a semi-colon.
	// Identity allocation in the kvstore depends on this (see
	// kvstore.prefixMatchesKey())
	//
	// The +3 covers the ':', '=' and ';' delimiters.
	b := make([]byte, 0, len(l.Source)+len(l.Key)+len(l.Value)+3)
	buf := bytes.NewBuffer(b)
	l.formatForKVStoreInto(buf)
	return buf.Bytes()
}

// formatForKVStoreInto writes the label as a formatted string, ending in
// a semicolon into buf. The emitted form is "source:key=value;".
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS
// PART OF THE KEY IN THE KEY-VALUE STORE.
//
// Non-pointer receiver allows this to be called on a value in a map.
func (l Label) formatForKVStoreInto(buf *bytes.Buffer) {
	buf.WriteString(l.Source)
	buf.WriteRune(':')
	buf.WriteString(l.Key)
	buf.WriteRune('=')
	buf.WriteString(l.Value)
	buf.WriteRune(';')
}
// SortedList returns the labels as a sorted list, separated by semicolon
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS KEY IN
// THE KEY-VALUE STORE.
//
// The output is deterministic because the keys are sorted before
// serialization.
func (l Labels) SortedList() []byte {
	keys := slices.Sorted(maps.Keys(l))

	// Labels can have arbitrary size. However, when many CIDR identities are in
	// the system, for example due to a FQDN policy matching S3, CIDR labels
	// dominate in number. IPv4 CIDR labels in serialized form are max 25 bytes
	// long. Allocate slightly more to avoid having a realloc if there's some
	// other labels which may longer, since the cost of allocating a few bytes
	// more is dominated by a second allocation, especially since these
	// allocations are short-lived.
	//
	// cidr:123.123.123.123/32=;
	// 0        1         2
	// 1234567890123456789012345
	b := make([]byte, 0, len(keys)*30)
	buf := bytes.NewBuffer(b)
	for _, k := range keys {
		l[k].formatForKVStoreInto(buf)
	}

	return buf.Bytes()
}
// ToSlice returns the values of the Labels map as a slice sorted by key.
func (l Labels) ToSlice() []Label {
	return l.LabelArray()
}

// LabelArray returns the labels as a LabelArray, sorted by key.
func (l Labels) LabelArray() LabelArray {
	arr := make(LabelArray, 0, len(l))
	for _, lbl := range l {
		arr = append(arr, lbl)
	}
	return arr.Sort()
}
// FindReserved returns a copy of all labels with the reserved source, or
// nil when none are present.
func (l Labels) FindReserved() LabelArray {
	var reserved LabelArray
	for _, lbl := range l {
		if lbl.Source == LabelSourceReserved {
			reserved = append(reserved, lbl)
		}
	}
	// nil when nothing was appended, matching the documented contract.
	return reserved
}

// IsReserved returns true if any of the labels has a reserved source.
func (l Labels) IsReserved() bool {
	return l.HasSource(LabelSourceReserved)
}
// Has returns true if l contains a label matching the given one (see
// Label.Has for the matching rules).
func (l Labels) Has(label Label) bool {
	for _, candidate := range l {
		if candidate.Has(&label) {
			return true
		}
	}
	return false
}

// HasSource returns true if any label in l carries the given source.
func (l Labels) HasSource(source string) bool {
	for _, candidate := range l {
		if candidate.Source == source {
			return true
		}
	}
	return false
}
// CollectSources returns the set of distinct label sources found in l.
func (l Labels) CollectSources() map[string]struct{} {
	sources := map[string]struct{}{}
	for _, lbl := range l {
		sources[lbl.Source] = struct{}{}
	}
	return sources
}
// parseSource returns the parsed source of the given str. It also returns the next piece
// of text that is after the source.
// Example:
//
//	src, next := parseSource("foo:bar==value")
//
// Println(src) // foo
// Println(next) // bar==value
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseSource(str string, delim byte) (src, next string) {
	switch {
	case str == "":
		return "", ""
	case str[0] == '$':
		// '$' shorthand forces the reserved source.
		return LabelSourceReserved, str[1:]
	}
	if i := strings.IndexByte(str, delim); i >= 0 {
		return str[:i], str[i+1:]
	}
	// No delimiter. For the Cilium (non-'.') format a "reserved." prefix
	// still selects the reserved source.
	if delim != '.' && strings.HasPrefix(str, LabelSourceReservedKeyPrefix) {
		return LabelSourceReserved, str[len(LabelSourceReservedKeyPrefix):]
	}
	return "", str
}
// ParseLabel returns the label representation of the given string. The str should be
// in the form of Source:Key=Value or Source:Key if Value is empty. It also parses short
// forms, for example: $host will be Label{Key: "host", Source: "reserved", Value: ""}.
func ParseLabel(str string) Label {
	return parseLabel(str, ':')
}

// parseLabel returns the label representation of the given string by value.
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseLabel(str string, delim byte) (lbl Label) {
	src, next := parseSource(str, delim)
	if src != "" {
		lbl.Source = src
	} else {
		lbl.Source = LabelSourceUnspec
	}

	i := strings.IndexByte(next, '=')
	if i < 0 {
		// No value part; the remainder is the key.
		lbl.Key = next
	} else {
		// Special case: a reserved label whose remainder starts with '='
		// (e.g. "$=key") takes everything after '=' as the key.
		if i == 0 && src == LabelSourceReserved {
			lbl.Key = next[i+1:]
		} else {
			lbl.Key = next[:i]
			lbl.Value = next[i+1:]
		}
	}

	// CIDR labels must not carry values; pre-parse and cache the prefix.
	// Failures are logged but still return the (prefix-less) label.
	if lbl.Source == LabelSourceCIDR {
		if lbl.Value != "" {
			// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
			logging.DefaultSlogLogger.Error("Invalid CIDR label: labels with source cidr cannot have values.",
				logfields.Label, lbl,
			)
		}
		c, err := LabelToPrefix(lbl.Key)
		if err != nil {
			// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
			logging.DefaultSlogLogger.Error("Failed to parse CIDR label: invalid prefix.",
				logfields.Label, lbl,
			)
		} else {
			lbl.cidr = &c
		}
	}
	return lbl
}
// ParseSelectLabel returns a selecting label representation of the given
// string. Unlike ParseLabel, if source is unspecified, the source defaults to
// LabelSourceAny
func ParseSelectLabel(str string) Label {
	return parseSelectLabel(str, ':')
}

// parseSelectLabel returns a selecting label representation of the given
// string by value.
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseSelectLabel(str string, delim byte) Label {
	lbl := parseLabel(str, delim)

	// Selectors with no explicit source match labels from any source.
	if lbl.Source == LabelSourceUnspec {
		lbl.Source = LabelSourceAny
	}

	return lbl
}
// generateLabelString builds the "source:key=value" representation of a
// label. Note that the '=' is always emitted, even for an empty value.
func generateLabelString(source, key, value string) string {
	var sb strings.Builder
	sb.Grow(len(source) + len(key) + len(value) + 2)
	sb.WriteString(source)
	sb.WriteByte(':')
	sb.WriteString(key)
	sb.WriteByte('=')
	sb.WriteString(value)
	return sb.String()
}
// GenerateK8sLabelString generates the string representation of a label with
// the provided key and value, using the k8s source, in the format
// "k8s:key=value".
func GenerateK8sLabelString(k, v string) string {
	return generateLabelString(LabelSourceK8s, k, v)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"fmt"
"log/slog"
"maps"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// keepMarks is a set of label keys that have been marked to survive a
// deleteUnMarked sweep.
type keepMarks map[string]struct{}

// set marks the label with 'key' to not be deleted.
func (k keepMarks) set(key string) {
	k[key] = struct{}{} // marked for keeping
}
// OpLabels represents the possible types.
type OpLabels struct {
	// Custom holds user-provided labels that are enabled and not deleted.
	Custom Labels

	// OrchestrationIdentity holds labels derived from the orchestration
	// system that contribute to the security identity.
	OrchestrationIdentity Labels

	// Disabled holds orchestrationIdentity labels which have been disabled.
	Disabled Labels

	// OrchestrationInfo holds labels from orchestration which are not used
	// in determining a security identity.
	OrchestrationInfo Labels
}
// NewOpLabels returns an OpLabels with all four label sets initialized to
// empty, non-nil maps.
func NewOpLabels() OpLabels {
	return OpLabels{
		Custom:                Labels{},
		OrchestrationIdentity: Labels{},
		Disabled:              Labels{},
		OrchestrationInfo:     Labels{},
	}
}
// SplitUserLabelChanges returns labels to 'add' and 'del'ete to make
// the custom labels match 'lbls'.
// FIXME: Somewhere in the code we crash if the returned maps are non-nil
// but length 0. We retain this behaviour here because it's easier.
func (o *OpLabels) SplitUserLabelChanges(lbls Labels) (add, del Labels) {
	// Labels requested but not yet in Custom must be added.
	for key, lbl := range lbls {
		if _, ok := o.Custom[key]; ok {
			continue
		}
		if add == nil {
			// Lazily allocated so the result stays nil when empty.
			add = Labels{}
		}
		add[key] = lbl
	}

	// Custom labels not in the requested set must be deleted.
	for key, lbl := range o.Custom {
		if _, ok := lbls[key]; ok {
			continue
		}
		if del == nil {
			del = Labels{}
		}
		del[key] = lbl
	}

	return add, del
}
// IdentityLabels returns the union of custom and orchestration identity
// labels, i.e. all labels used when determining a security identity.
// Orchestration identity labels win on key collisions.
func (o *OpLabels) IdentityLabels() Labels {
	identity := make(Labels, len(o.Custom)+len(o.OrchestrationIdentity))
	maps.Copy(identity, o.Custom)
	maps.Copy(identity, o.OrchestrationIdentity)
	return identity
}
// GetIdentityLabel returns the label with the given key from the identity
// label sets, preferring OrchestrationIdentity over Custom.
func (o *OpLabels) GetIdentityLabel(key string) (Label, bool) {
	if lbl, ok := o.OrchestrationIdentity[key]; ok {
		return lbl, true
	}
	lbl, ok := o.Custom[key]
	return lbl, ok
}
// AllLabels returns a new Labels map combining all four sets held by the
// OpLabels (custom, disabled, orchestration identity and info).
func (o *OpLabels) AllLabels() Labels {
	combined := make(Labels, len(o.Custom)+len(o.OrchestrationInfo)+len(o.OrchestrationIdentity)+len(o.Disabled))
	maps.Copy(combined, o.Custom)
	maps.Copy(combined, o.Disabled)
	maps.Copy(combined, o.OrchestrationIdentity)
	maps.Copy(combined, o.OrchestrationInfo)
	return combined
}
// ReplaceInformationLabels makes OrchestrationInfo match 'l' for labels
// whose source passes 'sourceFilter' (see upsertLabel/deleteUnMarked),
// reporting whether anything changed.
func (o *OpLabels) ReplaceInformationLabels(sourceFilter string, l Labels, logger *slog.Logger) bool {
	keep := make(keepMarks)
	changed := false
	for _, lbl := range l {
		keep.set(lbl.Key)
		if o.OrchestrationInfo.upsertLabel(sourceFilter, lbl) {
			logger.Debug("Assigning information label", logfields.Object, lbl)
			changed = true
		}
	}
	// Drop labels (matching the filter) that were not re-asserted above.
	o.OrchestrationInfo.deleteUnMarked(sourceFilter, keep)
	return changed
}
// ReplaceIdentityLabels makes OrchestrationIdentity match 'l' for labels
// whose source passes 'sourceFilter', keeping already-disabled labels
// disabled. It reports whether any label set changed.
func (o *OpLabels) ReplaceIdentityLabels(sourceFilter string, l Labels, logger *slog.Logger) bool {
	changed := false

	keepers := make(keepMarks)
	disabledKeepers := make(keepMarks)

	for k, v := range l {
		// A disabled identity label stays disabled without value updates
		if _, found := o.Disabled[k]; found {
			disabledKeepers.set(k)
		} else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(sourceFilter, v) {
			// Note: the init statement above always marks v.Key as kept;
			// the branch is taken only when the upsert actually changed
			// something.
			logger.Debug("Assigning security relevant label", logfields.Object, v)
			changed = true
		}
	}

	// Sweep out labels (matching the filter) that were not re-asserted.
	if o.OrchestrationIdentity.deleteUnMarked(sourceFilter, keepers) || o.Disabled.deleteUnMarked(sourceFilter, disabledKeepers) {
		changed = true
	}

	return changed
}
// ModifyIdentityLabels applies a set of label additions and deletions.
// Deleting moves orchestration-identity labels to the Disabled set and
// removes custom labels outright; adding restores a disabled label,
// refreshes an existing identity label, or records a new custom label.
// Returns an error (and performs no mutation) if any label slated for
// deletion is not present on any of the lists.
func (o *OpLabels) ModifyIdentityLabels(addLabels, delLabels Labels) (changed bool, err error) {
	// Validation pass: the request is accepted only if every label to be
	// deleted is on one of the lists. An already-disabled label is simply
	// kept disabled.
	for key := range delLabels {
		_, inCustom := o.Custom[key]
		_, inIdentity := o.OrchestrationIdentity[key]
		_, inDisabled := o.Disabled[key]
		if !inCustom && !inIdentity && !inDisabled {
			return false, fmt.Errorf("label %s not found", key)
		}
	}
	// Will not fail after this point
	for key := range delLabels {
		if lbl, ok := o.OrchestrationIdentity[key]; ok {
			// Identity labels are disabled, not dropped, so they can be restored.
			delete(o.OrchestrationIdentity, key)
			o.Disabled[key] = lbl
			changed = true
		}
		if _, ok := o.Custom[key]; ok {
			delete(o.Custom, key)
			changed = true
		}
	}
	for key, lbl := range addLabels {
		changed = true
		if _, ok := o.Disabled[key]; ok {
			// Restore a previously disabled label.
			delete(o.Disabled, key)
			o.OrchestrationIdentity[key] = lbl
		} else if _, ok := o.OrchestrationIdentity[key]; ok {
			// Replace the label's source and value.
			o.OrchestrationIdentity[key] = lbl
		} else {
			o.Custom[key] = lbl
		}
	}
	return changed, nil
}
// upsertLabel updates or inserts 'label' in 'l', but only if exactly the same label
// was not already in 'l'. Returns 'true' if a label was added, or an old label was
// updated, 'false' otherwise.
// The label is only updated if its source matches the provided 'sourceFilter'
// or in case the provided sourceFilter is 'LabelSourceAny'. The new label must
// also match the old label 'source' in order for it to be replaced.
func (l Labels) upsertLabel(sourceFilter string, label Label) bool {
	if old, exists := l[label.Key]; exists {
		// Respect the source filter: only labels from the filtered source
		// (or any source, when the filter is LabelSourceAny) may be touched.
		if sourceFilter != LabelSourceAny && sourceFilter != old.Source {
			return false
		}
		// Never replace a label owned by a different source.
		if old.Source != label.Source {
			return false
		}
		// Same source and same value: nothing to do.
		if old.Value == label.Value {
			return false
		}
	}
	// Insert the new label or replace the stale one.
	l[label.Key] = label
	return true
}
// deleteUnMarked deletes the labels which have not been marked for keeping.
// The labels are only deleted if their source matches the provided sourceFilter
// or in case the provided sourceFilter is 'LabelSourceAny'.
// Returns true if any of them were deleted.
func (l Labels) deleteUnMarked(sourceFilter string, marks keepMarks) bool {
	removed := false
	for key, lbl := range l {
		if _, marked := marks[key]; marked {
			continue
		}
		if sourceFilter != LabelSourceAny && sourceFilter != lbl.Source {
			continue
		}
		// Deleting during range iteration is safe for Go maps.
		delete(l, key)
		removed = true
	}
	return removed
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package labels
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Label) DeepEqual(other *Label) bool {
	if other == nil {
		return false
	}
	// Field-by-field comparison; all fields are plain strings.
	if in.Key != other.Key {
		return false
	}
	if in.Value != other.Value {
		return false
	}
	if in.Source != other.Source {
		return false
	}
	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LabelArray) DeepEqual(other *LabelArray) bool {
	if other == nil {
		return false
	}
	// Element-wise comparison; ordering matters for arrays.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LabelArrayList) DeepEqual(other *LabelArrayList) bool {
	if other == nil {
		return false
	}
	// Element-wise comparison; ordering matters for lists.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Labels) DeepEqual(other *Labels) bool {
	if other == nil {
		return false
	}
	// Key-wise comparison; ordering is irrelevant for maps.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if !inValue.DeepEqual(&otherValue) {
					return false
				}
			}
		}
	}
	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *OpLabels) DeepEqual(other *OpLabels) bool {
	if other == nil {
		return false
	}
	// For each field: compare when both sides are non-nil, or fail when
	// exactly one side is nil (the XOR-style `(a == nil) != (b == nil)`
	// forces the inner comparison, which then reports inequality).
	if ((in.Custom != nil) && (other.Custom != nil)) || ((in.Custom == nil) != (other.Custom == nil)) {
		in, other := &in.Custom, &other.Custom
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.OrchestrationIdentity != nil) && (other.OrchestrationIdentity != nil)) || ((in.OrchestrationIdentity == nil) != (other.OrchestrationIdentity == nil)) {
		in, other := &in.OrchestrationIdentity, &other.OrchestrationIdentity
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.Disabled != nil) && (other.Disabled != nil)) || ((in.Disabled == nil) != (other.Disabled == nil)) {
		in, other := &in.Disabled, &other.Disabled
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.OrchestrationInfo != nil) && (other.OrchestrationInfo != nil)) || ((in.OrchestrationInfo == nil) != (other.OrchestrationInfo == nil)) {
		in, other := &in.OrchestrationInfo, &other.OrchestrationInfo
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labelsfilter
import (
"encoding/json"
"fmt"
"log/slog"
"os"
"regexp"
"strings"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
)
var (
	// validLabelPrefixesMU guards reads/writes of the two prefix
	// configurations below (taken read-only in filterLabels).
	validLabelPrefixesMU   lock.RWMutex
	validLabelPrefixes     *labelPrefixCfg // Label prefixes used to filter from all labels
	validNodeLabelPrefixes *labelPrefixCfg // Label prefixes used to filter node labels
)

const (
	// LPCfgFileVersion represents the version of a Label Prefix Configuration File
	LPCfgFileVersion = 1

	// reservedLabelsPattern is the prefix pattern for all reserved labels
	reservedLabelsPattern = labels.LabelSourceReserved + ":.*"
)
// LabelPrefix is the cilium's representation of a container label.
// +k8s:deepcopy-gen=false
// +k8s:openapi-gen=false
// +deepequal-gen=false
type LabelPrefix struct {
	// Ignore if true will cause this prefix to be ignored instead of being accepted.
	// NOTE(review): the JSON key is "invert", not "ignore" — presumably kept
	// for configuration-file compatibility; confirm before renaming.
	Ignore bool   `json:"invert"`
	Prefix string `json:"prefix"`
	Source string `json:"source"`
	// expr is the compiled form of Prefix; when nil, matches() falls back
	// to plain string-prefix matching.
	expr *regexp.Regexp
}
// String returns a human readable representation of the LabelPrefix,
// e.g. "k8s:app" or "!k8s:app" for an ignore rule.
func (p LabelPrefix) String() string {
	if p.Ignore {
		return fmt.Sprintf("!%s:%s", p.Source, p.Prefix)
	}
	return fmt.Sprintf("%s:%s", p.Source, p.Prefix)
}
// matches returns true and the length of the matched section if the label is
// matched by the LabelPrefix. The Ignore flag has no effect at this point.
func (p LabelPrefix) matches(l labels.Label) (bool, int) {
	// A configured source restricts the match to labels of that source.
	if p.Source != "" && p.Source != l.Source {
		return false, 0
	}
	// Without a compiled expression, do plain string-prefix matching.
	if p.expr == nil {
		return strings.HasPrefix(l.Key, p.Prefix), len(p.Prefix)
	}
	loc := p.expr.FindStringIndex(l.Key)
	if loc == nil {
		return false, 0
	}
	// A match only counts when anchored at the start of the key; the
	// reported length is the end offset of the matched section.
	return loc[0] == 0, loc[1]
}
// parseLabelPrefix returns a LabelPrefix created from the string label
// parameter. The accepted forms are "prefix", "source:prefix" and either
// with a leading '!' on the prefix to mark it as an ignore rule. The prefix
// is also compiled as a regular expression.
func parseLabelPrefix(label string) (*LabelPrefix, error) {
	lp := LabelPrefix{}
	if src, rest, ok := strings.Cut(label, ":"); ok {
		lp.Source = src
		lp.Prefix = rest
	} else {
		lp.Prefix = label
	}
	if lp.Prefix == "" {
		return nil, fmt.Errorf("invalid label source %q, prefix %q",
			lp.Source, lp.Prefix)
	}
	// A leading '!' marks the prefix as an ignore rule.
	if strings.HasPrefix(lp.Prefix, "!") {
		lp.Ignore = true
		lp.Prefix = lp.Prefix[1:]
	}
	expr, err := regexp.Compile(lp.Prefix)
	if err != nil {
		return nil, fmt.Errorf("unable to compile regexp: %w", err)
	}
	lp.expr = expr
	return &lp, nil
}
// appendParsedLabelPrefixes parses each non-empty prefix expression and
// appends it to cfg.LabelPrefixes. Any inclusive (non-ignore) prefix switches
// cfg into whitelist mode. Returns the first parse error encountered.
func appendParsedLabelPrefixes(cfg *labelPrefixCfg, prefixes []string) error {
	for _, label := range prefixes {
		if len(label) == 0 {
			continue
		}
		p, err := parseLabelPrefix(label)
		if err != nil {
			return err
		}
		if !p.Ignore {
			cfg.whitelist = true
		}
		cfg.LabelPrefixes = append(cfg.LabelPrefixes, p)
	}
	return nil
}

// ParseLabelPrefixCfg parses valid label prefixes from a file and from a slice
// of valid prefixes. Both are optional. If both are provided, both list are
// appended together. The results are stored in the package-level
// validLabelPrefixes / validNodeLabelPrefixes configurations.
func ParseLabelPrefixCfg(logger *slog.Logger, prefixes, nodePrefixes []string, file string) error {
	var cfg *labelPrefixCfg
	var err error
	var fromCustomFile bool
	// Use default label prefix if configuration file not provided
	if file == "" {
		logger.Info("Parsing base label prefixes from default label list")
		cfg = defaultLabelPrefixCfg()
	} else {
		logger.Info(
			"Parsing base label prefixes from file",
			logfields.File, file,
		)
		cfg, err = readLabelPrefixCfgFrom(file)
		if err != nil {
			return fmt.Errorf("unable to read label prefix file: %w", err)
		}
		fromCustomFile = true
	}
	//exhaustruct:ignore // Reading clean configuration, no need to initialize
	nodeCfg := &labelPrefixCfg{}
	logger.Info(
		"Parsing node label prefixes from user inputs",
		logfields.Prefix, nodePrefixes,
	)
	if err := appendParsedLabelPrefixes(nodeCfg, nodePrefixes); err != nil {
		return err
	}
	logger.Info(
		"Parsing additional label prefixes from user inputs",
		logfields.Prefix, prefixes,
	)
	if err := appendParsedLabelPrefixes(cfg, prefixes); err != nil {
		return err
	}
	if fromCustomFile {
		// A custom file must still include the reserved-label pattern,
		// otherwise identity evaluation misbehaves; warn loudly if missing.
		found := false
		for _, label := range cfg.LabelPrefixes {
			if label.Source+":"+label.Prefix == reservedLabelsPattern {
				found = true
				break
			}
		}
		if !found {
			logger.Error(
				fmt.Sprintf("'%s' needs to be included in the final label list for "+
					"Cilium to work properly.", reservedLabelsPattern),
			)
		}
	}
	validLabelPrefixes = cfg
	validNodeLabelPrefixes = nodeCfg
	logger.Info("Final label prefixes to be used for identity evaluation:")
	for _, l := range validLabelPrefixes.LabelPrefixes {
		logger.Info(fmt.Sprintf(" - %s", l))
	}
	logger.Info("Final node label prefixes to be used for identity evaluation:")
	for _, l := range validNodeLabelPrefixes.LabelPrefixes {
		logger.Info(fmt.Sprintf(" - %s", l))
	}
	return nil
}
// labelPrefixCfg is the label prefix configuration to filter labels of started
// containers.
// +k8s:openapi-gen=false
type labelPrefixCfg struct {
	// Version of the configuration file format; must equal LPCfgFileVersion.
	Version       int            `json:"version"`
	LabelPrefixes []*LabelPrefix `json:"valid-prefixes"`
	// whitelist if true, indicates that an inclusive rule has to match
	// in order for the label to be considered
	whitelist bool `exhaustruct:"optional"`
}
// defaultLabelPrefixCfg returns a default LabelPrefixCfg using the latest
// LPCfgFileVersion. Panics if any of the built-in expressions fails to
// parse, which would be a programming error.
func defaultLabelPrefixCfg() *labelPrefixCfg {
	cfg := &labelPrefixCfg{
		Version:       LPCfgFileVersion,
		LabelPrefixes: []*LabelPrefix{},
	}

	// The default set: keep reserved and well-known Kubernetes identity
	// labels, drop the noisy per-pod/per-controller machinery labels.
	expressions := []string{
		reservedLabelsPattern,                             // include all reserved labels
		regexp.QuoteMeta(k8sConst.PodNamespaceLabel),      // include io.kubernetes.pod.namespace
		regexp.QuoteMeta(k8sConst.PodNamespaceMetaLabels), // include all namespace labels
		regexp.QuoteMeta(k8sConst.AppKubernetes),          // include app.kubernetes.io
		regexp.QuoteMeta(k8sConst.PolicyLabelCluster),     // include io.cilium.k8s.policy.cluster
		regexp.QuoteMeta(k8sConst.PolicyLabelServiceAccount),            // include io.cilium.k8s.policy.serviceaccount
		`!io\.kubernetes`,                                               // ignore all other io.kubernetes labels
		`!kubernetes\.io`,                                               // ignore all other kubernetes.io labels
		"!" + regexp.QuoteMeta(k8sConst.StatefulSetPodNameLabel),        // ignore statefulset.kubernetes.io/pod-name label
		"!" + regexp.QuoteMeta(k8sConst.StatefulSetPodIndexLabel),       // ignore apps.kubernetes.io/pod-index label
		"!" + regexp.QuoteMeta(k8sConst.IndexedJobCompletionIndexLabel), // ignore batch.kubernetes.io/job-completion-index label
		"!" + regexp.QuoteMeta(k8sConst.BatchJobControllerUID),          // ignore batch.kubernetes.io/controller-uid
		`!.*beta\.kubernetes\.io`,                                       // ignore all beta.kubernetes.io labels
		`!k8s\.io`,                                                      // ignore all k8s.io labels
		`!pod-template-generation`,                                      // ignore pod-template-generation
		`!pod-template-hash`,                                            // ignore pod-template-hash
		`!controller-revision-hash`,                                     // ignore controller-revision-hash
		`!controller-uid`,                                               // ignore controller-uid
		`!annotation.*`,                                                 // ignore all annotation labels
		`!etcd_node`,                                                    // ignore etcd_node label
	}

	for _, e := range expressions {
		p, err := parseLabelPrefix(e)
		if err != nil {
			msg := fmt.Sprintf("BUG: Unable to parse default label prefix '%s': %s", e, err)
			panic(msg)
		}
		cfg.LabelPrefixes = append(cfg.LabelPrefixes, p)
	}

	return cfg
}
// readLabelPrefixCfgFrom reads a label prefix configuration file from fileName.
// return an error if fileName is empty, or if version is not supported.
func readLabelPrefixCfgFrom(fileName string) (*labelPrefixCfg, error) {
	if fileName == "" {
		return nil, fmt.Errorf("label prefix file not provided")
	}

	f, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	//exhaustruct:ignore // Reading clean configuration, no need to initialize
	lpc := labelPrefixCfg{}
	if err := json.NewDecoder(f).Decode(&lpc); err != nil {
		return nil, err
	}
	if lpc.Version != LPCfgFileVersion {
		return nil, fmt.Errorf("unsupported version %d", lpc.Version)
	}

	// Validate each prefix and detect whether any inclusive rule exists.
	for _, lp := range lpc.LabelPrefixes {
		switch {
		case lp.Prefix == "":
			return nil, fmt.Errorf("invalid label prefix file: prefix was empty")
		case lp.Source == "":
			return nil, fmt.Errorf("invalid label prefix file: source was empty")
		case !lp.Ignore:
			lpc.whitelist = true
		}
	}
	return &lpc, nil
}
// filterLabels splits 'lbls' into identity-relevant labels and informational
// labels according to the configured prefixes. It holds the package-level
// read lock for the duration of the filtering.
func (cfg *labelPrefixCfg) filterLabels(lbls labels.Labels) (identityLabels, informationLabels labels.Labels) {
	if len(lbls) == 0 {
		return nil, nil
	}

	validLabelPrefixesMU.RLock()
	defer validLabelPrefixesMU.RUnlock()

	identityLabels = labels.Labels{}
	informationLabels = labels.Labels{}
	for k, v := range lbls {
		included, ignored := 0, 0

		for _, p := range cfg.LabelPrefixes {
			// NOTE: renamed from 'len', which shadowed the builtin.
			if m, matchLen := p.matches(v); m {
				if p.Ignore {
					// save length of shortest matching ignore
					if ignored == 0 || matchLen < ignored {
						ignored = matchLen
					}
				} else {
					// save length of longest matching include
					if matchLen > included {
						included = matchLen
					}
				}
			}
		}

		// A label is accepted if :
		// - No inclusive LabelPrefix (Ignore flag not set) is
		//   configured and label is not ignored.
		// - An inclusive LabelPrefix matches the label
		// - If both an inclusive and ignore LabelPrefix match, the
		//   label is accepted if the matching section in the label
		//   is greater than the ignored matching section in label,
		//   e.g. when evaluating the label foo.bar, the prefix rules
		//   {!foo, foo.bar} will cause the label to be accepted
		//   because the inclusive prefix matches over a longer section.
		if (!cfg.whitelist && ignored == 0) || included > ignored {
			identityLabels[k] = v
		} else {
			// Rejected labels are still kept around as informational labels.
			informationLabels[k] = v
		}
	}
	return identityLabels, informationLabels
}
// Filter returns Labels from the given labels that have the same source and the
// same prefix as one of lpc valid prefixes, as well as labels that do not match
// the aforementioned filtering criteria.
func Filter(lbls labels.Labels) (identityLabels, informationLabels labels.Labels) {
	return validLabelPrefixes.filterLabels(lbls)
}

// FilterNodeLabels splits node labels into identity-relevant and
// informational labels using the node-specific prefix configuration
// installed by ParseLabelPrefixCfg.
func FilterNodeLabels(lbls labels.Labels) (identityLabels, informationLabels labels.Labels) {
	return validNodeLabelPrefixes.filterLabels(lbls)
}
func FilterLabelsByRegex(excludePatterns []*regexp.Regexp, labels map[string]string) map[string]string {
if len(excludePatterns) == 0 && labels != nil {
return labels
}
newLabels := make(map[string]string)
for k, v := range labels {
labelNeedsExclusion := false
for _, pattern := range excludePatterns {
if pattern.MatchString(k) {
labelNeedsExclusion = true
break
}
}
if !labelNeedsExclusion {
newLabels[k] = v
}
}
return newLabels
}
// Copyright 2023 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package labelsfilter
import (
"encoding/json"
"fmt"
"io"
"log/slog"
"os"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"github.com/cilium/cilium/pkg/labels"
)
// FuzzLabelsfilterPkg fuzzes ParseLabelPrefixCfg and Filter: it generates a
// random label-prefix configuration file plus random user prefixes and
// labels, loads the configuration, and runs the resulting filter.
// Returns 1 when the full pipeline was exercised, 0 on early bail-out.
func FuzzLabelsfilterPkg(data []byte) int {
	f := fuzz.NewConsumer(data)
	prefixes := make([]string, 0)
	err := f.CreateSlice(&prefixes)
	if err != nil {
		return 0
	}
	lpc := &labelPrefixCfg{}
	err = f.GenerateStruct(lpc)
	if err != nil {
		return 0
	}
	// Pin the version so the generated config is not rejected outright.
	lpc.Version = LPCfgFileVersion
	fileBytes, err := json.Marshal(lpc)
	if err != nil {
		return 0
	}
	stringMap := make(map[string]string)
	err = f.FuzzMap(&stringMap)
	if err != nil {
		return 0
	}
	source, err := f.GetString()
	if err != nil {
		return 0
	}
	lbls := labels.Map2Labels(stringMap, source)
	file, err := os.Create("file")
	// Check the error BEFORE deferring Close: the previous version deferred
	// Close on a potentially nil *os.File. Also remove the scratch file so
	// repeated runs do not leave artifacts behind.
	if err != nil {
		return 0
	}
	defer func() {
		file.Close()
		os.Remove("file")
	}()
	_, err = file.Write(fileBytes)
	if err != nil {
		return 0
	}
	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
	err = ParseLabelPrefixCfg(logger, prefixes, nil, "file")
	if err != nil {
		fmt.Println(err)
		return 0
	}
	_, _ = Filter(lbls)
	return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package loadbalancer
import (
"fmt"
"iter"
"strings"
"unsafe"
"github.com/cilium/statedb"
"github.com/cilium/statedb/index"
"github.com/cilium/statedb/part"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
)
const (
	// BackendTableName is the name under which the backends table is
	// registered in StateDB (see NewBackendsTable).
	BackendTableName = "backends"
)
// BackendParams defines the parameters of a backend for insertion into the backends table.
type BackendParams struct {
	// Address is the L3/L4 address of the backend.
	Address L3n4Addr

	// PortNames are the optional names for the ports. A frontend can specify which
	// backends to select by port name.
	PortNames []string

	// Weight of backend for load-balancing.
	Weight uint16

	// Node hosting this backend. This is used to determine backends local to
	// a node.
	NodeName string

	// Optional zone information for topology-aware routing.
	Zone *BackendZone

	// ClusterID of the cluster in which the backend is located. 0 for local cluster.
	ClusterID uint32

	// Source of the backend.
	Source source.Source

	// State of the backend, e.g. active, quarantined or terminating.
	State BackendState

	// Unhealthy marks a backend as unhealthy and overrides [State] to mark the backend
	// as quarantined. We require a separate field for active health checking to merge
	// with the original source of this backend. Negative is used here to allow the
	// zero value to mean that the backend is healthy.
	// NOTE(review): the "Negative" wording looks stale — the field is a bool
	// whose zero value (false) means healthy; confirm and reword upstream.
	Unhealthy bool

	// UnhealthyUpdatedAt is the timestamp for when [Unhealthy] was last updated.
	UnhealthyUpdatedAt *time.Time
}

// maxBackendParamsSize is the size budget (in bytes) enforced on
// [BackendParams] by the package-initialization assertion below.
const maxBackendParamsSize = 110

// Assert on the size of [BackendParams] to keep changes to it at check.
// If you're adding more fields to [BackendParams] and they're most of the time
// not set, please consider putting them behind a separate struct and referring to
// it by pointer. This way we use less memory for the majority of use-cases.
var _ = func() struct{} {
	// Evaluated at package initialization; panics if the struct grew past the budget.
	if size := unsafe.Sizeof(BackendParams{}); size > maxBackendParamsSize {
		panic(fmt.Sprintf("BackendParams has size %d, maximum set to %d\n", size, maxBackendParamsSize))
	}
	return struct{}{}
}()
// GetZone returns the zone the backend is located in, or the empty string
// when no zone information is attached.
func (bep *BackendParams) GetZone() string {
	if z := bep.Zone; z != nil {
		return z.Zone
	}
	return ""
}
// BackendZone locates the backend to a specific zone and specifies what zones
// the backend should be used in for topology aware routing.
type BackendZone struct {
	// Zone where backend is located.
	Zone string

	// ForZones where this backend should be consumed in
	ForZones []string
}

// Backend is a composite of the per-service backend instances that share the same
// IP address and port.
type Backend struct {
	Address L3n4Addr

	// Instances of this backend. A backend is always linked to a specific
	// service and the instances may call the backend by different name
	// (PortName) or they may come from differents sources.
	// Instances may contain multiple [BackendInstance]s per service
	// coming from different sources. The version from the source with the
	// highest priority (smallest uint8) is used. This is needed for smooth
	// transitions when ownership of endpoints is passed between upstream
	// data sources.
	Instances part.Map[BackendInstanceKey, BackendParams]
}

// BackendInstanceKey identifies a single instance within [Backend.Instances]
// by the owning service and the priority of the data source that created it.
type BackendInstanceKey struct {
	ServiceName    ServiceName
	SourcePriority uint8
}
// Key encodes the instance key for ordered map storage: the service-name key,
// optionally followed by a space and the source priority byte. Priority zero
// is encoded without a suffix so it sorts first within a service's prefix.
func (k BackendInstanceKey) Key() []byte {
	base := k.ServiceName.Key()
	if k.SourcePriority == 0 {
		return base
	}
	out := make([]byte, 0, len(base)+2)
	out = append(out, base...)
	out = append(out, ' ', k.SourcePriority)
	return out
}
// GetInstance returns the backend instance for the given service with the
// highest priority (lowest SourcePriority number), or nil when the service
// has no instance of this backend.
func (be *Backend) GetInstance(name ServiceName) *BackendParams {
	// Return the instance matching the service name with highest priority
	// (lowest number)
	for _, inst := range be.GetInstancesOfService(name) {
		return &inst
	}
	return nil
}

// GetInstancesOfService returns all instances belonging to the given service,
// iterated in key order (priority-encoded, so highest priority first).
func (be *Backend) GetInstancesOfService(name ServiceName) iter.Seq2[BackendInstanceKey, BackendParams] {
	return be.Instances.Prefix(BackendInstanceKey{ServiceName: name, SourcePriority: 0})
}
// GetInstanceForFrontend returns the backend instance for the frontend's
// service. A frontend with a redirect resolves instances of the redirect
// target instead of its own service.
func (be *Backend) GetInstanceForFrontend(fe *Frontend) *BackendParams {
	if fe.RedirectTo != nil {
		return be.GetInstance(*fe.RedirectTo)
	}
	return be.GetInstance(fe.ServiceName)
}
// GetInstanceFromSource returns the instance of service 'name' if it comes
// from source 'src', otherwise nil.
// NOTE(review): the unconditional break means only the FIRST instance in the
// prefix (the highest-priority one) is inspected — a lower-priority instance
// from 'src' is never returned. Confirm this first-instance-only behavior is
// intended before relying on it.
func (be *Backend) GetInstanceFromSource(name ServiceName, src source.Source) *BackendParams {
	for k, inst := range be.Instances.Prefix(BackendInstanceKey{ServiceName: name}) {
		if k.ServiceName == name && inst.Source == src {
			return &inst
		}
		break
	}
	return nil
}
// IsAlive returns true if any of the instances are marked active or terminating and healthy.
// This signals whether the backend should still be considered alive or not for the purposes
// of terminating connections to it.
func (be *Backend) IsAlive() bool {
	for _, inst := range be.Instances.All() {
		if inst.Unhealthy {
			// Unhealthy instances never count towards liveness.
			continue
		}
		if inst.State == BackendStateActive || inst.State == BackendStateTerminating {
			return true
		}
	}
	return false
}
// String renders the backend as a single space-separated line using the same
// columns as TableRow.
func (be *Backend) String() string {
	return strings.Join(be.TableRow(), " ")
}

// TableHeader returns the column headers for table output; must stay in sync
// with TableRow.
func (be *Backend) TableHeader() []string {
	return []string{
		"Address",
		"Instances",
		"Shadows",
		"NodeName",
	}
}
// TableRow renders the backend as a table row. The node-name column shows the
// first non-empty NodeName among the backend's instances.
func (be *Backend) TableRow() []string {
	nodeName := ""
	for _, inst := range be.Instances.All() {
		if inst.NodeName != "" {
			nodeName = inst.NodeName
			break // first non-empty name wins; no need to scan the rest
		}
	}
	return []string{
		be.Address.StringWithProtocol(),
		showInstances(be),
		showShadows(be),
		nodeName,
	}
}
// showInstances shows the backend instances in the following forms:
// - no port name(s): "default/nginx"
// - port name(s): "default/nginx (http http-alt)"
// - not active: "default/nginx [quarantined]"
// - not active, port name(s): "default/nginx [quarantined] (http http-alt)"
// (The previous doc examples showed comma-separated port names, but the code
// has always emitted space-separated names; examples fixed to match.)
func showInstances(be *Backend) string {
	var b strings.Builder
	for k, inst := range be.PreferredInstances() {
		b.WriteString(k.ServiceName.String())

		// Annotate instances that are not plain active, e.g. "[terminating]".
		if inst.State != BackendStateActive || inst.Unhealthy {
			b.WriteString(" [")
			if inst.Unhealthy {
				b.WriteString("unhealthy")
			} else {
				s, _ := inst.State.String()
				b.WriteString(s)
			}
			b.WriteRune(']')
		}

		if len(inst.PortNames) > 0 {
			// PortNames is already []string; join directly (the old loop also
			// did a redundant string() conversion per element).
			b.WriteString(" (")
			b.WriteString(strings.Join(inst.PortNames, " "))
			b.WriteRune(')')
		}
		b.WriteString(", ")
	}
	return strings.TrimSuffix(b.String(), ", ")
}
// showShadows renders the non-preferred ("shadowed") instances grouped per
// service, e.g. "default/nginx [k8s (http)]". For each service the first
// instance in iteration order is skipped, since showInstances already
// displays it.
func showShadows(be *Backend) string {
	var (
		services           []string
		instances          []string
		emptyName, svcName ServiceName
	)
	// updateServices flushes the instances collected for the current service.
	updateServices := func() {
		if len(instances) > 0 {
			services = append(services, fmt.Sprintf("%s [%s]", svcName.String(), strings.Join(instances, ", ")))
		}
	}
	for k, inst := range be.Instances.All() {
		if k.ServiceName != svcName {
			// New service group: flush the previous one and reset.
			if svcName != emptyName {
				updateServices()
			}
			svcName = k.ServiceName
			instances = instances[:0]
			continue // Omit the instance that is already included in showInstances
		}
		instance := string(inst.Source)
		if len(inst.PortNames) > 0 {
			instance += fmt.Sprintf(" (%s)", strings.Join(inst.PortNames, " "))
		}
		instances = append(instances, instance)
	}
	// Flush the trailing group.
	updateServices()
	return strings.Join(services, ", ")
}

// serviceNameKeys returns the index keys for the distinct service names
// referenced by this backend's preferred instances.
func (be *Backend) serviceNameKeys() index.KeySet {
	if be.Instances.Len() == 1 {
		// Avoid allocating the slice.
		for k := range be.PreferredInstances() {
			return index.NewKeySet(k.ServiceName.Key())
		}
	}
	keys := make([]index.Key, 0, be.Instances.Len()) // This may be more than enough if non-preferred instances exist.
	for k := range be.PreferredInstances() {
		keys = append(keys, k.ServiceName.Key())
	}
	return index.NewKeySet(keys...)
}

// PreferredInstances yields only the first instance seen for each service
// name. Since instances are iterated in key order and the key encodes the
// source priority after the service name, the first instance per service is
// the one with the numerically smallest (i.e. highest) priority.
func (be *Backend) PreferredInstances() iter.Seq2[BackendInstanceKey, BackendParams] {
	return func(yield func(BackendInstanceKey, BackendParams) bool) {
		var svcName ServiceName
		for k, v := range be.Instances.All() {
			if k.ServiceName != svcName {
				svcName = k.ServiceName
				if !yield(k, v) {
					break
				}
			} // Skip instances with the same ServiceName but lower (numerically larger) priorities.
		}
	}
}
// Clone returns a shallow clone of the backend: the struct itself is copied,
// while reference fields (e.g. the instances map) are shared with the original.
func (be *Backend) Clone() *Backend {
	clone := *be
	return &clone
}
var (
	// backendAddrIndex is the primary, unique index keyed by the backend's
	// L3n4Addr.
	backendAddrIndex = statedb.Index[*Backend, L3n4Addr]{
		Name: "address",
		FromObject: func(obj *Backend) index.KeySet {
			return index.NewKeySet(obj.Address.Bytes())
		},
		FromKey:    func(l L3n4Addr) index.Key { return index.Key(l.Bytes()) },
		FromString: L3n4AddrFromString,
		Unique:     true,
	}

	// BackendByAddress queries the backends table by backend address.
	BackendByAddress = backendAddrIndex.Query

	// backendServiceIndex is a non-unique secondary index keyed by the
	// service names referenced by the backend's instances.
	backendServiceIndex = statedb.Index[*Backend, ServiceName]{
		Name:       "service",
		FromObject: (*Backend).serviceNameKeys,
		FromKey:    ServiceName.Key,
		FromString: index.FromString,
		Unique:     false,
	}

	// BackendByServiceName queries the backends table by service name.
	BackendByServiceName = backendServiceIndex.Query
)

// NewBackendsTable creates the backends table and registers it with the
// StateDB instance under [BackendTableName].
func NewBackendsTable(db *statedb.DB) (statedb.RWTable[*Backend], error) {
	return statedb.NewTable(
		db,
		BackendTableName,
		backendAddrIndex,
		backendServiceIndex,
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package loadbalancer
import (
"errors"
"fmt"
"log/slog"
"strconv"
"strings"
"github.com/spf13/pflag"
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/kpr"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/time"
)
// Configuration option names
const (
	// LBMapEntriesName configures max entries for BPF lbmap.
	LBMapEntriesName = "bpf-lb-map-max"

	// LBServiceMapMaxEntries configures max entries of bpf map for services.
	LBServiceMapMaxEntries = "bpf-lb-service-map-max"

	// LBBackendMapMaxEntries configures max entries of bpf map for service backends.
	LBBackendMapMaxEntries = "bpf-lb-service-backend-map-max"

	// LBRevNatMapMaxEntries configures max entries of bpf map for reverse NAT.
	LBRevNatMapMaxEntries = "bpf-lb-rev-nat-map-max"

	// LBAffinityMapMaxEntries configures max entries of bpf map for session affinity.
	LBAffinityMapMaxEntries = "bpf-lb-affinity-map-max"

	// LBSourceRangeAllTypes configures service source ranges for all service types.
	LBSourceRangeAllTypes = "bpf-lb-source-range-all-types"

	// LBSourceRangeMapMaxEntries configures max entries of bpf map for service source ranges.
	LBSourceRangeMapMaxEntries = "bpf-lb-source-range-map-max"

	// LBMaglevMapMaxEntries configures max entries of bpf map for Maglev.
	LBMaglevMapMaxEntries = "bpf-lb-maglev-map-max"

	// LBSockRevNatEntriesName configures max entries for BPF sock reverse nat
	// entries. (Comment fixed: it previously referred to SockRevNatEntriesName.)
	LBSockRevNatEntriesName = "bpf-sock-rev-map-max"

	// NodePortRange defines a custom range where to look up NodePort services
	NodePortRange = "node-port-range"

	// LBAlgorithmName selects the backend selection algorithm.
	LBAlgorithmName = "bpf-lb-algorithm"

	// Deprecated option for setting [LBAlgorithm]
	NodePortAlgName = "node-port-algorithm"

	// LoadBalancerModeName indicates in which mode NodePort implementation should run
	// ("snat", "dsr" or "hybrid")
	LoadBalancerModeName = "bpf-lb-mode"

	// LoadBalancerModeAnnotation tells whether controller should check service
	// level annotation for configuring bpf loadbalancing method (snat vs dsr).
	LoadBalancerModeAnnotationName = "bpf-lb-mode-annotation"

	// Deprecated option for setting [LoadBalancerMode]
	NodePortModeName = "node-port-mode"

	// LoadBalancerDSRDispatchName is the config option for setting the method for
	// pushing packets to backends under DSR ("opt" or "ipip")
	LoadBalancerDSRDispatchName = "bpf-lb-dsr-dispatch"

	// ExternalClusterIPName is the name of the option to enable
	// cluster external access to ClusterIP services.
	ExternalClusterIPName = "bpf-lb-external-clusterip"

	// AlgorithmAnnotationName tells whether controller should check service
	// level annotation for configuring bpf loadbalancing algorithm.
	AlgorithmAnnotationName = "bpf-lb-algorithm-annotation"

	// EnableHealthCheckNodePortName is the name of the EnableHealthCheckNodePort option
	EnableHealthCheckNodePortName = "enable-health-check-nodeport"

	// EnableServiceTopologyName is the flag name of for the EnableServiceTopology option
	EnableServiceTopologyName = "enable-service-topology"
)

// Configuration option defaults
const (
	// DefaultLBMapMaxEntries is the default size for the load-balancing BPF maps.
	DefaultLBMapMaxEntries = 65536

	// NodePortMinDefault is the minimal port to listen for NodePort requests
	NodePortMinDefault = 30000

	// NodePortMaxDefault is the maximum port to listen for NodePort requests
	NodePortMaxDefault = 32767
)

// Accepted values for the algorithm, mode and DSR dispatch options above.
const (
	// LBAlgorithmRandom is for randomly selecting a backend
	LBAlgorithmRandom = "random"

	// LBAlgorithmMaglev is for using maglev consistent hashing for backend selection
	LBAlgorithmMaglev = "maglev"

	// LBModeSNAT is for SNATing requests to remote nodes
	LBModeSNAT = "snat"

	// LBModeDSR is for performing DSR for requests to remote nodes
	LBModeDSR = "dsr"

	// LBModeHybrid is a dual mode of the above, that is, DSR for TCP and SNAT for UDP
	LBModeHybrid = "hybrid"

	// DSR dispatch mode to encode service into IP option or extension header
	DSRDispatchOption = "opt"

	// DSR dispatch mode to encapsulate to IPIP
	DSRDispatchIPIP = "ipip"

	// DSR dispatch mode to encapsulate to Geneve
	DSRDispatchGeneve = "geneve"
)
// UserConfig is the configuration provided by the user that has not been processed.
// +deepequal-gen=true
type UserConfig struct {
	// RetryBackoffMin is the minimum wait time before a failed LB operation is retried.
	RetryBackoffMin time.Duration `mapstructure:"lb-retry-backoff-min"`
	// RetryBackoffMax is the maximum wait time before a failed LB operation is retried.
	RetryBackoffMax time.Duration `mapstructure:"lb-retry-backoff-max"`
	// LBMapEntries is the maximum number of entries allowed in BPF lbmap.
	LBMapEntries int `mapstructure:"bpf-lb-map-max"`
	// LBServiceMapEntries is the maximum number of entries allowed in BPF lbmap for services.
	LBServiceMapEntries int `mapstructure:"bpf-lb-service-map-max"`
	// LBBackendMapEntries is the maximum number of entries allowed in BPF lbmap for service backends.
	LBBackendMapEntries int `mapstructure:"bpf-lb-service-backend-map-max"`
	// LBRevNatEntries is the maximum number of entries allowed in BPF lbmap for reverse NAT.
	LBRevNatEntries int `mapstructure:"bpf-lb-rev-nat-map-max"`
	// LBAffinityMapEntries is the maximum number of entries allowed in BPF lbmap for session affinities.
	LBAffinityMapEntries int `mapstructure:"bpf-lb-affinity-map-max"`
	// LBSourceRangeAllTypes enables propagation of loadbalancerSourceRanges to all Kubernetes
	// service types which were created from the LoadBalancer service.
	LBSourceRangeAllTypes bool `mapstructure:"bpf-lb-source-range-all-types"`
	// LBSourceRangeMapEntries is the maximum number of entries allowed in BPF lbmap for source ranges.
	LBSourceRangeMapEntries int `mapstructure:"bpf-lb-source-range-map-max"`
	// LBMaglevMapEntries is the maximum number of entries allowed in BPF lbmap for maglev.
	LBMaglevMapEntries int `mapstructure:"bpf-lb-maglev-map-max"`
	// LBSockRevNatEntries is the maximum number of sock rev nat mappings
	// allowed in the BPF rev nat table
	LBSockRevNatEntries int `mapstructure:"bpf-sock-rev-map-max"`
	// NodePortRange is the minimum and maximum ports to use for NodePort
	// (no mapstructure tag; populated from the flag of the same name).
	NodePortRange []string
	// LBMode indicates in which mode NodePort implementation should run
	// ("snat", "dsr" or "hybrid")
	LBMode string `mapstructure:"bpf-lb-mode"`
	// LBModeAnnotation tells whether controller should check service
	// level annotation for configuring bpf load balancing algorithm.
	LBModeAnnotation bool `mapstructure:"bpf-lb-mode-annotation"`
	// LoadBalancerAlgorithm indicates which backend selection algorithm is used
	// ("random" or "maglev")
	LBAlgorithm string `mapstructure:"bpf-lb-algorithm"`
	// DSRDispatch indicates the method for pushing packets to
	// backends under DSR ("opt" or "ipip")
	DSRDispatch string `mapstructure:"bpf-lb-dsr-dispatch"`
	// ExternalClusterIP enables routing to ClusterIP services from outside
	// the cluster. This mirrors the behaviour of kube-proxy.
	ExternalClusterIP bool `mapstructure:"bpf-lb-external-clusterip"`
	// AlgorithmAnnotation tells whether controller should check service
	// level annotation for configuring bpf load balancing algorithm.
	AlgorithmAnnotation bool `mapstructure:"bpf-lb-algorithm-annotation"`
	// EnableHealthCheckNodePort enables health checking of NodePort by
	// cilium
	EnableHealthCheckNodePort bool `mapstructure:"enable-health-check-nodeport"`
	// LBPressureMetricsInterval sets the interval for updating the load-balancer BPF map
	// pressure metrics. A batch lookup is performed for all maps periodically to count
	// the number of elements that are then reported in the `bpf-map-pressure` metric.
	LBPressureMetricsInterval time.Duration `mapstructure:"lb-pressure-metrics-interval"`
	// LBSockTerminateAllProtos enables termination of both UDP and TCP sockets as
	// opposed to just UDP sockets.
	LBSockTerminateAllProtos bool `mapstructure:"lb-sock-terminate-all-protos"`
	// Enable processing of service topology aware hints
	// (no mapstructure tag; populated from the flag of the same name).
	EnableServiceTopology bool
	// InitWaitTimeout is the amount of time we wait for the load-balancing tables to be initialized before
	// we start reconciling towards the BPF maps. This reduces the probability that load-balancing is scaled
	// down temporarily due to not yet seeing all backends.
	//
	// The delay happens only when existing BPF state existed.
	//
	// We must not wait forever for initialization though due to potential interdependencies between load-balancing
	// data sources. For example we might depend on Kubernetes data to connect to the ClusterMesh api-server and
	// thus may need to first reconcile the Kubernetes services to connect to ClusterMesh (if endpoints have changed
	// while agent was down).
	InitWaitTimeout time.Duration `mapstructure:"lb-init-wait-timeout"`
}
// ConfigCell provides the [Config] and [ExternalConfig] configurations.
var ConfigCell = cell.Group(
	// Register the user-facing flags with their defaults.
	cell.Config(DefaultUserConfig),
	// Register the deprecated flags (hidden, mapped onto the new ones in NewConfig).
	cell.Config(DeprecatedConfig{}),
	cell.Provide(
		// Bridge options from [option.DaemonConfig] to [loadbalancer.ExternalConfig] to avoid
		// direct dependency to DaemonConfig
		NewExternalConfig,
		// Validate and populate [loadbalancer.userConfig] to produce the final [loadbalancer.Config]
		NewConfig,
	),
)
// Config for load-balancing. This is the validated, processed form of
// [UserConfig] produced by [NewConfig].
// +deepequal-gen=true
type Config struct {
	UserConfig

	// NodePortMin is the minimum port address for the NodePort range
	NodePortMin uint16
	// NodePortMax is the maximum port address for the NodePort range
	NodePortMax uint16
}
// LoadBalancerUsesDSR reports whether direct server return may be in use:
// either the configured LB mode is "dsr" or "hybrid", or per-service mode
// annotations are enabled (so individual services may opt into DSR).
func (c *Config) LoadBalancerUsesDSR() bool {
	if c.LBModeAnnotation {
		return true
	}
	return c.LBMode == LBModeDSR || c.LBMode == LBModeHybrid
}
// DeprecatedConfig holds the deprecated flag values that are mapped onto
// the new [UserConfig] fields in [NewConfig].
type DeprecatedConfig struct {
	// NodePortAlg indicates which backend selection algorithm is used
	// ("random" or "maglev")
	//
	// Deprecated: superseded by [UserConfig.LBAlgorithm].
	NodePortAlg string `mapstructure:"node-port-algorithm"`
	// NodePortMode indicates in which mode NodePort implementation should run
	// ("snat", "dsr" or "hybrid")
	//
	// Deprecated: superseded by [UserConfig.LBMode].
	NodePortMode string `mapstructure:"node-port-mode"`
}
// Flags registers the deprecated, hidden command-line flags. Each deprecated
// flag points the user at its replacement.
func (DeprecatedConfig) Flags(flags *pflag.FlagSet) {
	// Deprecated option for setting [LBAlgorithm]
	flags.String(NodePortAlgName, "", "BPF load balancing algorithm (\"random\", \"maglev\")")
	flags.MarkHidden(NodePortAlgName)
	flags.MarkDeprecated(NodePortAlgName, "Use --"+LBAlgorithmName+" instead")

	// Deprecated option for setting [LBMode]
	flags.String(NodePortModeName, "", "BPF NodePort mode (\"snat\", \"dsr\", \"hybrid\")")
	flags.MarkHidden(NodePortModeName)
	// BUGFIX: this previously marked NodePortAlgName deprecated a second time,
	// leaving NodePortModeName without a deprecation notice.
	flags.MarkDeprecated(NodePortModeName, "Use --"+LoadBalancerModeName+" instead")
}
// Flags registers the user-facing command-line flags. The defaults are taken
// from [def] (normally [DefaultUserConfig]) so that flag defaults and the
// config struct defaults cannot drift apart.
func (def UserConfig) Flags(flags *pflag.FlagSet) {
	flags.Duration("lb-retry-backoff-min", def.RetryBackoffMin, "Minimum amount of time to wait before retrying LB operation")
	flags.MarkHidden("lb-retry-backoff-min")
	// BUGFIX: the default was previously def.RetryBackoffMin, which made the
	// max-backoff flag default to the minimum backoff value.
	flags.Duration("lb-retry-backoff-max", def.RetryBackoffMax, "Maximum amount of time to wait before retrying LB operation")
	flags.MarkHidden("lb-retry-backoff-max")

	flags.Int(LBMapEntriesName, def.LBMapEntries, "Maximum number of entries in Cilium BPF lbmap")

	flags.Int(LBServiceMapMaxEntries, def.LBServiceMapEntries, fmt.Sprintf("Maximum number of entries in Cilium BPF lbmap for services (if this isn't set, the value of --%s will be used.)", LBMapEntriesName))
	flags.MarkHidden(LBServiceMapMaxEntries)

	flags.Int(LBBackendMapMaxEntries, def.LBBackendMapEntries, fmt.Sprintf("Maximum number of entries in Cilium BPF lbmap for service backends (if this isn't set, the value of --%s will be used.)", LBMapEntriesName))
	flags.MarkHidden(LBBackendMapMaxEntries)

	flags.Int(LBRevNatMapMaxEntries, def.LBRevNatEntries, fmt.Sprintf("Maximum number of entries in Cilium BPF lbmap for reverse NAT (if this isn't set, the value of --%s will be used.)", LBMapEntriesName))
	flags.MarkHidden(LBRevNatMapMaxEntries)

	flags.Int(LBAffinityMapMaxEntries, def.LBAffinityMapEntries, fmt.Sprintf("Maximum number of entries in Cilium BPF lbmap for session affinities (if this isn't set, the value of --%s will be used.)", LBMapEntriesName))
	flags.MarkHidden(LBAffinityMapMaxEntries)

	flags.Int(LBSourceRangeMapMaxEntries, def.LBSourceRangeMapEntries, fmt.Sprintf("Maximum number of entries in Cilium BPF lbmap for source ranges (if this isn't set, the value of --%s will be used.)", LBMapEntriesName))
	flags.MarkHidden(LBSourceRangeMapMaxEntries)

	flags.Bool(LBSourceRangeAllTypes, def.LBSourceRangeAllTypes, "Propagate loadbalancerSourceRanges to all corresponding service types")

	flags.Int(LBMaglevMapMaxEntries, def.LBMaglevMapEntries, fmt.Sprintf("Maximum number of entries in Cilium BPF lbmap for maglev (if this isn't set, the value of --%s will be used.)", LBMapEntriesName))
	flags.MarkHidden(LBMaglevMapMaxEntries)

	flags.Int(LBSockRevNatEntriesName, def.LBSockRevNatEntries, "Maximum number of entries for the SockRevNAT BPF map")

	// NOTE: the default here comes from the NodePortMin/MaxDefault constants
	// rather than def.NodePortRange (which defaults to an empty slice).
	flags.StringSlice(NodePortRange, []string{fmt.Sprintf("%d", NodePortMinDefault), fmt.Sprintf("%d", NodePortMaxDefault)}, "Set the min/max NodePort port range")

	flags.String(LBAlgorithmName, def.LBAlgorithm, "BPF load balancing algorithm (\"random\", \"maglev\")")
	// CONSISTENCY FIX: default from [def] instead of a hard-coded false, like
	// every other flag in this method (DefaultUserConfig leaves it false, so
	// the effective default is unchanged).
	flags.Bool(LoadBalancerModeAnnotationName, def.LBModeAnnotation, "Enable service-level annotation for configuring BPF load balancing mode")
	flags.String(LoadBalancerModeName, def.LBMode, "BPF load balancing mode (\"snat\", \"dsr\", \"hybrid\")")
	flags.String(LoadBalancerDSRDispatchName, def.DSRDispatch, "BPF load balancing DSR dispatch method (\"opt\", \"ipip\", \"geneve\")")
	flags.Bool(ExternalClusterIPName, def.ExternalClusterIP, "Enable external access to ClusterIP services (default false)")
	flags.Bool(AlgorithmAnnotationName, def.AlgorithmAnnotation, "Enable service-level annotation for configuring BPF load balancing algorithm")
	flags.Bool(EnableHealthCheckNodePortName, def.EnableHealthCheckNodePort, "Enables a healthcheck nodePort server for NodePort services with 'healthCheckNodePort' being set")

	flags.Duration("lb-pressure-metrics-interval", def.LBPressureMetricsInterval, "Interval for reporting pressure metrics for load-balancing BPF maps. 0 disables reporting.")
	flags.MarkHidden("lb-pressure-metrics-interval")

	// CONSISTENCY FIX: default from [def] instead of a hard-coded false
	// (DefaultUserConfig leaves it false, so the effective default is unchanged).
	flags.Bool("lb-sock-terminate-all-protos", def.LBSockTerminateAllProtos, "Enable terminating connections to deleted service backends for both TCP and UDP")
	flags.MarkHidden("lb-sock-terminate-all-protos")

	flags.Bool(EnableServiceTopologyName, def.EnableServiceTopology, "Enable support for service topology aware hints")

	flags.Duration("lb-init-wait-timeout", def.InitWaitTimeout, "Amount of time to wait for initialization before reconciling BPF maps")
	flags.MarkHidden("lb-init-wait-timeout")
}
// NewConfig takes the user-provided configuration, validates and processes it to produce the final
// configuration for load-balancing.
//
// Processing includes: bounds checking the map-size options, dynamically
// sizing the SockRevNat map when unset, defaulting the per-map sizes to
// [UserConfig.LBMapEntries], parsing the NodePort port range, and folding the
// deprecated NodePortAlg/NodePortMode options into their replacements.
func NewConfig(log *slog.Logger, userConfig UserConfig, deprecatedConfig DeprecatedConfig, dcfg *option.DaemonConfig) (cfg Config, err error) {
	cfg.UserConfig = userConfig

	if cfg.LBMapEntries <= 0 {
		return Config{}, fmt.Errorf("specified LBMap max entries %d must be a value greater than 0", cfg.LBMapEntries)
	}

	// The per-map sizes may be zero ("use LBMapEntries") but never negative.
	if cfg.LBServiceMapEntries < 0 ||
		cfg.LBBackendMapEntries < 0 ||
		cfg.LBRevNatEntries < 0 ||
		cfg.LBAffinityMapEntries < 0 ||
		cfg.LBSourceRangeMapEntries < 0 ||
		cfg.LBMaglevMapEntries < 0 {
		return Config{}, fmt.Errorf("specified LB Service Map max entries must not be a negative value"+
			"(Service Map: %d, Service Backend: %d, Reverse NAT: %d, Session Affinity: %d, Source Range: %d, Maglev: %d)",
			cfg.LBServiceMapEntries,
			cfg.LBBackendMapEntries,
			cfg.LBRevNatEntries,
			cfg.LBAffinityMapEntries,
			cfg.LBSourceRangeMapEntries,
			cfg.LBMaglevMapEntries)
	}

	// Dynamically size the SockRevNat map if not set by the user.
	if cfg.LBSockRevNatEntries == 0 {
		getEntries := dcfg.GetDynamicSizeCalculator(log)
		cfg.LBSockRevNatEntries = getEntries(option.SockRevNATMapEntriesDefault, option.LimitTableAutoSockRevNatMin, option.LimitTableMax)
		log.Info(fmt.Sprintf("option %s set by dynamic sizing to %v", LBSockRevNatEntriesName, cfg.LBSockRevNatEntries)) // FIXME
	}
	if cfg.LBSockRevNatEntries < option.LimitTableMin {
		return Config{}, fmt.Errorf("specified Socket Reverse NAT table size %d must be greater or equal to %d",
			cfg.LBSockRevNatEntries, option.LimitTableMin)
	}
	if cfg.LBSockRevNatEntries > option.LimitTableMax {
		return Config{}, fmt.Errorf("specified Socket Reverse NAT tables size %d must not exceed maximum %d",
			cfg.LBSockRevNatEntries, option.LimitTableMax)
	}

	// Use [cfg.LBMapEntries] for map size if not overridden.
	opts := []*int{
		&cfg.LBServiceMapEntries,
		&cfg.LBBackendMapEntries,
		&cfg.LBRevNatEntries,
		&cfg.LBAffinityMapEntries,
		&cfg.LBSourceRangeMapEntries,
		&cfg.LBMaglevMapEntries,
	}
	for _, opt := range opts {
		if *opt == 0 {
			*opt = cfg.LBMapEntries
		}
	}

	cfg.NodePortMin = NodePortMinDefault
	cfg.NodePortMax = NodePortMaxDefault
	nodePortRange := cfg.NodePortRange
	// When passed via configmap, we might not get a slice but single
	// string instead, so split it if needed.
	if len(nodePortRange) == 1 {
		nodePortRange = strings.Split(nodePortRange[0], ",")
	}
	switch len(nodePortRange) {
	case 0:
		// Use the defaults
	case 2:
		min, err := strconv.ParseUint(nodePortRange[0], 10, 16)
		if err != nil {
			return Config{}, fmt.Errorf("Unable to parse min port value for NodePort range: %w", err)
		}
		cfg.NodePortMin = uint16(min)
		max, err := strconv.ParseUint(nodePortRange[1], 10, 16)
		if err != nil {
			return Config{}, fmt.Errorf("Unable to parse max port value for NodePort range: %w", err)
		}
		cfg.NodePortMax = uint16(max)
		if cfg.NodePortMax <= cfg.NodePortMin {
			return Config{}, errors.New("NodePort range min port must be smaller than max port")
		}
	default:
		// NOTE(review): this prints the NodePortRange flag-name constant, not
		// the offending values — confirm whether the values were intended here.
		return Config{}, fmt.Errorf("Unable to parse min/max port value for NodePort range: %s", NodePortRange)
	}

	// Fold the deprecated options into their replacements.
	if deprecatedConfig.NodePortAlg != "" {
		cfg.LBAlgorithm = deprecatedConfig.NodePortAlg
	}
	if cfg.LBAlgorithm != LBAlgorithmRandom &&
		cfg.LBAlgorithm != LBAlgorithmMaglev {
		return Config{}, fmt.Errorf("Invalid value for --%s: %s", LBAlgorithmName, cfg.LBAlgorithm)
	}

	if deprecatedConfig.NodePortMode != "" {
		// Reject setting both the old and the new flag.
		if cfg.LBMode != DefaultUserConfig.LBMode {
			return Config{}, fmt.Errorf("both --%s and --%s were set. Use --%s instead.",
				LoadBalancerModeName, NodePortModeName, LoadBalancerModeName)
		}
		cfg.LBMode = deprecatedConfig.NodePortMode
	}
	if cfg.LBMode != LBModeSNAT && cfg.LBMode != LBModeDSR && cfg.LBMode != LBModeHybrid {
		return Config{}, fmt.Errorf("Invalid value for --%s: %s", LoadBalancerModeName, cfg.LBMode)
	}

	if cfg.LBModeAnnotation &&
		cfg.LBMode == LBModeHybrid {
		return Config{}, fmt.Errorf("The value --%s=%s is not supported as default under annotation mode", LoadBalancerModeName, cfg.LBMode)
	}

	/* FIXME:
	if cfg.NodePortMode == option.NodePortModeDSR &&
		cfg.LoadBalancerDSRDispatch != option.DSRDispatchOption &&
		cfg.LoadBalancerDSRDispatch != option.DSRDispatchIPIP &&
		cfg.LoadBalancerDSRDispatch != option.DSRDispatchGeneve {
		return fmt.Errorf("Invalid value for --%s: %s", option.LoadBalancerDSRDispatch, cfg.LoadBalancerDSRDispatch)
	}
	if cfg.NodePortMode == option.NodePortModeHybrid &&
		cfg.LoadBalancerDSRDispatch != option.DSRDispatchOption &&
		cfg.LoadBalancerDSRDispatch != option.DSRDispatchGeneve {
		return fmt.Errorf("Invalid value for --%s: %s", option.LoadBalancerDSRDispatch, cfg.LoadBalancerDSRDispatch)
	}
	if cfg.LoadBalancerModeAnnotation &&
		cfg.LoadBalancerDSRDispatch != option.DSRDispatchIPIP {
		return fmt.Errorf("Invalid value for --%s: %s", option.LoadBalancerDSRDispatch, cfg.LoadBalancerDSRDispatch)
	}*/

	return
}
// DefaultUserConfig is the default user-facing load-balancing configuration,
// used both as the flag defaults and as the baseline that [NewConfig]
// compares deprecated options against.
var DefaultUserConfig = UserConfig{
	RetryBackoffMin:           time.Second,
	RetryBackoffMax:           time.Minute,
	LBMapEntries:              DefaultLBMapMaxEntries,
	LBPressureMetricsInterval: 5 * time.Minute,
	LBServiceMapEntries:       0, // Uses [LBMapEntries] if zero
	LBBackendMapEntries:       0, // ...
	LBRevNatEntries:           0, // ...
	LBAffinityMapEntries:      0, // ...
	LBSourceRangeMapEntries:   0, // ...
	LBMaglevMapEntries:        0, // ...
	LBSockRevNatEntries:       0, // Probes for suitable size if zero
	LBSourceRangeAllTypes:     false,
	LBSockTerminateAllProtos:  false,
	NodePortRange:             []string{},
	LBAlgorithm:               LBAlgorithmRandom,
	LBMode:                    LBModeSNAT,
	DSRDispatch:               DSRDispatchOption,
	// Defaults to false to retain prior behaviour to not route external packets
	// to ClusterIP services.
	ExternalClusterIP:         false,
	AlgorithmAnnotation:       false,
	EnableHealthCheckNodePort: true,
	EnableServiceTopology:     false,
	InitWaitTimeout:           1 * time.Minute,
}
// DefaultConfig is the default processed configuration, pairing
// [DefaultUserConfig] with the default NodePort port range.
var DefaultConfig = Config{
	UserConfig:  DefaultUserConfig,
	NodePortMin: NodePortMinDefault,
	NodePortMax: NodePortMaxDefault,
}
// TestConfig are the configuration options for testing. Only provided by tests and not present in the agent.
type TestConfig struct {
	// TestFaultProbability is the probability (0..1) of injecting a fault in LBMaps operations.
	TestFaultProbability float32 `mapstructure:"lb-test-fault-probability"`
}
// Flags registers the test-only command-line flags.
func (def TestConfig) Flags(flags *pflag.FlagSet) {
	flags.Float32("lb-test-fault-probability", def.TestFaultProbability, "Probability for fault injection in LBMaps (0..1)")
}
// ExternalConfig are configuration options derived from external sources such as
// DaemonConfig. This avoids direct access of larger configuration structs.
type ExternalConfig struct {
	// ZoneMapper resolves topology zone names to numeric IDs.
	ZoneMapper

	EnableIPv4, EnableIPv6             bool
	KubeProxyReplacement               bool
	NodePortMin, NodePortMax           uint16
	NodePortAlg                        string
	LoadBalancerAlgorithmAnnotation    bool
	BPFSocketLBHostnsOnly              bool
	EnableSocketLB                     bool
	EnableSocketLBPodConnectionTermination bool
	EnableHealthCheckLoadBalancerIP    bool

	// The following options will be removed in v1.19
	EnableHostPort bool
}
// NewExternalConfig maps the daemon config to [ExternalConfig].
//
// NOTE(review): NodePortMin/Max, NodePortAlg and
// LoadBalancerAlgorithmAnnotation are not populated here and keep their zero
// values — confirm they are filled elsewhere or intentionally unused.
func NewExternalConfig(cfg *option.DaemonConfig, kprCfg kpr.KPRConfig) ExternalConfig {
	return ExternalConfig{
		ZoneMapper: cfg,
		EnableIPv4: cfg.EnableIPv4,
		EnableIPv6: cfg.EnableIPv6,
		// KPR is considered enabled either fully or when NodePort alone is on.
		KubeProxyReplacement:                   kprCfg.KubeProxyReplacement == option.KubeProxyReplacementTrue || kprCfg.EnableNodePort,
		BPFSocketLBHostnsOnly:                  cfg.BPFSocketLBHostnsOnly,
		EnableSocketLB:                         kprCfg.EnableSocketLB,
		EnableSocketLBPodConnectionTermination: cfg.EnableSocketLBPodConnectionTermination,
		EnableHealthCheckLoadBalancerIP:        cfg.EnableHealthCheckLoadBalancerIP,
		EnableHostPort:                         kprCfg.EnableHostPort,
	}
}
// ZoneMapper maps a topology zone name to its numeric identifier.
type ZoneMapper interface {
	GetZoneID(string) uint8
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package loadbalancer
import (
"encoding/json"
"fmt"
"iter"
"slices"
"strings"
"github.com/cilium/statedb"
"github.com/cilium/statedb/index"
"github.com/cilium/statedb/reconciler"
"k8s.io/apimachinery/pkg/util/duration"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/time"
)
// FrontendParams defines the static parameters of a frontend.
// This is separate from [Frontend] to clearly separate which fields
// can be manipulated and which are internally managed by [Writer].
type FrontendParams struct {
	// Frontend address and port
	Address L3n4Addr

	// Service type (e.g. ClusterIP, NodePort, ...)
	Type SVCType

	// Name of the associated service
	ServiceName ServiceName

	// PortName if set will select only backends with matching
	// port name.
	PortName FEPortName

	// ServicePort is the associated "ClusterIP" port of this frontend.
	// Same as [Address.L4Addr.Port] except when [Type] NodePort or
	// (NOTE(review): original comment is truncated here — presumably
	// "...is NodePort or LoadBalancer"; confirm against upstream.)
	// This is used to match frontends with the [Ports] of
	// [Service.ProxyRedirect].
	ServicePort uint16
}
// Frontend is a [FrontendParams] extended with the state managed by the
// [Writer] and the reconciler: reconciliation status, the resolved backends
// and the associated [Service].
type Frontend struct {
	FrontendParams

	// Status is the reconciliation status for this frontend and
	// reflects whether or not the frontend and the associated backends
	// have been reconciled with the BPF maps.
	// Managed by [Writer].
	Status reconciler.Status

	// Backends associated with the frontend.
	Backends BackendsSeq2

	// HealthCheckBackends associated with the frontend that includes the ones that should be health checked.
	HealthCheckBackends BackendsSeq2

	// ID is the identifier allocated to this frontend. Used as the key
	// in the services BPF map. This field is populated by the reconciler
	// and is initially set to zero. It can be considered valid only when
	// [Status] is set to done.
	ID ServiceID

	// RedirectTo if set selects the backends from this service name instead
	// of that of [FrontendParams.ServiceName]. This is used to implement the
	// local redirect policies where traffic going to a specific service/frontend
	// is redirected to a local pod instead.
	RedirectTo *ServiceName

	// Service associated with the frontend. If service is updated
	// this pointer to the service will update as well and the
	// frontend is marked for reconciliation.
	Service *Service `json:"-" yaml:"-"`
}
// BackendsSeq2 is an iterator for sequence of backends that is also JSON and YAML
// marshalable. The sequence yields each backend together with its statedb revision.
type BackendsSeq2 iter.Seq2[BackendParams, statedb.Revision]
// MarshalJSON implements json.Marshaler by collecting the backend sequence
// (dropping the revisions) into a slice and encoding that.
func (s BackendsSeq2) MarshalJSON() ([]byte, error) {
	seq := iter.Seq2[BackendParams, statedb.Revision](s)
	backends := slices.Collect(statedb.ToSeq(seq))
	return json.Marshal(backends)
}
// MarshalYAML implements yaml.Marshaler by collecting the backend sequence
// (dropping the revisions) into a slice for the YAML encoder.
func (s BackendsSeq2) MarshalYAML() (any, error) {
	seq := iter.Seq2[BackendParams, statedb.Revision](s)
	return slices.Collect(statedb.ToSeq(seq)), nil
}
// Clone returns a shallow copy of the frontend.
func (fe *Frontend) Clone() *Frontend {
	copied := *fe
	return &copied
}
// TableHeader returns the column headers for rendering frontends as a table.
// Order must match [Frontend.TableRow].
func (fe *Frontend) TableHeader() []string {
	return []string{
		"Address", "Type", "ServiceName",
		"PortName", "Backends", "RedirectTo",
		"Status", "Since", "Error",
	}
}
// TableRow renders the frontend as one table row, matching the column
// order of [Frontend.TableHeader].
func (fe *Frontend) TableRow() []string {
	var redirect string
	if to := fe.RedirectTo; to != nil {
		redirect = to.String()
	}
	return []string{
		fe.Address.StringWithProtocol(),
		string(fe.Type),
		fe.ServiceName.String(),
		string(fe.PortName),
		showBackends(fe.Backends),
		redirect,
		fe.Status.Kind.String(),
		// Show the age of the status as a human-readable duration.
		duration.HumanDuration(time.Since(fe.Status.UpdatedAt)),
		fe.Status.GetError(),
	}
}
// ToModel converts the frontend and its associated service and backends into
// the API [models.Service] representation used by the agent's service API.
func (fe *Frontend) ToModel() *models.Service {
	var natPolicy string

	svc := fe.Service
	id := int64(fe.ID)
	// Only expose the NAT policy when one is actually set.
	if svc.NatPolicy != SVCNatPolicyNone {
		natPolicy = string(svc.NatPolicy)
	}
	spec := &models.ServiceSpec{
		ID:              id,
		FrontendAddress: fe.Address.GetModel(),
		Flags: &models.ServiceSpecFlags{
			Type: string(fe.Type),
			// NOTE(review): TrafficPolicy mirrors ExtTrafficPolicy —
			// presumably kept for backwards compatibility of the API model;
			// confirm before changing.
			TrafficPolicy:       string(svc.ExtTrafficPolicy),
			ExtTrafficPolicy:    string(svc.ExtTrafficPolicy),
			IntTrafficPolicy:    string(svc.IntTrafficPolicy),
			NatPolicy:           natPolicy,
			HealthCheckNodePort: svc.HealthCheckNodePort,

			Name:      svc.Name.Name(),
			Namespace: svc.Name.Namespace(),
		},
	}

	// A local redirect overrides the reported service type.
	if fe.RedirectTo != nil {
		spec.Flags.Type = string(SVCTypeLocalRedirect)
	}

	// Only report the cluster when the service comes from a remote cluster.
	if svc.Name.Cluster() != option.Config.ClusterName {
		spec.Flags.Cluster = svc.Name.Cluster()
	}

	backendModel := func(be BackendParams) *models.BackendAddress {
		addrClusterStr := be.Address.AddrCluster().String()
		// Report unhealthy backends as quarantined regardless of their
		// recorded state.
		state := be.State
		if be.Unhealthy {
			state = BackendStateQuarantined
		}
		stateStr, _ := state.String()
		return &models.BackendAddress{
			IP:       &addrClusterStr,
			Protocol: be.Address.Protocol(),
			Port:     be.Address.Port(),
			NodeName: be.NodeName,
			Zone:     be.GetZone(),
			State:    stateStr,
			// NOTE(review): Preferred is hard-coded to true for every
			// backend — confirm this is intended.
			Preferred: true,
			Weight:    &be.Weight,
		}
	}

	for be := range fe.Backends {
		spec.BackendAddresses = append(spec.BackendAddresses, backendModel(be))
	}

	return &models.Service{
		Spec: spec,
		Status: &models.ServiceStatus{
			// The realized status mirrors the spec.
			Realized: spec,
		},
	}
}
// showBackends returns the backends associated with a frontend in form
// "1.2.3.4:80, [2001::1]:443". At most the first five addresses are shown,
// followed by a "+ N more ..." summary for the rest.
func showBackends(bes BackendsSeq2) string {
	const maxToShow = 5
	var (
		shown []string
		total int
	)
	for be := range bes {
		if total < maxToShow {
			shown = append(shown, be.Address.String())
		}
		total++
	}
	out := strings.Join(shown, ", ")
	if total > maxToShow {
		out += fmt.Sprintf(" + %d more ...", total-maxToShow)
	}
	return out
}
var (
	// frontendAddressIndex indexes frontends by their frontend address;
	// this is the table's unique primary index.
	frontendAddressIndex = statedb.Index[*Frontend, L3n4Addr]{
		Name: "address",
		FromObject: func(fe *Frontend) index.KeySet {
			return index.NewKeySet(fe.Address.Bytes())
		},
		FromKey: func(l L3n4Addr) index.Key {
			return index.Key(l.Bytes())
		},
		FromString: L3n4AddrFromString,
		Unique:     true,
	}

	// FrontendByAddress queries the frontends table by frontend address.
	FrontendByAddress = frontendAddressIndex.Query

	// frontendServiceIndex indexes frontends by their service name;
	// non-unique since a service can have many frontends.
	frontendServiceIndex = statedb.Index[*Frontend, ServiceName]{
		Name: "service",
		FromObject: func(fe *Frontend) index.KeySet {
			return index.NewKeySet(fe.ServiceName.Key())
		},
		FromKey:    ServiceName.Key,
		FromString: index.FromString,
		Unique:     false,
	}

	// FrontendByServiceName queries the frontends table by service name.
	FrontendByServiceName = frontendServiceIndex.Query
)
const (
	// FrontendTableName is the name of the statedb table holding [*Frontend] objects.
	FrontendTableName = "frontends"
)
// NewFrontendsTable creates and registers the frontends table with its
// address and service-name indexes.
//
// NOTE(review): the cfg parameter is currently unused in this function —
// presumably kept for dependency-injection ordering; confirm.
func NewFrontendsTable(cfg Config, db *statedb.DB) (statedb.RWTable[*Frontend], error) {
	return statedb.NewTable(
		db,
		FrontendTableName,
		frontendAddressIndex,
		frontendServiceIndex,
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package loadbalancer
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"net/netip"
"slices"
"strconv"
"strings"
"unique"
"unsafe"
"github.com/cespare/xxhash/v2"
"github.com/cilium/statedb/index"
"github.com/cilium/statedb/part"
"go.yaml.in/yaml/v3"
"github.com/cilium/cilium/api/v1/models"
cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/container/cache"
"github.com/cilium/cilium/pkg/hive"
)
// InitWaitFunc is provided by the load-balancing cell to wait until the
// load-balancing control-plane has finished reconciliation of the initial
// data set.
type InitWaitFunc hive.WaitFunc
// IPFamily distinguishes IPv4 from IPv6 addresses. It is a bool alias so the
// two values below are the only possible states.
type IPFamily = bool

const (
	IPFamilyIPv4 = IPFamily(false)
	IPFamilyIPv6 = IPFamily(true)
)
// SVCType is a type of a service.
type SVCType string

const (
	SVCTypeNone          = SVCType("NONE")
	SVCTypeHostPort      = SVCType("HostPort")
	SVCTypeClusterIP     = SVCType("ClusterIP")
	SVCTypeNodePort      = SVCType("NodePort")
	SVCTypeExternalIPs   = SVCType("ExternalIPs")
	SVCTypeLoadBalancer  = SVCType("LoadBalancer")
	SVCTypeLocalRedirect = SVCType("LocalRedirect")
)
// SVCTrafficPolicy defines which backends are chosen
type SVCTrafficPolicy string

const (
	SVCTrafficPolicyNone    = SVCTrafficPolicy("NONE")
	SVCTrafficPolicyCluster = SVCTrafficPolicy("Cluster")
	SVCTrafficPolicyLocal   = SVCTrafficPolicy("Local")
)
// SVCNatPolicy defines whether we need NAT46/64 translation for backends
type SVCNatPolicy string

const (
	SVCNatPolicyNone  = SVCNatPolicy("NONE")
	SVCNatPolicyNat46 = SVCNatPolicy("Nat46")
	SVCNatPolicyNat64 = SVCNatPolicy("Nat64")
)
// SVCForwardingMode is the per-service forwarding mode ("dsr" or "snat"),
// with the empty string meaning "undefined/use the global mode".
type SVCForwardingMode string

const (
	SVCForwardingModeUndef = SVCForwardingMode("")
	SVCForwardingModeDSR   = SVCForwardingMode("dsr")
	SVCForwardingModeSNAT  = SVCForwardingMode("snat")
)
// ToSVCForwardingMode parses an LB mode string ("dsr"/"snat") into the
// corresponding [SVCForwardingMode], returning the undefined mode for any
// other input.
func ToSVCForwardingMode(s string) SVCForwardingMode {
	if s == LBModeDSR {
		return SVCForwardingModeDSR
	}
	if s == LBModeSNAT {
		return SVCForwardingModeSNAT
	}
	return SVCForwardingModeUndef
}
// SVCLoadBalancingAlgorithm is the datapath representation of the backend
// selection algorithm for a service.
type SVCLoadBalancingAlgorithm uint8

const (
	SVCLoadBalancingAlgorithmUndef  SVCLoadBalancingAlgorithm = 0
	SVCLoadBalancingAlgorithmRandom SVCLoadBalancingAlgorithm = 1
	SVCLoadBalancingAlgorithmMaglev SVCLoadBalancingAlgorithm = 2
)
// String returns the human-readable name of the algorithm
// ("random", "maglev", or "undef" for any other value).
func (d SVCLoadBalancingAlgorithm) String() string {
	if d == SVCLoadBalancingAlgorithmRandom {
		return "random"
	}
	if d == SVCLoadBalancingAlgorithmMaglev {
		return "maglev"
	}
	return "undef"
}
// ToSVCLoadBalancingAlgorithm parses an algorithm name into its datapath
// representation, returning the undefined algorithm for unknown input.
func ToSVCLoadBalancingAlgorithm(s string) SVCLoadBalancingAlgorithm {
	switch s {
	case LBAlgorithmMaglev:
		return SVCLoadBalancingAlgorithmMaglev
	case LBAlgorithmRandom:
		return SVCLoadBalancingAlgorithmRandom
	default:
		return SVCLoadBalancingAlgorithmUndef
	}
}
// SVCSourceRangesPolicy controls whether the configured source ranges
// allow-list or deny-list traffic.
type SVCSourceRangesPolicy string

const (
	SVCSourceRangesPolicyAllow = SVCSourceRangesPolicy("allow")
	SVCSourceRangesPolicyDeny  = SVCSourceRangesPolicy("deny")
)

// SVCProxyDelegation controls delegation of traffic to a proxy.
type SVCProxyDelegation string

const (
	SVCProxyDelegationNone            = SVCProxyDelegation("none")
	SVCProxyDelegationDelegateIfLocal = SVCProxyDelegation("delegate-if-local")
)
// ServiceFlags is the datapath representation of the service flags that can be
// used (lb{4,6}_service.flags)
type ServiceFlags uint16

const (
	serviceFlagNone            = 0
	serviceFlagExternalIPs     = 1 << 0
	serviceFlagNodePort        = 1 << 1
	serviceFlagExtLocalScope   = 1 << 2
	serviceFlagHostPort        = 1 << 3
	serviceFlagSessionAffinity = 1 << 4
	serviceFlagLoadBalancer    = 1 << 5
	serviceFlagRoutable        = 1 << 6
	serviceFlagSourceRange     = 1 << 7
	serviceFlagLocalRedirect   = 1 << 8
	serviceFlagNat46x64        = 1 << 9
	serviceFlagL7LoadBalancer  = 1 << 10
	serviceFlagLoopback        = 1 << 11
	serviceFlagIntLocalScope   = 1 << 12
	serviceFlagTwoScopes       = 1 << 13
	serviceFlagQuarantined     = 1 << 14

	// serviceFlagSrcRangesDeny is set on master
	// svc entry, serviceFlagQuarantined is only
	// set on backend svc entries.
	// Note: deliberately shares bit 1<<14 with serviceFlagQuarantined;
	// the meaning depends on whether the entry is a master or backend slot.
	serviceFlagSourceRangeDeny = 1 << 14

	serviceFlagFwdModeDSR = 1 << 15
)
// SvcFlagParam carries the per-service properties from which [NewSvcFlag]
// computes the datapath [ServiceFlags] bitmask.
// +k8s:deepcopy-gen=true
type SvcFlagParam struct {
	SvcType          SVCType
	SvcNatPolicy     SVCNatPolicy
	SvcFwdModeDSR    bool
	SvcExtLocal      bool
	SvcIntLocal      bool
	SessionAffinity  bool
	IsRoutable       bool
	CheckSourceRange bool
	SourceRangeDeny  bool
	L7LoadBalancer   bool
	LoopbackHostport bool
	Quarantined      bool
}
// NewSvcFlag creates service flag
// by translating each [SvcFlagParam] property into its datapath flag bit.
func NewSvcFlag(p *SvcFlagParam) ServiceFlags {
	var flags ServiceFlags

	// Service type is encoded as one of the mutually exclusive type bits;
	// ClusterIP (and NONE) sets no type bit.
	switch p.SvcType {
	case SVCTypeExternalIPs:
		flags |= serviceFlagExternalIPs
	case SVCTypeNodePort:
		flags |= serviceFlagNodePort
	case SVCTypeLoadBalancer:
		flags |= serviceFlagLoadBalancer
	case SVCTypeHostPort:
		flags |= serviceFlagHostPort
	case SVCTypeLocalRedirect:
		flags |= serviceFlagLocalRedirect
	}

	// Both NAT46 and NAT64 map to the single nat46x64 bit.
	switch p.SvcNatPolicy {
	case SVCNatPolicyNat46:
		fallthrough
	case SVCNatPolicyNat64:
		flags |= serviceFlagNat46x64
	}

	if p.SvcExtLocal {
		flags |= serviceFlagExtLocalScope
	}
	if p.SvcIntLocal {
		flags |= serviceFlagIntLocalScope
	}
	if p.SessionAffinity {
		flags |= serviceFlagSessionAffinity
	}
	if p.IsRoutable {
		flags |= serviceFlagRoutable
	}
	if p.SourceRangeDeny {
		flags |= serviceFlagSourceRangeDeny
	}
	if p.CheckSourceRange {
		flags |= serviceFlagSourceRange
	}
	if p.L7LoadBalancer {
		flags |= serviceFlagL7LoadBalancer
	}
	// Two lookup scopes are needed when internal and external traffic
	// policies differ (ClusterIP has no external scope, so it is excluded).
	if p.SvcExtLocal != p.SvcIntLocal && p.SvcType != SVCTypeClusterIP {
		flags |= serviceFlagTwoScopes
	}
	if p.Quarantined {
		flags |= serviceFlagQuarantined
	}
	if p.SvcFwdModeDSR {
		flags |= serviceFlagFwdModeDSR
	}
	if p.LoopbackHostport {
		flags |= serviceFlagLoopback
	}

	return flags
}
// SVCType returns a service type from the flags. The type bits are mutually
// exclusive; when none is set the service is a ClusterIP service.
func (s ServiceFlags) SVCType() SVCType {
	if s&serviceFlagExternalIPs != 0 {
		return SVCTypeExternalIPs
	}
	if s&serviceFlagNodePort != 0 {
		return SVCTypeNodePort
	}
	if s&serviceFlagLoadBalancer != 0 {
		return SVCTypeLoadBalancer
	}
	if s&serviceFlagHostPort != 0 {
		return SVCTypeHostPort
	}
	if s&serviceFlagLocalRedirect != 0 {
		return SVCTypeLocalRedirect
	}
	return SVCTypeClusterIP
}
// IsL7LB reports whether the l7-load-balancer flag is set on the service.
func (s ServiceFlags) IsL7LB() bool {
	return s&serviceFlagL7LoadBalancer != 0
}
// SVCExtTrafficPolicy returns the external traffic policy encoded in the
// flags: Local when the ext-local-scope bit is set, Cluster otherwise.
func (s ServiceFlags) SVCExtTrafficPolicy() SVCTrafficPolicy {
	if s&serviceFlagExtLocalScope != 0 {
		return SVCTrafficPolicyLocal
	}
	return SVCTrafficPolicyCluster
}
// SVCIntTrafficPolicy returns the internal traffic policy encoded in the
// flags: Local when the int-local-scope bit is set, Cluster otherwise.
func (s ServiceFlags) SVCIntTrafficPolicy() SVCTrafficPolicy {
	if s&serviceFlagIntLocalScope != 0 {
		return SVCTrafficPolicyLocal
	}
	return SVCTrafficPolicyCluster
}
// SVCNatPolicy returns a service NAT policy from the flags. The single
// nat46x64 flag bit is disambiguated by the frontend's address family:
// an IPv6 frontend implies NAT64, an IPv4 frontend implies NAT46.
func (s ServiceFlags) SVCNatPolicy(fe L3n4Addr) SVCNatPolicy {
	if s&serviceFlagNat46x64 == 0 {
		return SVCNatPolicyNone
	}
	// Idiom fix: removed else-after-return.
	if fe.IsIPv6() {
		return SVCNatPolicyNat64
	}
	return SVCNatPolicyNat46
}
// SVCSlotQuarantined reports whether the quarantined flag bit is set on this
// service slot. Note that on master service entries the same bit means
// source-range deny; this accessor is meaningful for backend slots only.
func (s ServiceFlags) SVCSlotQuarantined() bool {
	// Idiom fix: replaced the if/else returning literal booleans
	// with the boolean expression itself.
	return s&serviceFlagQuarantined != 0
}
// String returns the string implementation of ServiceFlags.
// The output starts with the service type followed by one token per set
// flag, joined with ", ". Note the order of checks matters for the
// deny/quarantined disambiguation below, since both share the same bit.
func (s ServiceFlags) String() string {
	var str []string
	// seenDeny tracks whether the shared 1<<14 bit was already reported as
	// "deny" (master entry with source-range checking), in which case it
	// must not also be reported as "quarantined".
	seenDeny := false

	str = append(str, string(s.SVCType()))
	if s&serviceFlagExtLocalScope != 0 {
		str = append(str, string(SVCTrafficPolicyLocal))
	}
	if s&serviceFlagIntLocalScope != 0 {
		str = append(str, "Internal"+string(SVCTrafficPolicyLocal))
	}
	if s&serviceFlagTwoScopes != 0 {
		str = append(str, "two-scopes")
	}
	if s&serviceFlagSessionAffinity != 0 {
		str = append(str, "sessionAffinity")
	}
	if s&serviceFlagRoutable == 0 {
		str = append(str, "non-routable")
	}
	if s&serviceFlagSourceRange != 0 {
		str = append(str, "check source-range")
		if s&serviceFlagSourceRangeDeny != 0 {
			seenDeny = true
			str = append(str, "deny")
		}
	}
	if s&serviceFlagNat46x64 != 0 {
		str = append(str, "46x64")
	}
	if s&serviceFlagL7LoadBalancer != 0 {
		str = append(str, "l7-load-balancer")
	}
	if s&serviceFlagLoopback != 0 {
		// The loopback bit means "loopback" for HostPort services and
		// "delegate-if-local" for all other types.
		if s.SVCType() == SVCTypeHostPort {
			str = append(str, "loopback")
		} else {
			str = append(str, "delegate-if-local")
		}
	}
	if !seenDeny && s&serviceFlagQuarantined != 0 {
		str = append(str, "quarantined")
	}
	if s&serviceFlagFwdModeDSR != 0 {
		str = append(str, "dsr")
	}
	return strings.Join(str, ", ")
}
// UInt16 returns the uint16 representation of the ServiceFlags.
// (Doc fix: the comment previously named the method "UInt8".)
func (s ServiceFlags) UInt16() uint16 {
	return uint16(s)
}
// L4 protocol names used as [L4Type] values.
const (
	// NONE type.
	NONE = L4Type("NONE")
	// ANY type.
	ANY = L4Type("ANY")
	// TCP type.
	TCP = L4Type("TCP")
	// UDP type.
	UDP = L4Type("UDP")
	// SCTP type.
	SCTP = L4Type("SCTP")
)
const (
	// ScopeExternal is the lookup scope for services from outside the node.
	ScopeExternal uint8 = iota
	// ScopeInternal is the lookup scope for services from inside the node.
	ScopeInternal
)
// BackendState tracks backend's ability to load-balance service traffic.
//
// Valid transition states for a backend -
// BackendStateActive -> BackendStateTerminating, BackendStateQuarantined, BackendStateMaintenance
// BackendStateTerminating -> No valid state transition
// BackendStateTerminatingNotServing -> No valid state transition
// BackendStateQuarantined -> BackendStateActive, BackendStateTerminating
// BackendStateMaintenance -> BackendStateActive
//
// Sources setting the states -
// BackendStateActive - Kubernetes events, service API
// BackendStateTerminating - Kubernetes events
// BackendStateTerminatingNotServing - Kubernetes events
// BackendStateQuarantined - service API
// BackendStateMaintenance - service API
const (
	// BackendStateActive refers to the backend state when it's available for
	// load-balancing traffic. It's the default state for a backend.
	// Backends in this state can be health-checked.
	BackendStateActive BackendState = iota
	// BackendStateTerminating refers to the terminating backend state so that
	// it can be gracefully removed. Backend in this state can be used as a fallback
	// if no active backends exist.
	// Backends in this state won't be health-checked.
	BackendStateTerminating
	// BackendStateTerminatingNotServing refers to the terminating backend state
	// for a backend that can be gracefully removed but cannot be used as fallback.
	// Backends in this state won't be health-checked. In the BPF backend map this
	// is the same as [BackendStateTerminating].
	BackendStateTerminatingNotServing
	// BackendStateQuarantined refers to the backend state when it's unreachable,
	// and will not be selected for load-balancing traffic.
	// Backends in this state can be health-checked.
	BackendStateQuarantined
	// BackendStateMaintenance refers to the backend state where the backend
	// is put under maintenance, and will neither be selected for load-balancing
	// traffic nor be health-checked.
	BackendStateMaintenance
	// BackendStateInvalid is an invalid state, and is used to report error conditions.
	// Keep this as the last entry.
	BackendStateInvalid
)
// BackendStateFlags is the datapath representation of the backend flags that
// are used in (lb{4,6}_backend.flags) to store backend state.
type BackendStateFlags = uint8

// Datapath encodings of the backend states. Both BackendStateTerminating
// and BackendStateTerminatingNotServing collapse into
// BackendStateTerminatingFlag (see NewBackendFlags).
const (
	// BackendStateActiveFlag is the zero value and thus the default flag.
	BackendStateActiveFlag = iota
	// BackendStateTerminatingFlag marks a (gracefully) terminating backend.
	BackendStateTerminatingFlag
	// BackendStateQuarantinedFlag marks an unreachable backend.
	BackendStateQuarantinedFlag
	// BackendStateMaintenanceFlag marks a backend under maintenance.
	BackendStateMaintenanceFlag
)
// NewBackendFlags converts a BackendState into its datapath flag
// representation. Both terminating states map to
// BackendStateTerminatingFlag; any unrecognized state yields the zero
// value, which equals BackendStateActiveFlag.
func NewBackendFlags(state BackendState) BackendStateFlags {
	switch state {
	case BackendStateTerminating, BackendStateTerminatingNotServing:
		return BackendStateTerminatingFlag
	case BackendStateQuarantined:
		return BackendStateQuarantinedFlag
	case BackendStateMaintenance:
		return BackendStateMaintenanceFlag
	}
	// BackendStateActive and unknown states: zero value.
	return BackendStateActiveFlag
}
// GetBackendStateFromFlags converts datapath flags back into a
// BackendState. Note the mapping is lossy: BackendStateTerminatingFlag
// always decodes to BackendStateTerminating (never the NotServing
// variant), and unknown flag values decode to BackendStateActive.
func GetBackendStateFromFlags(flags uint8) BackendState {
	if flags == BackendStateTerminatingFlag {
		return BackendStateTerminating
	}
	if flags == BackendStateQuarantinedFlag {
		return BackendStateQuarantined
	}
	if flags == BackendStateMaintenanceFlag {
		return BackendStateMaintenance
	}
	return BackendStateActive
}
// DefaultBackendWeight is used when backend weight is not set in ServiceSpec
const DefaultBackendWeight = 100

// AllProtocols is the list of all supported L4 protocols
var AllProtocols = []L4Type{TCP, UDP, SCTP}

// L4Type name. Alias of string so protocol constants compare directly
// against plain strings.
type L4Type = string
// L4TypeAsByte encodes an L4 protocol as one byte: 'T' for TCP, 'U' for
// UDP, 'S' for SCTP, and '?' for anything else. Used when building
// compact binary index keys (see L3n4Addr.Bytes).
func L4TypeAsByte(l4 L4Type) byte {
	switch l4 {
	case TCP:
		return 'T'
	case UDP:
		return 'U'
	case SCTP:
		return 'S'
	}
	return '?'
}
// FEPortName is the name of the frontend's port.
type FEPortName string

// ServiceID is the service's ID.
type ServiceID uint16

// ServiceName represents the fully-qualified reference to the service by name,
// including both the namespace and name of the service (and optionally the cluster).
//
// The components are stored in a single string with two offsets instead of
// three separate strings to keep the struct small and allocation-free to copy.
// +k8s:deepcopy-gen=true
type ServiceName struct {
	// str is (<cluster>/)<namespace>/<name>
	str string
	// namePos is where the name starts
	// (<cluster>/)<namespace>/<name>
	//                         ^
	namePos uint16
	// clusterEndPos is where the cluster (including '/' ends. If zero then there is
	// no cluster.
	// (<cluster>/)<namespace>/<name>
	//             ^
	clusterEndPos uint16
}
// Cluster returns the cluster component of the name, or the empty string
// when the name is not cluster-qualified.
func (s ServiceName) Cluster() string {
	if s.clusterEndPos == 0 {
		return ""
	}
	// Drop the trailing '/' separator.
	return s.str[:s.clusterEndPos-1]
}

// Name returns the bare name component.
func (s ServiceName) Name() string {
	return s.str[s.namePos:]
}

// Namespace returns the namespace component, which sits between the
// optional cluster prefix and the '/' preceding the name.
func (s ServiceName) Namespace() string {
	return s.str[s.clusterEndPos : s.namePos-1]
}
// Key returns the StateDB index key for the service name.
func (n ServiceName) Key() index.Key {
	// index.Key is never mutated so it's safe to return the underlying
	// string as []byte without copying.
	return unsafe.Slice(unsafe.StringData(n.str), len(n.str))
}
// NewServiceName returns the (deduplicated) ServiceName for the given
// namespace and name in the local, unnamed cluster.
func NewServiceName(namespace, name string) ServiceName {
	const noCluster = ""
	return NewServiceNameInCluster(noCluster, namespace, name)
}
// serviceNameCache for deduplicating the [ServiceName.str] to reduce overall
// memory usage. Correctness does not depend on the hash: the match
// function below compares the full string.
var serviceNameCache = cache.New(
	func(n ServiceName) uint64 {
		return serviceNameHash(n.Cluster(), n.Namespace(), n.Name())
	},
	nil,
	func(a, b ServiceName) bool {
		return b.str != "" /* only match if non-zero value */ &&
			a.str == b.str
	},
)
// serviceNameHash computes the cache hash for a service name from its
// components. The parts are hashed as a plain concatenation without
// separators, so distinct component splits of the same text collide;
// that only costs a cache-bucket scan since lookups compare fields.
func serviceNameHash(cluster, namespace, name string) uint64 {
	var digest xxhash.Digest
	for _, part := range []string{cluster, namespace, name} {
		digest.WriteString(part)
	}
	return digest.Sum64()
}
// NewServiceNameInCluster returns the ServiceName for the given cluster,
// namespace and name, deduplicated through serviceNameCache so that equal
// names share one backing string.
func NewServiceNameInCluster(cluster, namespace, name string) ServiceName {
	return cache.GetOrPutWith(
		serviceNameCache,
		serviceNameHash(cluster, namespace, name),
		func(sn ServiceName) bool {
			return len(sn.str) > 0 &&
				sn.Cluster() == cluster && sn.Namespace() == namespace && sn.Name() == name
		},
		func() ServiceName {
			// ServiceName not found from cache, create it.
			// Build "(<cluster>/)<namespace>/<name>" while tracking the
			// offsets needed by clusterEndPos and namePos.
			var b strings.Builder
			pos := 0
			if cluster != "" {
				n, _ := b.WriteString(cluster)
				b.WriteRune('/')
				pos += n + 1
			}
			// pos now points just past the cluster prefix (or 0).
			cendPos := pos
			n, _ := b.WriteString(namespace)
			b.WriteRune('/')
			pos += n + 1
			b.WriteString(name)
			return ServiceName{
				str:           b.String(),
				clusterEndPos: uint16(cendPos),
				namePos:       uint16(pos),
			}
		},
	)
}
// MarshalJSON encodes the service name as a single JSON string in its
// "(<cluster>/)<namespace>/<name>" form.
func (n ServiceName) MarshalJSON() ([]byte, error) {
	return json.Marshal(n.String())
}
// UnmarshalJSON decodes a JSON string (as produced by MarshalJSON) into n.
// Using json.Unmarshal instead of hand-trimming quote characters makes
// escaped characters round-trip correctly and rejects malformed input
// (the old strings.Trim also stripped multiple leading/trailing quotes).
func (n *ServiceName) UnmarshalJSON(bs []byte) error {
	var s string
	if err := json.Unmarshal(bs, &s); err != nil {
		return err
	}
	return n.unmarshalString(s)
}
// unmarshalString parses "(<cluster>/)<namespace>/<name>" into n.
// One slash means <namespace>/<name>; two (or more) slashes mean the
// first segment is the cluster. Returns an error when no slash is found.
func (n *ServiceName) unmarshalString(s string) error {
	// Cap the length so the uint16 position fields cannot overflow.
	s = strings.TrimSpace(s[:min(len(s), 65535)])
	orig := s
	// Reset all fields up front: n may be reused across unmarshals and
	// stale clusterEndPos/namePos from a previous value must not survive
	// (previously the i2 < 0 branch left clusterEndPos untouched).
	*n = ServiceName{str: s}
	pos := 0
	// popSlash advances past the next '/' and returns the new offset,
	// or -1 if no '/' remains.
	popSlash := func() int {
		if len(s) > 0 {
			idx := strings.Index(s, "/")
			if idx >= 0 {
				s = s[idx+1:]
				pos += idx + 1
				return pos
			}
		}
		return -1
	}
	i1, i2 := popSlash(), popSlash()
	switch {
	case i1 < 0:
		n.str = ""
		return fmt.Errorf("invalid service name: no namespace in %q", orig)
	case i2 < 0:
		// Single slash: <namespace>/<name>, no cluster.
		n.namePos = uint16(i1)
	default:
		// Two slashes: <cluster>/<namespace>/<name>.
		n.clusterEndPos = uint16(i1)
		n.namePos = uint16(i2)
	}
	// Deduplicate
	*n = serviceNameCache.Get(*n)
	return nil
}
// MarshalYAML encodes the service name as its string form.
func (n ServiceName) MarshalYAML() (any, error) {
	return n.String(), nil
}

// UnmarshalYAML decodes a YAML scalar node into the service name.
func (n *ServiceName) UnmarshalYAML(value *yaml.Node) error {
	s := value.Value
	return n.unmarshalString(s)
}
// Equal reports whether n and other represent the same service name.
func (n *ServiceName) Equal(other ServiceName) bool {
	if n.str != other.str {
		return false
	}
	return n.namePos == other.namePos &&
		n.clusterEndPos == other.clusterEndPos
}
// Compare orders service names by namespace, then name, then cluster,
// returning -1, 0 or 1 in the manner of strings.Compare.
func (n ServiceName) Compare(other ServiceName) int {
	if c := strings.Compare(n.Namespace(), other.Namespace()); c != 0 {
		return c
	}
	if c := strings.Compare(n.Name(), other.Name()); c != 0 {
		return c
	}
	return strings.Compare(n.Cluster(), other.Cluster())
}
// String returns the full "(<cluster>/)<namespace>/<name>" representation.
func (n ServiceName) String() string {
	return n.str
}

// AppendSuffix returns a copy of the name with suffix appended. The stored
// offsets stay valid because the suffix only extends the name component.
// NOTE(review): the result bypasses serviceNameCache deduplication.
func (n ServiceName) AppendSuffix(suffix string) ServiceName {
	n.str += suffix
	return n
}

// BackendID is the backend's ID.
type BackendID uint32

// BackendState is the state of a backend for load-balancing service traffic.
type BackendState uint8
// String returns the API model name for the backend state, or an error
// for values outside the known set (e.g. BackendStateInvalid).
func (state BackendState) String() (string, error) {
	var name string
	switch state {
	case BackendStateActive:
		name = models.BackendAddressStateActive
	case BackendStateTerminating:
		name = models.BackendAddressStateTerminating
	case BackendStateTerminatingNotServing:
		name = models.BackendAddressStateTerminatingDashNotDashServing
	case BackendStateQuarantined:
		name = models.BackendAddressStateQuarantined
	case BackendStateMaintenance:
		name = models.BackendAddressStateMaintenance
	default:
		return "", fmt.Errorf("invalid backend state %d", state)
	}
	return name, nil
}
// NewL4Type parses a case-insensitive protocol name ("none", "any",
// "tcp", "udp", "sctp") into an L4Type. The returned error now includes
// the offending input to make failures diagnosable.
func NewL4Type(name string) (L4Type, error) {
	switch strings.ToLower(name) {
	case "none":
		return NONE, nil
	case "any":
		return ANY, nil
	case "tcp":
		return TCP, nil
	case "udp":
		return UDP, nil
	case "sctp":
		return SCTP, nil
	default:
		return "", fmt.Errorf("unknown L4 protocol %q", name)
	}
}
// NewL4TypeFromNumber maps an IP protocol number to an L4Type:
// 6 -> TCP, 17 -> UDP, 132 -> SCTP. Any other number yields ANY.
func NewL4TypeFromNumber(proto uint8) L4Type {
	switch proto {
	case 6:
		return TCP
	case 17:
		return UDP
	case 132:
		return SCTP
	default:
		return ANY
	}
}
// L4Addr is an abstraction for the backend port with a L4Type, usually tcp or udp, and
// the Port number.
//
// +k8s:deepcopy-gen=true
// +deepequal-gen=true
// +deepequal-gen:private-method=true
type L4Addr struct {
	// Protocol is the L4 protocol name (e.g. TCP, UDP, SCTP).
	Protocol L4Type
	// Port is the L4 port number.
	Port uint16
}

// DeepEqual returns true if both the receiver and 'o' are deeply equal.
func (l *L4Addr) DeepEqual(o *L4Addr) bool {
	if l == nil {
		return o == nil
	}
	// deepEqual is the generated field-wise comparison.
	return l.deepEqual(o)
}

// NewL4Addr creates a new L4Addr.
func NewL4Addr(protocol L4Type, number uint16) L4Addr {
	return L4Addr{Protocol: protocol, Port: number}
}

// Equals returns true if both L4Addr are considered equal.
func (l L4Addr) Equals(o L4Addr) bool {
	return l.Port == o.Port && l.Protocol == o.Protocol
}
// String returns the L4Addr in "port/protocol" form (e.g. "80/TCP").
// Built with strconv concatenation instead of fmt.Sprintf to avoid
// argument boxing and reflection, matching L3n4Addr.StringWithProtocol.
func (l L4Addr) String() string {
	return strconv.FormatUint(uint64(l.Port), 10) + "/" + l.Protocol
}
// L3n4Addr is an unique L3+L4 address and scope (for traffic policies).
// It is a unique.Handle, so equal addresses share one canonical
// representation and compare with a single word comparison.
type L3n4Addr unique.Handle[l3n4AddrRep]

// l3n4AddrRep is the internal representation for L3n4Addr.
type l3n4AddrRep struct {
	// addrCluster is the L3 address plus cluster qualifier.
	addrCluster cmtypes.AddrCluster
	// L4Addr provides the protocol and port.
	L4Addr
	// scope is ScopeExternal or ScopeInternal.
	scope uint8
}

// rep dereferences the unique handle to the underlying representation.
func (l L3n4Addr) rep() l3n4AddrRep {
	return unique.Handle[l3n4AddrRep](l).Value()
}

// Addr returns the L3 address.
func (l L3n4Addr) Addr() netip.Addr {
	return l.rep().addrCluster.Addr()
}

// Protocol returns the L4 protocol.
func (l L3n4Addr) Protocol() L4Type {
	return l.rep().Protocol
}

// Port returns the L4 port number.
func (l L3n4Addr) Port() uint16 {
	return l.rep().Port
}

// Scope returns the lookup scope (ScopeExternal or ScopeInternal).
func (l L3n4Addr) Scope() uint8 {
	return l.rep().scope
}

// AddrCluster returns the cluster-qualified L3 address.
func (l L3n4Addr) AddrCluster() cmtypes.AddrCluster {
	return l.rep().addrCluster
}
// NewL3n4Addr creates a new L3n4Addr, interning the representation via
// unique.Make so equal addresses share storage.
func NewL3n4Addr(protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16, scope uint8) L3n4Addr {
	rep := l3n4AddrRep{
		addrCluster: addrCluster,
		L4Addr:      L4Addr{Protocol: protocol, Port: portNumber},
		scope:       scope,
	}
	return L3n4Addr(unique.Make(rep))
}
// NewL3n4AddrFromModel converts a frontend address API model into an
// L3n4Addr. A nil model yields (nil, nil). The protocol defaults to NONE
// when unset in the model.
func NewL3n4AddrFromModel(base *models.FrontendAddress) (*L3n4Addr, error) {
	if base == nil {
		return nil, nil
	}
	if base.IP == "" {
		return nil, fmt.Errorf("missing IP address")
	}
	proto := NONE
	if base.Protocol != "" {
		p, err := NewL4Type(base.Protocol)
		if err != nil {
			return nil, err
		}
		proto = p
	}
	addrCluster, err := cmtypes.ParseAddrCluster(base.IP)
	if err != nil {
		return nil, err
	}
	var scope uint8
	switch base.Scope {
	case models.FrontendAddressScopeExternal:
		scope = ScopeExternal
	case models.FrontendAddressScopeInternal:
		scope = ScopeInternal
	default:
		return nil, fmt.Errorf("invalid scope \"%s\"", base.Scope)
	}
	addr := NewL3n4Addr(proto, addrCluster, base.Port, scope)
	return &addr, nil
}
// L3n4AddrFromString constructs a StateDB key by parsing the input in the form of
// L3n4Addr.String(), e.g. <addr>:<port>/protocol. The input can be partial to construct
// keys for prefix searches, e.g. "1.2.3.4".
// This must be kept in sync with Bytes().
func L3n4AddrFromString(key string) (index.Key, error) {
	keyErr := errors.New("bad key, expected \"<addr>:<port>/<proto>(/i)\", e.g. \"1.2.3.4:80/TCP\" or classful prefix \"10.0.0.0/8\"")
	var out []byte
	if len(key) == 0 {
		return index.Key{}, keyErr
	}
	// Parse address. IPv6 addresses are bracketed ("[::1]:80"); anything
	// after the closing bracket must be ":" plus the rest, or nothing.
	var addr string
	if strings.HasPrefix(key, "[") {
		addr, key, _ = strings.Cut(key[1:], "]")
		switch {
		case strings.HasPrefix(key, ":"):
			key = key[1:]
		case len(key) > 0:
			return index.Key{}, keyErr
		}
	} else {
		addr, key, _ = strings.Cut(key, ":")
	}
	addrCluster, err := cmtypes.ParseAddrCluster(addr)
	if err != nil {
		// See if the address is a prefix and try to parse it as such.
		// We only support classful searches, e.g. /8, /16, /24, /32 since
		// indexing is byte-wise.
		if prefix, err := netip.ParsePrefix(addr); err == nil {
			bits := prefix.Bits()
			if bits%8 != 0 {
				return index.Key{}, fmt.Errorf("%w: only classful prefixes supported (/8,/16,/24,/32)", keyErr)
			}
			bytes := prefix.Addr().As16()
			if prefix.Addr().Is6() {
				return index.Key(bytes[:bits/8]), nil
			} else {
				// The address is in the 16-byte format, cut from the last 4 bytes.
				return index.Key(bytes[:12+bits/8]), nil
			}
		}
		return index.Key{}, fmt.Errorf("%w: %w", keyErr, err)
	}
	addr20 := addrCluster.As20()
	out = append(out, addr20[:]...)
	// Parse port (big-endian, matching Bytes()).
	if len(key) > 0 {
		var s string
		s, key, _ = strings.Cut(key, "/")
		port, err := strconv.ParseUint(s, 10, 16)
		if err != nil {
			return index.Key{}, fmt.Errorf("%w: %w", keyErr, err)
		}
		out = binary.BigEndian.AppendUint16(out, uint16(port))
	}
	// Parse protocol, encoded as one byte via L4TypeAsByte.
	hadProto := false
	if len(key) > 0 {
		var proto string
		proto, key, _ = strings.Cut(key, "/")
		protoByte := L4TypeAsByte(strings.ToUpper(proto))
		if protoByte == '?' {
			return index.Key{}, fmt.Errorf("%w: bad protocol, expected TCP/UDP/SCTP", keyErr)
		}
		out = append(out, protoByte)
		hadProto = true
	}
	// Parse scope.
	switch {
	case key == "i":
		out = append(out, ScopeInternal)
	case hadProto:
		// Since external scope is implicit we add it here if the protocol was
		// also provided. This way we can construct partial keys for prefix
		// searching and we can construct complete key for 'get'.
		out = append(out, ScopeExternal)
	}
	return index.Key(out), nil
}
// ParseFromString parses "<addr>:<port>/<proto>(/i)" into l. Unlike
// L3n4AddrFromString this requires a complete address: the port is
// mandatory, the protocol defaults to TCP when omitted, and a trailing
// "/i" selects the internal scope.
func (l *L3n4Addr) ParseFromString(s string) error {
	formatError := fmt.Errorf(
		"bad address %q, expected \"<addr>:<port>/<proto>(/i)\", e.g. \"1.2.3.4:80/TCP\"",
		s,
	)
	// Parse address. IPv6 addresses are bracketed ("[::1]:80").
	var addr string
	if strings.HasPrefix(s, "[") {
		addr, s, _ = strings.Cut(s[1:], "]")
		switch {
		case strings.HasPrefix(s, ":"):
			s = s[1:]
		case len(s) > 0:
			return formatError
		}
	} else {
		addr, s, _ = strings.Cut(s, ":")
	}
	addrCluster, err := cmtypes.ParseAddrCluster(addr)
	if err != nil {
		return formatError
	}
	// Parse port (required).
	if len(s) < 1 {
		return formatError
	}
	var portS string
	portS, s, _ = strings.Cut(s, "/")
	port, err := strconv.ParseUint(portS, 10, 16)
	if err != nil {
		return formatError
	}
	// Parse protocol; defaults to TCP when not given.
	protocol := TCP
	if len(s) > 0 {
		var proto string
		proto, s, _ = strings.Cut(s, "/")
		protocol = L4Type(strings.ToUpper(proto))
		if !slices.Contains(AllProtocols, protocol) {
			return formatError
		}
	}
	// Parse scope.
	scope := ScopeExternal
	if s == "i" {
		scope = ScopeInternal
	}
	*l = NewL3n4Addr(protocol, addrCluster, uint16(port), scope)
	return nil
}
// GetModel converts the L3n4Addr into its frontend address API model.
// Returns nil for a nil receiver.
func (a *L3n4Addr) GetModel() *models.FrontendAddress {
	if a == nil {
		return nil
	}
	// Dereference the handle once instead of per accessor.
	rep := a.rep()
	scope := models.FrontendAddressScopeExternal
	if rep.scope == ScopeInternal {
		scope = models.FrontendAddressScopeInternal
	}
	return &models.FrontendAddress{
		IP:       rep.addrCluster.String(),
		Protocol: rep.Protocol,
		Port:     rep.Port,
		Scope:    scope,
	}
}
// String returns the L3n4Addr in the "IPv4:Port/Protocol[/Scope]" format for IPv4 and
// "[IPv6]:Port/Protocol[/Scope]" format for IPv6.
func (a L3n4Addr) String() string {
	return a.StringWithProtocol()
}

// StringWithProtocol returns the L3n4Addr in the "IPv4:Port/Protocol[/Scope]"
// format for IPv4 and "[IPv6]:Port/Protocol[/Scope]" format for IPv6.
// Built with strconv concatenation rather than fmt to avoid boxing.
func (a L3n4Addr) StringWithProtocol() string {
	rep := a.rep()
	var scope string
	if rep.scope == ScopeInternal {
		scope = "/i"
	}
	if a.IsIPv6() {
		return "[" + rep.addrCluster.String() + "]:" + strconv.FormatUint(uint64(rep.Port), 10) + "/" + rep.Protocol + scope
	}
	return rep.addrCluster.String() + ":" + strconv.FormatUint(uint64(rep.Port), 10) + "/" + rep.Protocol + scope
}

// StringID returns the L3n4Addr as string to be used for unique identification
func (a L3n4Addr) StringID() string {
	return a.String()
}

// IsIPv6 returns true if the IP address in the given L3n4Addr is IPv6 or not.
func (a L3n4Addr) IsIPv6() bool {
	return a.rep().addrCluster.Is6()
}

// AddrString returns only "addr:port", without protocol, scope or
// cluster qualifier.
func (l L3n4Addr) AddrString() string {
	rep := l.rep()
	return rep.addrCluster.Addr().String() + ":" + strconv.FormatUint(uint64(rep.Port), 10)
}
// l3n4AddrCacheEntry pairs an address with its serialized key bytes so
// Bytes() can return a cached slice instead of reserializing.
type l3n4AddrCacheEntry struct {
	addr  L3n4Addr
	bytes []byte
}

// l3n4AddrCache deduplicates the serialized forms of L3n4Addr. Matching
// compares the full byte key, so hash collisions only cost a bucket scan.
var l3n4AddrCache = cache.New(
	func(e l3n4AddrCacheEntry) uint64 {
		return e.addr.l3n4AddrCacheHash()
	},
	nil,
	func(a, b l3n4AddrCacheEntry) bool {
		return bytes.Equal(a.bytes, b.bytes)
	},
)

// l3n4AddrCacheHash hashes the 16-byte address and the port. Protocol,
// scope and cluster are deliberately left out of the hash; entries
// differing only in those fields land in the same bucket and are
// separated by the byte-wise match function above.
func (l L3n4Addr) l3n4AddrCacheHash() uint64 {
	var d xxhash.Digest
	buf := l.Addr().As16()
	d.Write(buf[:])
	// Reuse the first two bytes of the scratch buffer for the port.
	binary.BigEndian.PutUint16(buf[:], l.Port())
	d.Write(buf[:2])
	return d.Sum64()
}
// Bytes returns the address as a byte slice for indexing purposes.
// The returned byte slice must not be mutated.
// Layout (must stay in sync with L3n4AddrFromString):
// 20-byte AddrCluster | 2-byte big-endian port | 1-byte protocol | 1-byte scope.
func (l L3n4Addr) Bytes() []byte {
	return cache.GetOrPutWith(
		l3n4AddrCache,
		l.l3n4AddrCacheHash(),
		func(e l3n4AddrCacheEntry) bool {
			return e.addr == l
		},
		func() l3n4AddrCacheEntry {
			// Not cached yet: serialize once and keep the result.
			const keySize = cmtypes.AddrClusterLen +
				2 /* Port */ +
				1 /* Protocol */ +
				1 /* Scope */
			rep := l.rep()
			key := make([]byte, 0, keySize)
			addr20 := rep.addrCluster.As20()
			key = append(key, addr20[:]...)
			key = binary.BigEndian.AppendUint16(key, rep.Port)
			key = append(key, L4TypeAsByte(rep.Protocol))
			key = append(key, rep.scope)
			return l3n4AddrCacheEntry{
				addr:  l,
				bytes: key,
			}
		}).bytes
}
// MarshalYAML encodes the address in its String()/ParseFromString form.
func (l L3n4Addr) MarshalYAML() (any, error) {
	return l.StringWithProtocol(), nil
}

// UnmarshalYAML decodes a YAML scalar via ParseFromString.
func (l *L3n4Addr) UnmarshalYAML(value *yaml.Node) error {
	return l.ParseFromString(value.Value)
}

// MarshalJSON encodes the address as a JSON string in its String() form.
func (l L3n4Addr) MarshalJSON() ([]byte, error) {
	return json.Marshal(l.StringWithProtocol())
}
// NewL3n4AddrFromBackendModel converts a backend address API model into
// an L3n4Addr with ScopeExternal. Mirrors NewL3n4AddrFromModel: a nil
// model yields (nil, nil) instead of dereferencing nil as before.
func NewL3n4AddrFromBackendModel(base *models.BackendAddress) (*L3n4Addr, error) {
	if base == nil {
		return nil, nil
	}
	if base.IP == nil {
		return nil, fmt.Errorf("missing IP address")
	}
	addrCluster, err := cmtypes.ParseAddrCluster(*base.IP)
	if err != nil {
		return nil, err
	}
	addr := NewL3n4Addr(base.Protocol, addrCluster, base.Port, ScopeExternal)
	return &addr, nil
}
// init registers the binary key encodings used by part.Map/part.Set.
func init() {
	// Register the types for use with part.Map and part.Set.
	part.RegisterKeyType(
		func(name ServiceName) []byte { return []byte(name.Key()) })
	part.RegisterKeyType(L3n4Addr.Bytes)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package loadbalancer
import (
"fmt"
"maps"
"net/netip"
"slices"
"sort"
"strconv"
"strings"
"github.com/cilium/statedb"
"github.com/cilium/statedb/index"
"github.com/cilium/cilium/pkg/annotation"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
)
// Service defines the common properties for a load-balancing service. Associated with a
// service are a set of frontends that receive the traffic, and a set of backends to which
// the traffic is directed. A single frontend can map to a partial subset of backends depending
// on its properties.
//
// NOTE: Clone() copies by value; the reference fields (Labels, Annotations,
// Selector, SourceRanges, PortNames) are shared between copies.
type Service struct {
	// Name is the fully qualified service name, e.g. (<cluster>/)<namespace>/<name>.
	Name ServiceName
	// Source is the data source from which this service was ingested from.
	Source source.Source
	// Labels associated with the service.
	Labels labels.Labels
	// Annotations associated with this service.
	Annotations map[string]string
	// Selector specifies which pods should be associated with this service. If
	// this is empty the backends associated to this service are managed externally
	// and not by Kubernetes.
	Selector map[string]string
	// NatPolicy defines whether we need NAT46/64 translation for backends.
	NatPolicy SVCNatPolicy
	// ExtTrafficPolicy controls how backends are selected for North-South traffic.
	// If set to "Local", only node-local backends are chosen.
	ExtTrafficPolicy SVCTrafficPolicy
	// IntTrafficPolicy controls how backends are selected for East-West traffic.
	// If set to "Local", only node-local backends are chosen.
	IntTrafficPolicy SVCTrafficPolicy
	// ForwardingMode controls whether DSR or SNAT should be used for the dispatch
	// to the backend. If undefined the default mode is used (--bpf-lb-mode).
	ForwardingMode SVCForwardingMode
	// SessionAffinity if true will enable the client IP based session affinity.
	SessionAffinity bool
	// SessionAffinityTimeout is the duration of inactivity before the session
	// affinity is cleared for a specific client IP.
	SessionAffinityTimeout time.Duration
	// LoadBalancerClass if set specifies the load-balancer class to be used
	// for a LoadBalancer service. If unset the default implementation is used.
	LoadBalancerClass *string
	// ProxyRedirect if non-nil redirects the traffic going to the frontends
	// towards a locally running proxy.
	ProxyRedirect *ProxyRedirect
	// HealthCheckNodePort defines on which port the node runs a HTTP health
	// check server which may be used by external loadbalancers to determine
	// if a node has local backends. This will only have effect if both
	// LoadBalancerIPs is not empty and ExtTrafficPolicy is SVCTrafficPolicyLocal.
	HealthCheckNodePort uint16
	// LoopbackHostPort defines that HostPort frontends for this service should
	// only be exposed internally to the node.
	LoopbackHostPort bool
	// SourceRanges if non-empty will restrict access to the service to the specified
	// client addresses.
	SourceRanges []netip.Prefix
	// PortNames maps a port name to a port number.
	PortNames map[string]uint16
	// TrafficDistribution if not default will influence how backends are chosen for
	// frontends associated with this service.
	TrafficDistribution TrafficDistribution
}
// TrafficDistribution expresses a preference for how traffic is
// distributed across backends.
type TrafficDistribution string

const (
	// TrafficDistributionDefault will ignore any topology aware hints for choosing the backends.
	TrafficDistributionDefault = TrafficDistribution("")
	// TrafficDistributionPreferClose indicates preference for routing traffic to topologically close backends,
	// that is to backends that are in the same zone.
	TrafficDistributionPreferClose = TrafficDistribution("PreferClose")
)
// GetLBAlgorithmAnnotation returns the load-balancing algorithm requested
// via the service's annotation, as interpreted by
// ToSVCLoadBalancingAlgorithm (an absent annotation yields the zero value).
func (svc *Service) GetLBAlgorithmAnnotation() SVCLoadBalancingAlgorithm {
	return ToSVCLoadBalancingAlgorithm(svc.Annotations[annotation.ServiceLoadBalancingAlgorithm])
}
// GetProxyDelegation returns the proxy delegation mode requested via the
// service's annotation. Only the "delegate-if-local" mode is recognized;
// anything else (including a missing annotation) yields
// SVCProxyDelegationNone.
func (svc *Service) GetProxyDelegation() SVCProxyDelegation {
	value, ok := annotation.Get(svc, annotation.ServiceProxyDelegation)
	if !ok {
		return SVCProxyDelegationNone
	}
	if mode := SVCProxyDelegation(strings.ToLower(value)); mode == SVCProxyDelegationDelegateIfLocal {
		return mode
	}
	return SVCProxyDelegationNone
}
// GetSourceRangesPolicy returns the source-ranges policy from the
// service's annotation. Only "deny" is recognized; anything else
// (including a missing annotation) yields the default allow policy.
func (svc *Service) GetSourceRangesPolicy() SVCSourceRangesPolicy {
	value, ok := annotation.Get(svc, annotation.ServiceSourceRangesPolicy)
	if ok && SVCSourceRangesPolicy(strings.ToLower(value)) == SVCSourceRangesPolicyDeny {
		return SVCSourceRangesPolicyDeny
	}
	return SVCSourceRangesPolicyAllow
}
// GetAnnotations returns the service's annotations, satisfying the
// accessor interface expected by annotation.Get.
func (svc *Service) GetAnnotations() map[string]string {
	return svc.Annotations
}
// ProxyRedirect describes the redirection of frontend traffic to a
// locally running proxy.
type ProxyRedirect struct {
	// ProxyPort is the port of the local proxy to redirect to.
	ProxyPort uint16
	// Ports if non-empty will only redirect a frontend with a matching port.
	Ports []uint16
}

// Redirects reports whether traffic to the given frontend port should be
// redirected. A nil ProxyRedirect never redirects; an empty Ports list
// redirects every port.
func (pr *ProxyRedirect) Redirects(port uint16) bool {
	if pr == nil {
		return false
	}
	if len(pr.Ports) == 0 {
		return true
	}
	return slices.Contains(pr.Ports, port)
}

// Equal reports whether both redirects are nil or have equal contents.
func (pr *ProxyRedirect) Equal(other *ProxyRedirect) bool {
	if pr == nil || other == nil {
		return pr == other
	}
	return pr.ProxyPort == other.ProxyPort && slices.Equal(pr.Ports, other.Ports)
}

// String returns a human-readable representation of the redirect, or the
// empty string for a nil receiver.
func (pr *ProxyRedirect) String() string {
	if pr == nil {
		return ""
	}
	if len(pr.Ports) == 0 {
		return strconv.FormatUint(uint64(pr.ProxyPort), 10)
	}
	return fmt.Sprintf("%d (ports: %v)", pr.ProxyPort, pr.Ports)
}
// Clone returns a shallow clone of the service, e.g. for updating a service with UpsertService. Fields that are references
// (e.g. Labels or Annotations) must be further cloned if mutated.
func (svc *Service) Clone() *Service {
	svc2 := *svc
	return &svc2
}

// TableHeader returns the column headers for the service's tabular
// debug output; must stay aligned with TableRow.
func (svc *Service) TableHeader() []string {
	// NOTE: Annotations and labels are not shown here as they're rarely interesting for debugging.
	// They are still available for inspection via "cilium-dbg statedb dump".
	return []string{
		"Name",
		"Source",
		"PortNames",
		"TrafficPolicy",
		"Flags",
	}
}
// TableRow renders the service as one row of tabular debug output,
// aligned with TableHeader. Rarely-set fields are collapsed into a
// sorted, comma-separated "Flags" column.
func (svc *Service) TableRow() []string {
	var trafficPolicy string
	if svc.ExtTrafficPolicy == svc.IntTrafficPolicy {
		trafficPolicy = string(svc.ExtTrafficPolicy)
	} else {
		trafficPolicy = fmt.Sprintf("Ext=%s, Int=%s", svc.ExtTrafficPolicy, svc.IntTrafficPolicy)
	}
	// Collapse the more rarely set fields into a single "Flags" column
	var flags []string
	if svc.SessionAffinity {
		flags = append(flags, "SessionAffinity="+svc.SessionAffinityTimeout.String())
	}
	if len(svc.SourceRanges) > 0 {
		cidrs := svc.SourceRanges
		ss := make([]string, len(cidrs))
		for i := range cidrs {
			ss[i] = cidrs[i].String()
		}
		flags = append(flags, "SourceRanges="+strings.Join(ss, ", "))
	}
	if p := svc.GetSourceRangesPolicy(); p == SVCSourceRangesPolicyDeny {
		flags = append(flags, "SourceRangesPolicy=deny")
	}
	if svc.ProxyRedirect != nil {
		flags = append(flags, "ProxyRedirect="+svc.ProxyRedirect.String())
	}
	if svc.HealthCheckNodePort != 0 {
		flags = append(flags, fmt.Sprintf("HealthCheckNodePort=%d", svc.HealthCheckNodePort))
	}
	if svc.LoopbackHostPort {
		flags = append(flags, "LoopbackHostPort="+strconv.FormatBool(svc.LoopbackHostPort))
	}
	if alg := svc.GetLBAlgorithmAnnotation(); alg != SVCLoadBalancingAlgorithmUndef {
		flags = append(flags, "ExplicitLBAlgorithm="+alg.String())
	}
	if svc.ForwardingMode != SVCForwardingModeUndef {
		flags = append(flags, "ForwardingMode="+string(svc.ForwardingMode))
	}
	if svc.TrafficDistribution != TrafficDistributionDefault {
		flags = append(flags, "TrafficDistribution="+string(svc.TrafficDistribution))
	}
	if svc.LoadBalancerClass != nil {
		flags = append(flags, "LoadBalancerClass="+*svc.LoadBalancerClass)
	}
	// Sort for deterministic output.
	sort.Strings(flags)
	return []string{
		svc.Name.String(),
		string(svc.Source),
		svc.showPortNames(),
		trafficPolicy,
		strings.Join(flags, ", "),
	}
}
// showPortNames renders the port-name map as "name=port, name=port, ..."
// with the names in sorted order for deterministic output.
func (svc *Service) showPortNames() string {
	names := slices.Sorted(maps.Keys(svc.PortNames))
	parts := make([]string, 0, len(names))
	for _, name := range names {
		parts = append(parts, fmt.Sprintf("%s=%d", name, svc.PortNames[name]))
	}
	return strings.Join(parts, ", ")
}
var (
	// serviceNameIndex is the unique primary index over ServiceName.
	serviceNameIndex = statedb.Index[*Service, ServiceName]{
		Name: "name",
		FromObject: func(obj *Service) index.KeySet {
			return index.NewKeySet(obj.Name.Key())
		},
		FromKey:    ServiceName.Key,
		FromString: index.FromString,
		Unique:     true,
	}
	// ServiceByName queries services by their fully-qualified name.
	ServiceByName = serviceNameIndex.Query
)

const (
	// ServiceTableName is the StateDB table name for services.
	ServiceTableName = "services"
)

// NewServicesTable creates and registers the services table with its name
// index. NOTE(review): the cfg parameter is unreferenced here; presumably
// kept for dependency-injection wiring — confirm before removing.
func NewServicesTable(cfg Config, db *statedb.DB) (statedb.RWTable[*Service], error) {
	return statedb.NewTable(
		db,
		ServiceTableName,
		serviceNameIndex,
	)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package loadbalancer
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// L4Addr has no reference fields, so a plain value copy suffices.
func (in *L4Addr) DeepCopyInto(out *L4Addr) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4Addr.
func (in *L4Addr) DeepCopy() *L4Addr {
	if in == nil {
		return nil
	}
	out := new(L4Addr)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// ServiceName has no reference fields requiring deep copying, so a plain
// value copy suffices (the shared str string is immutable).
func (in *ServiceName) DeepCopyInto(out *ServiceName) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceName.
func (in *ServiceName) DeepCopy() *ServiceName {
	if in == nil {
		return nil
	}
	out := new(ServiceName)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SvcFlagParam) DeepCopyInto(out *SvcFlagParam) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SvcFlagParam.
func (in *SvcFlagParam) DeepCopy() *SvcFlagParam {
	if in == nil {
		return nil
	}
	out := new(SvcFlagParam)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package loadbalancer
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Config) DeepEqual(other *Config) bool {
	if other == nil {
		return false
	}
	if !in.UserConfig.DeepEqual(&other.UserConfig) {
		return false
	}
	if in.NodePortMin != other.NodePortMin {
		return false
	}
	if in.NodePortMax != other.NodePortMax {
		return false
	}
	return true
}

// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *L4Addr) deepEqual(other *L4Addr) bool {
	if other == nil {
		return false
	}
	if in.Protocol != other.Protocol {
		return false
	}
	if in.Port != other.Port {
		return false
	}
	return true
}

// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *UserConfig) DeepEqual(other *UserConfig) bool {
	if other == nil {
		return false
	}
	if in.RetryBackoffMin != other.RetryBackoffMin {
		return false
	}
	if in.RetryBackoffMax != other.RetryBackoffMax {
		return false
	}
	if in.LBMapEntries != other.LBMapEntries {
		return false
	}
	if in.LBServiceMapEntries != other.LBServiceMapEntries {
		return false
	}
	if in.LBBackendMapEntries != other.LBBackendMapEntries {
		return false
	}
	if in.LBRevNatEntries != other.LBRevNatEntries {
		return false
	}
	if in.LBAffinityMapEntries != other.LBAffinityMapEntries {
		return false
	}
	if in.LBSourceRangeAllTypes != other.LBSourceRangeAllTypes {
		return false
	}
	if in.LBSourceRangeMapEntries != other.LBSourceRangeMapEntries {
		return false
	}
	if in.LBMaglevMapEntries != other.LBMaglevMapEntries {
		return false
	}
	if in.LBSockRevNatEntries != other.LBSockRevNatEntries {
		return false
	}
	// Generated slice comparison: entered when both sides are non-nil or
	// their nil-ness differs; the shadowed in/other below are pointers to
	// the slice fields.
	if ((in.NodePortRange != nil) && (other.NodePortRange != nil)) || ((in.NodePortRange == nil) != (other.NodePortRange == nil)) {
		in, other := &in.NodePortRange, &other.NodePortRange
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.LBMode != other.LBMode {
		return false
	}
	if in.LBModeAnnotation != other.LBModeAnnotation {
		return false
	}
	if in.LBAlgorithm != other.LBAlgorithm {
		return false
	}
	if in.DSRDispatch != other.DSRDispatch {
		return false
	}
	if in.ExternalClusterIP != other.ExternalClusterIP {
		return false
	}
	if in.AlgorithmAnnotation != other.AlgorithmAnnotation {
		return false
	}
	if in.EnableHealthCheckNodePort != other.EnableHealthCheckNodePort {
		return false
	}
	if in.LBPressureMetricsInterval != other.LBPressureMetricsInterval {
		return false
	}
	if in.LBSockTerminateAllProtos != other.LBSockTerminateAllProtos {
		return false
	}
	if in.EnableServiceTopology != other.EnableServiceTopology {
		return false
	}
	if in.InitWaitTimeout != other.InitWaitTimeout {
		return false
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !lockdebug
package lock
import (
"log/slog"
"sync"
)
// internalRWMutex is the non-lockdebug implementation: a plain
// sync.RWMutex (this file is built with //go:build !lockdebug).
type internalRWMutex struct {
	sync.RWMutex
}

// UnlockIgnoreTime unlocks without hold-time accounting; in this build it
// is identical to Unlock.
func (i *internalRWMutex) UnlockIgnoreTime() {
	i.RWMutex.Unlock()
}

// internalMutex is the non-lockdebug implementation: a plain sync.Mutex.
type internalMutex struct {
	sync.Mutex
}

// UnlockIgnoreTime unlocks without hold-time accounting; in this build it
// is identical to Unlock.
func (i *internalMutex) UnlockIgnoreTime() {
	i.Mutex.Unlock()
}

// SetLogger is a no-op in the non-lockdebug build.
func SetLogger(*slog.Logger) {
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import "sync"
// Map is a thin generic wrapper around sync.Map. The sync.Map description from
// the standard library follows (and is also propagated to the corresponding
// methods) for users' convenience:
//
// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
type Map[K comparable, V any] sync.Map
// MapCmpValues is an extension of Map, which additionally wraps the two extra
// methods requiring values to be also of comparable type.
type MapCmpValues[K, V comparable] Map[K, V]
// Load returns the value stored in the map for a key, or the zero value if no
// value is present. The ok result indicates whether value was found in the map.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
val, ok := (*sync.Map)(m).Load(key)
return m.convert(val, ok)
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
val, loaded := (*sync.Map)(m).LoadOrStore(key, value)
return val.(V), loaded
}
// LoadAndDelete deletes the value for a key, returning the previous value if any
// (zero value otherwise). The loaded result reports whether the key was present.
func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	val, loaded := (*sync.Map)(m).LoadAndDelete(key)
	// convert maps a missing entry to the zero value of V.
	return m.convert(val, loaded)
}
// Store sets the value for a key.
func (m *Map[K, V]) Store(key K, value V) {
	(*sync.Map)(m).Store(key, value)
}
// Swap swaps the value for a key and returns the previous value if any (zero
// value otherwise). The loaded result reports whether the key was present.
func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) {
	val, loaded := (*sync.Map)(m).Swap(key, value)
	return m.convert(val, loaded)
}
// Delete deletes the value for a key.
func (m *Map[K, V]) Delete(key K) {
	(*sync.Map)(m).Delete(key)
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
	(*sync.Map)(m).Range(func(key, value any) bool {
		// Entries are only inserted through the typed methods, so these
		// assertions cannot fail.
		return f(key.(K), value.(V))
	})
}
// CompareAndDelete deletes the entry for key if its value is equal to old.
// If there is no current value for key in the map, CompareAndDelete returns false
// (even if the old value is the nil interface value).
func (m *MapCmpValues[K, V]) CompareAndDelete(key K, old V) (deleted bool) {
	return (*sync.Map)(m).CompareAndDelete(key, old)
}
// CompareAndSwap swaps the old and new values for key if the value stored in
// the map is equal to old. It reports whether the swap happened.
func (m *MapCmpValues[K, V]) CompareAndSwap(key K, old, new V) bool {
	return (*sync.Map)(m).CompareAndSwap(key, old, new)
}
// convert narrows a raw (any, bool) result from the underlying sync.Map to
// the typed (V, bool) shape, substituting V's zero value when absent.
func (m *Map[K, V]) convert(value any, ok bool) (V, bool) {
	var zero V
	if !ok {
		return zero, false
	}
	return value.(V), true
}
// IsEmpty reports whether the map holds no entries. It inspects at most one
// element: the range callback stops the iteration on the first entry seen.
func (m *Map[K, V]) IsEmpty() bool {
	empty := true
	m.Range(func(K, V) bool {
		empty = false
		return false // stop after the first entry
	})
	return empty
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"context"
"golang.org/x/sync/semaphore"
)
// SemaphoredMutex is a semaphored mutex that provides a RWLocker interface.
// Writers acquire the full semaphore weight (maxReaders); readers acquire
// weight 1, so up to maxReaders readers may hold the lock concurrently.
type SemaphoredMutex struct {
	semaphore *semaphore.Weighted
}
// maxReaders uses the same value set in `go/src/sync/rwmutex.go#rwmutexMaxReaders`.
const maxReaders = 1 << 30
// NewSemaphoredMutex returns a new SemaphoredMutex.
func NewSemaphoredMutex() SemaphoredMutex {
	sem := semaphore.NewWeighted(maxReaders)
	return SemaphoredMutex{semaphore: sem}
}
// Lock acquires the mutex for writing by taking the full semaphore weight,
// excluding all readers and other writers.
func (i *SemaphoredMutex) Lock() {
	// It's fine ignoring error since the error is only caused by passing a
	// context with a deadline.
	i.semaphore.Acquire(context.Background(), maxReaders)
}
// UnlockToRLock releases the current lock for writing but it still keeps it
// for reading purposes.
func (i *SemaphoredMutex) UnlockToRLock() {
	// Release all but one unit of weight: the retained unit is the read lock.
	i.semaphore.Release(maxReaders - 1)
}
// Unlock releases the write lock acquired by Lock.
func (i *SemaphoredMutex) Unlock() {
	i.semaphore.Release(maxReaders)
}
// RLock acquires the mutex for reading; up to maxReaders readers may hold it.
func (i *SemaphoredMutex) RLock() {
	// It's fine ignoring error since the error is only caused by passing a
	// context with a deadline.
	i.semaphore.Acquire(context.Background(), 1)
}
// RUnlock releases a read lock acquired by RLock (or retained by UnlockToRLock).
func (i *SemaphoredMutex) RUnlock() {
	i.semaphore.Release(1)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"sort"
"sync"
"sync/atomic"
"time"
)
// sortableMutexSeq is a global sequence counter for the creation of new
// SortableMutex's with unique sequence numbers.
var sortableMutexSeq atomic.Uint64
// sortableMutex implements SortableMutex. Not exported as the only way to
// initialize it is via NewSortableMutex().
type sortableMutex struct {
	sync.Mutex
	// seq is the unique, creation-ordered sequence number used for sorting.
	seq uint64
	// acquireDuration accumulates total time spent blocked in Lock().
	acquireDuration time.Duration
}
// Lock acquires the underlying mutex, accumulating the time spent blocked
// into acquireDuration.
func (s *sortableMutex) Lock() {
	t0 := time.Now()
	s.Mutex.Lock()
	s.acquireDuration += time.Since(t0)
}
// Seq returns the mutex's unique sequence number.
func (s *sortableMutex) Seq() uint64 { return s.seq }
// AcquireDuration returns the cumulative time spent blocked in Lock().
func (s *sortableMutex) AcquireDuration() time.Duration { return s.acquireDuration }
// SortableMutex provides a Mutex that can be globally sorted with other
// sortable mutexes. This allows deadlock-safe locking of a set of mutexes
// as it guarantees consistent lock ordering.
type SortableMutex interface {
	sync.Locker
	// Seq returns the unique, creation-ordered sequence number.
	Seq() uint64
	AcquireDuration() time.Duration // The amount of time it took to acquire the lock
}
// SortableMutexes is a set of mutexes that can be locked in a safe order.
// Once Lock() is called it must not be mutated!
type SortableMutexes []SortableMutex
// Len implements sort.Interface.
func (s SortableMutexes) Len() int {
	return len(s)
}
// Less implements sort.Interface; ordering is by sequence number.
func (s SortableMutexes) Less(i int, j int) bool {
	return s[i].Seq() < s[j].Seq()
}
// Swap implements sort.Interface.
func (s SortableMutexes) Swap(i int, j int) {
	s[i], s[j] = s[j], s[i]
}
// Lock sorts the mutexes, and then locks them in order. If any lock cannot be acquired,
// this will block while holding the locks with a lower sequence number.
func (s SortableMutexes) Lock() {
	// Sorting by sequence number guarantees a consistent global lock order,
	// which makes locking multiple sets of mutexes deadlock-safe.
	sort.Sort(s)
	for _, mu := range s {
		mu.Lock()
	}
}
// Unlock releases the sorted set of mutexes locked by a prior call to Lock().
func (s SortableMutexes) Unlock() {
	for _, mu := range s {
		mu.Unlock()
	}
}
// Compile-time check that SortableMutexes implements sort.Interface.
var _ sort.Interface = SortableMutexes{}
// NewSortableMutex returns a new SortableMutex with a unique, monotonically
// increasing sequence number used for global lock ordering.
func NewSortableMutex() SortableMutex {
	seq := sortableMutexSeq.Add(1)
	return &sortableMutex{
		seq: seq,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"sync"
"sync/atomic"
)
// A StoppableWaitGroup waits for a collection of goroutines to finish.
type StoppableWaitGroup struct {
	// noopDone is closed once the counter drops to zero or below after Stop().
	noopDone chan struct{}
	// noopAdd is closed by Stop(); afterwards Add() becomes a no-op.
	noopAdd chan struct{}
	// i is the internal counter, which can tolerate negative values,
	// as opposed to the golang library's WaitGroup.
	i atomic.Int64
	doneOnce, stopOnce sync.Once
}
// NewStoppableWaitGroup returns a new StoppableWaitGroup. Once 'Stop' is
// executed, subsequent 'Add()' calls become no-ops.
func NewStoppableWaitGroup() *StoppableWaitGroup {
	// The sync.Once fields start at their usable zero value.
	swg := &StoppableWaitGroup{
		noopDone: make(chan struct{}),
		noopAdd:  make(chan struct{}),
	}
	return swg
}
// Stop makes following 'Add()' to be considered a no-op.
// If all goroutines that have called Add also called Done, 'Wait()' will
// be immediately unblocked.
func (l *StoppableWaitGroup) Stop() {
	l.stopOnce.Do(func() {
		// We will do an Add here so we can perform a Done after we close
		// the l.noopAdd channel.
		done := l.Add()
		close(l.noopAdd)
		// Calling done() here so we know that in case 'l.i' will become zero
		// it will trigger a close of l.noopDone channel.
		done()
	})
}
// Wait will return once all goroutines that have called Add also called
// Done and StoppableWaitGroup was stopped.
// Internally, Wait() returns once the internal counter reaches zero or less
// (done() closes noopDone when the counter is <= 0 after Stop()).
func (l *StoppableWaitGroup) Wait() {
	<-l.noopDone
}
// WaitChannel will return a channel that will be closed once all goroutines
// that have called Add also called Done and StoppableWaitGroup was stopped.
func (l *StoppableWaitGroup) WaitChannel() <-chan struct{} {
	return l.noopDone
}
// DoneFunc returned by Add() marks the goroutine as completed.
type DoneFunc func()
// Add adds the goroutine to the list of routines to that Wait() will have
// to wait before it returns.
// If the StoppableWaitGroup was stopped this will be a no-op.
// Returns a "done" function to mark the goroutine as completed. Wait() is
// unblocked once all done functions obtained before Stop() have been called.
func (l *StoppableWaitGroup) Add() DoneFunc {
	select {
	case <-l.noopAdd:
		// Already stopped: hand back a no-op so callers need no special case.
		return func() {}
	default:
		l.i.Add(1)
		// once guards against a caller invoking the returned DoneFunc twice,
		// which would otherwise decrement the counter more than once.
		var once sync.Once
		return func() {
			once.Do(l.done)
		}
	}
}
// done will decrement the number of goroutines the Wait() will have to wait
// before it returns.
// This function is a no-op once all goroutines that have called 'Add()' have
// also called 'Done()' and the StoppableWaitGroup was stopped.
func (l *StoppableWaitGroup) done() {
	select {
	case <-l.noopDone:
		// Wait() has already been unblocked; nothing left to account for.
		return
	default:
		select {
		case <-l.noopAdd:
			// Stopped: once the counter reaches zero or below, unblock Wait().
			a := l.i.Add(-1)
			if a <= 0 {
				l.doneOnce.Do(func() {
					close(l.noopDone)
				})
			}
		default:
			a := l.i.Add(-1)
			select {
			// in case the channel was closed while we were in this default
			// case we will need to check if 'a' is zero or less and close
			// the l.noopDone channel.
			case <-l.noopAdd:
				if a <= 0 {
					l.doneOnce.Do(func() {
						close(l.noopDone)
					})
				}
			default:
			}
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"bufio"
"flag"
"fmt"
"io"
"log/slog"
"regexp"
"strings"
"k8s.io/klog/v2"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// klogErrorOverrides demotes specific, known-benign klog ERROR lines to a
// lower slog level before they are emitted.
var klogErrorOverrides = []logLevelOverride{
	{
		// TODO: We can drop the misspelled case here once client-go version is bumped to include:
		// https://github.com/kubernetes/client-go/commit/ae43527480ee9d8750fbcde3d403363873fd3d89
		matcher: regexp.MustCompile("Failed to update lock (optimitically|optimistically).*falling back to slow path"),
		targetLevel: slog.LevelInfo,
	},
}
// initializeKLog bridges klog (used by k8s client-go) into the given slog
// logger: one pipe per klog severity, each forwarded at the matching slog
// level. ERROR lines additionally honor klogErrorOverrides.
func initializeKLog(logger *slog.Logger) error {
	log := logger.With(logfields.LogSubsys, "klog")
	// Create a new flag set and set error handler
	klogFlags := flag.NewFlagSet("cilium", flag.ExitOnError)
	// Make sure that klog logging variables are initialized so that we can
	// update them from this file.
	klog.InitFlags(klogFlags)
	// Make sure klog does not log to stderr as we want it to control the output
	// of klog so we want klog to log the errors to each writer of each level.
	klogFlags.Set("logtostderr", "false")
	// We don't need all headers because the slog handler will already print
	// them if necessary.
	klogFlags.Set("skip_headers", "true")
	// Each error message now names the writer being set up (previously all
	// four copy-pasted "error writer").
	infoWriter, err := severityOverrideWriter(slog.LevelInfo, log, nil)
	if err != nil {
		return fmt.Errorf("failed to setup klog info writer: %w", err)
	}
	warnWriter, err := severityOverrideWriter(slog.LevelWarn, log, nil)
	if err != nil {
		return fmt.Errorf("failed to setup klog warning writer: %w", err)
	}
	errWriter, err := severityOverrideWriter(slog.LevelError, log, klogErrorOverrides)
	if err != nil {
		return fmt.Errorf("failed to setup klog error writer: %w", err)
	}
	fatalWriter, err := severityOverrideWriter(LevelPanic, log, nil)
	if err != nil {
		return fmt.Errorf("failed to setup klog fatal writer: %w", err)
	}
	klog.SetOutputBySeverity("INFO", infoWriter)
	klog.SetOutputBySeverity("WARNING", warnWriter)
	klog.SetOutputBySeverity("ERROR", errWriter)
	klog.SetOutputBySeverity("FATAL", fatalWriter)
	// Do not repeat log messages on all severities in klog
	klogFlags.Set("one_output", "true")
	return nil
}
// logLevelOverride re-routes log lines matching matcher to targetLevel
// instead of the writer's default level.
type logLevelOverride struct {
	matcher *regexp.Regexp
	targetLevel slog.Level
}
// levelToPrintFunc maps a slog.Level to the corresponding print function of
// log. It returns an error for unsupported levels.
func levelToPrintFunc(log *slog.Logger, level slog.Level) (func(msg string, args ...any), error) {
	var printFunc func(msg string, args ...any)
	switch level {
	case slog.LevelInfo:
		printFunc = log.Info
	case slog.LevelWarn:
		printFunc = log.Warn
	case slog.LevelError:
		printFunc = log.Error
	case LevelPanic:
		printFunc = func(msg string, args ...any) {
			// Spread the variadic args. The original passed the slice as a
			// single argument, nesting all attrs into one []any value.
			Panic(log, msg, args...)
		}
	case LevelFatal:
		printFunc = func(msg string, args ...any) {
			Fatal(log, msg, args...)
		}
	default:
		return nil, fmt.Errorf("unsupported log level %q", level)
	}
	return printFunc, nil
}
// severityOverrideWriter returns a pipe writer whose input is scanned line by
// line and forwarded to log at the given level, except for lines matching one
// of the overrides, which are emitted at the override's target level instead.
func severityOverrideWriter(level slog.Level, log *slog.Logger, overrides []logLevelOverride) (*io.PipeWriter, error) {
	printFunc, err := levelToPrintFunc(log, level)
	if err != nil {
		return nil, err
	}
	// Validate the override target levels before allocating the pipe so an
	// error return does not leak an open, never-closed pipe.
	for _, override := range overrides {
		if _, err := levelToPrintFunc(log, override.targetLevel); err != nil {
			return nil, fmt.Errorf("failed to validate klog matcher level overrides (%s -> %s): %w",
				override.matcher.String(), level, err)
		}
	}
	reader, writer := io.Pipe()
	go writerScanner(log, reader, printFunc, overrides)
	return writer, nil
}
// writerScanner scans the input from the reader and writes it to the appropriate
// log print func.
// In cases where the log message is overridden, that will be emitted via the specified
// target log level logger function.
//
// Based on code from logrus WriterLevel implementation [1]
//
// [1] https://github.com/sirupsen/logrus/blob/v1.9.3/writer.go#L66-L97
func writerScanner(
	entry *slog.Logger,
	reader *io.PipeReader,
	defaultPrintFunc func(msg string, args ...any),
	overrides []logLevelOverride) {
	defer reader.Close()
	scanner := bufio.NewScanner(reader)
	// Set the buffer size to the maximum token size to avoid buffer overflows
	scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
	// Define a split function to split the input into chunks of up to 64KB
	chunkSize := bufio.MaxScanTokenSize // 64KB
	splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
		if len(data) >= chunkSize {
			return chunkSize, data[:chunkSize], nil
		}
		return bufio.ScanLines(data, atEOF)
	}
	// Use the custom split function to split the input
	scanner.Split(splitFunc)
	// Scan the input and write it to the logger using the specified print function
	for scanner.Scan() {
		line := scanner.Text()
		matched := false
		for _, override := range overrides {
			printFn, err := levelToPrintFunc(entry, override.targetLevel)
			if err != nil {
				entry.Error("BUG: failed to get printer for klog override matcher",
					logfields.Error, err,
					logfields.Matcher, override.matcher,
				)
				continue
			}
			// MatchString is the correct predicate: the previous
			// FindString(line) != "" form would misreport a legitimate
			// empty-string match as "no match".
			if override.matcher.MatchString(line) {
				printFn(strings.TrimRight(line, "\r\n"))
				matched = true
				break
			}
		}
		if !matched {
			// Reuse the already-extracted line rather than calling Text() again.
			defaultPrintFunc(strings.TrimRight(line, "\r\n"))
		}
	}
	if err := scanner.Err(); err != nil {
		entry.Error("klog slog override scanner stopped scanning with an error. "+
			"This may mean that k8s client-go logs will no longer be emitted", logfields.Error, err)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"time"
"golang.org/x/time/rate"
)
// Limiter is a wrapper around rate.Limiter that does not panic when
// the limiter is uninitialized. The wrapping also allows more logging
// specific functionality to be added later without changing all the call
// sites.
type Limiter struct {
	// bucket is nil for the zero Limiter, which imposes no limit (see Allow).
	bucket *rate.Limiter
}
// NewLimiter returns a new Limiter allowing log messages to be emitted on
// average once every 'interval' and up to 'burst' messages during any
// 'interval'.
func NewLimiter(interval time.Duration, burst int) Limiter {
	bucket := rate.NewLimiter(rate.Every(interval), burst)
	return Limiter{bucket: bucket}
}
// Allow reports whether a log message may be emitted under the configured
// rate limit. The zero (uninitialized) Limiter imposes no limit.
func (ll Limiter) Allow() bool {
	if ll.bucket != nil {
		return ll.bucket.Allow()
	}
	return true // limiter not initialized => no limit
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"context"
"errors"
"fmt"
"log/slog"
"regexp"
"slices"
"strings"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// LogFormat names a supported log output encoding.
type LogFormat string
const (
	Syslog = "syslog"
	LevelOpt = "level"
	FormatOpt = "format"
	WriterOpt = "writer"
	StdOutOpt = "stdout"
	StdErrOpt = "stderr"
	LogFormatText LogFormat = "text"
	LogFormatTextTimestamp LogFormat = "text-ts"
	LogFormatJSON LogFormat = "json"
	LogFormatJSONTimestamp LogFormat = "json-ts"
	// DefaultLogFormatTimestamp is the string representation of the default log
	// format including timestamps.
	// We don't use this for general runtime logs since kubernetes log capture handles those.
	// This is only used for applications such as CNI which is written to disk so we have no
	// way to correlate with other logs.
	DefaultLogFormatTimestamp LogFormat = LogFormatTextTimestamp
	// DefaultLogLevel is the default log level we want to use for our logs.
	DefaultLogLevel = slog.LevelInfo
)
// LevelPanic and LevelFatal extend slog's built-in levels above LevelError;
// replaceAttrFn renders them as "panic" and "fatal".
var (
	LevelPanic = slog.LevelError + 8
	LevelFatal = LevelPanic + 2
)
// Pre-boxed slog values of the custom levels, used by replaceAttrFn for
// cheap equality checks.
var (
	levelPanicValue = slog.AnyValue(LevelPanic)
	levelFatalValue = slog.AnyValue(LevelFatal)
)
// LogOptions maps configuration key-value pairs related to logging.
type LogOptions map[string]string
// GetLogLevel returns the log level specified in the provided LogOptions,
// falling back to DefaultLogLevel when the option is absent or unparsable.
func (o LogOptions) GetLogLevel() (level slog.Level) {
	levelOpt, found := o[LevelOpt]
	if !found {
		return DefaultLogLevel
	}
	parsed, err := ParseLevel(levelOpt)
	if err != nil {
		DefaultSlogLogger.Warn("Ignoring user-configured log level", logfields.Error, err)
		return DefaultLogLevel
	}
	return parsed
}
// GetLogFormat returns the log format specified in the provided LogOptions. If
// it is not set in the options or is invalid, it will return the default format.
func (o LogOptions) GetLogFormat() LogFormat {
	formatOpt, ok := o[FormatOpt]
	if !ok {
		return DefaultLogFormatTimestamp
	}
	formatOpt = strings.ToLower(formatOpt)
	// A switch over the four valid formats replaces the previous
	// regexp.MustCompile, which recompiled the pattern on every call.
	switch LogFormat(formatOpt) {
	case LogFormatText, LogFormatTextTimestamp, LogFormatJSON, LogFormatJSONTimestamp:
		return LogFormat(formatOpt)
	default:
		DefaultSlogLogger.Warn(
			"Ignoring user-configured log format",
			logfields.Error, fmt.Errorf("incorrect log format configured '%s', expected 'text', 'text-ts', 'json' or 'json-ts'", formatOpt),
		)
		return DefaultLogFormatTimestamp
	}
}
// SetLogLevel updates the DefaultSlogLogger with a new slog.Level.
func SetLogLevel(logLevel slog.Level) {
	slogLeveler.Set(logLevel)
}
// SetDefaultLogLevel updates the DefaultSlogLogger with the DefaultLogLevel.
func SetDefaultLogLevel() {
	SetLogLevel(DefaultLogLevel)
}
// SetLogLevelToDebug updates the DefaultSlogLogger with slog.LevelDebug.
func SetLogLevelToDebug() {
	slogLeveler.Set(slog.LevelDebug)
}
// AddHandlers adds additional slog handlers to the default logger.
func AddHandlers(hooks ...slog.Handler) {
	defaultMultiSlogHandler.AddHandlers(hooks...)
}
// SetupLogging sets up each logging service provided in loggers and configures
// each logger with the provided logOpts.
func SetupLogging(loggers []string, logOpts LogOptions, tag string, debug bool) error {
	if debug {
		logOpts[LevelOpt] = "debug"
	}
	initializeSlog(logOpts, loggers)
	// always suppress the default logger so libraries don't print things
	slog.SetLogLoggerLevel(LevelPanic)
	// Configure every requested log driver with the user-provided settings.
	for _, logDriver := range loggers {
		if logDriver != Syslog {
			return fmt.Errorf("provided log driver %q is not a supported log driver", logDriver)
		}
		if err := setupSyslog(logOpts, tag, debug); err != nil {
			return fmt.Errorf("failed to set up syslog: %w", err)
		}
	}
	lock.SetLogger(DefaultSlogLogger)
	// Bridge klog to slog. Note that this will open multiple pipes and fork
	// background goroutines that are not cleaned up.
	return initializeKLog(DefaultSlogLogger)
}
// validateOpts iterates through all of the keys and values in logOpts, and errors out if
// the key in logOpts is not a key in supportedOpts, or the value of corresponding key is
// not listed in the value of validKVs.
func (o LogOptions) validateOpts(logDriver string, supportedOpts map[string]bool, validKVs map[string][]string) error {
	for key, value := range o {
		if !supportedOpts[key] {
			return fmt.Errorf("provided configuration key %q is not supported as a logging option for log driver %s", key, logDriver)
		}
		allowed, constrained := validKVs[key]
		if constrained && !slices.Contains(allowed, value) {
			return fmt.Errorf("provided configuration value %q is not a valid value for %q in log driver %s, valid values: %v", value, key, logDriver, allowed)
		}
	}
	return nil
}
// getLogDriverConfig returns a map containing the key-value pairs that start
// with string logDriver from map logOpts.
func getLogDriverConfig(logDriver string, logOpts LogOptions) LogOptions {
	keysToValidate := make(LogOptions)
	for k, v := range logOpts {
		// HasPrefix implements the documented "starts with" semantics. The
		// previous unanchored regexp (logDriver+".*") also matched keys that
		// merely contained the driver name, and needed a Fatal error path.
		if strings.HasPrefix(k, logDriver) {
			keysToValidate[k] = v
		}
	}
	return keysToValidate
}
// GetSlogLevel returns the log level of the given sloger by probing from the
// most verbose level (debug) upward and reporting the first enabled one;
// LevelInfo is returned when no probed level is enabled.
func GetSlogLevel(logger FieldLogger) slog.Level {
	switch {
	case logger.Enabled(context.Background(), slog.LevelDebug):
		return slog.LevelDebug
	case logger.Enabled(context.Background(), slog.LevelInfo):
		return slog.LevelInfo
	case logger.Enabled(context.Background(), slog.LevelWarn):
		return slog.LevelWarn
	case logger.Enabled(context.Background(), slog.LevelError):
		return slog.LevelError
	case logger.Enabled(context.Background(), LevelPanic):
		return LevelPanic
	case logger.Enabled(context.Background(), LevelFatal):
		return LevelFatal
	}
	return slog.LevelInfo
}
// ParseLevel takes a string level and returns the slog log level constant.
func ParseLevel(lvl string) (slog.Level, error) {
switch strings.ToUpper(lvl) {
case "DEBUG":
return slog.LevelDebug, nil
case "INFO":
return slog.LevelInfo, nil
case "WARN", "WARNING":
return slog.LevelWarn, nil
case "ERROR":
return slog.LevelError, nil
case "PANIC":
return LevelPanic, nil
case "FATAL":
return LevelFatal, nil
default:
return slog.LevelInfo, errors.New("unknown level " + lvl)
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !windows
package logging
import (
"log/slog"
"log/syslog"
)
// Option keys accepted by the syslog log driver.
const (
	SLevel = "syslog.level"
	SNetwork = "syslog.network"
	SAddress = "syslog.address"
	SSeverity = "syslog.severity"
	SFacility = "syslog.facility"
	STag = "syslog.tag"
)
var (
	// syslogOpts is the set of supported options for syslog configuration.
	syslogOpts = map[string]bool{
		SLevel: true,
		SNetwork: true,
		SAddress: true,
		SSeverity: true,
		SFacility: true,
		STag: true,
	}
	// From /usr/include/sys/syslog.h.
	syslogSeverityMap = map[string]syslog.Priority{
		"emerg": syslog.LOG_EMERG,
		"panic": syslog.LOG_EMERG,
		"alert": syslog.LOG_ALERT,
		"crit": syslog.LOG_CRIT,
		"err": syslog.LOG_ERR,
		"error": syslog.LOG_ERR,
		"warn": syslog.LOG_WARNING,
		"warning": syslog.LOG_WARNING,
		"notice": syslog.LOG_NOTICE,
		"info": syslog.LOG_INFO,
		"debug": syslog.LOG_DEBUG,
	}
	// From /usr/include/sys/syslog.h.
	syslogFacilityMap = map[string]syslog.Priority{
		"kern": syslog.LOG_KERN,
		"user": syslog.LOG_USER,
		"mail": syslog.LOG_MAIL,
		"daemon": syslog.LOG_DAEMON,
		"auth": syslog.LOG_AUTH,
		"syslog": syslog.LOG_SYSLOG,
		"lpr": syslog.LOG_LPR,
		"news": syslog.LOG_NEWS,
		"uucp": syslog.LOG_UUCP,
		"cron": syslog.LOG_CRON,
		"authpriv": syslog.LOG_AUTHPRIV,
		"ftp": syslog.LOG_FTP,
		"local0": syslog.LOG_LOCAL0,
		"local1": syslog.LOG_LOCAL1,
		"local2": syslog.LOG_LOCAL2,
		"local3": syslog.LOG_LOCAL3,
		"local4": syslog.LOG_LOCAL4,
		"local5": syslog.LOG_LOCAL5,
		"local6": syslog.LOG_LOCAL6,
		"local7": syslog.LOG_LOCAL7,
	}
	// syslogLevelMap maps slog.Level values to syslog.Priority levels.
	syslogLevelMap = map[slog.Level]syslog.Priority{
		LevelPanic: syslog.LOG_ALERT,
		LevelFatal: syslog.LOG_CRIT,
		slog.LevelError: syslog.LOG_ERR,
		slog.LevelWarn: syslog.LOG_WARNING,
		slog.LevelInfo: syslog.LOG_INFO,
		slog.LevelDebug: syslog.LOG_DEBUG,
	}
)
// mapStringPriorityToSlice returns the keys of m as a slice, in unspecified
// order. A nil map yields an empty, non-nil slice.
func mapStringPriorityToSlice(m map[string]syslog.Priority) []string {
	keys := make([]string, 0, len(m))
	for name := range m {
		keys = append(keys, name)
	}
	return keys
}
// setupSyslog sets up and configures syslog with the provided options in
// logOpts. If some options are not provided, sensible defaults are used.
// It Fatals via DefaultSlogLogger if the level cannot be parsed or the
// syslog connection cannot be established.
func setupSyslog(logOpts LogOptions, tag string, debug bool) error {
	opts := getLogDriverConfig(Syslog, logOpts)
	syslogOptValues := make(map[string][]string)
	syslogOptValues[SSeverity] = mapStringPriorityToSlice(syslogSeverityMap)
	syslogOptValues[SFacility] = mapStringPriorityToSlice(syslogFacilityMap)
	if err := opts.validateOpts(Syslog, syslogOpts, syslogOptValues); err != nil {
		return err
	}
	// "syslog.tag" overrides the tag supplied by the caller.
	if stag, ok := opts[STag]; ok {
		tag = stag
	}
	logLevel, ok := opts[SLevel]
	if !ok {
		if debug {
			logLevel = "debug"
		} else {
			logLevel = "info"
		}
	}
	// Validate provided log level.
	level, err := ParseLevel(logLevel)
	if err != nil {
		Fatal(DefaultSlogLogger, err.Error())
	}
	network := ""
	address := ""
	// Inherit severity from log level if syslog.severity is not specified explicitly
	severity := syslogLevelMap[level]
	// Default values for facility if not specified
	facility := syslog.LOG_KERN
	if networkStr, ok := opts[SNetwork]; ok {
		network = networkStr
	}
	if addressStr, ok := opts[SAddress]; ok {
		address = addressStr
	}
	if severityStr, ok := opts[SSeverity]; ok {
		severity = syslogSeverityMap[severityStr]
	}
	if facilityStr, ok := opts[SFacility]; ok {
		facility = syslogFacilityMap[facilityStr]
	}
	// Create syslog hook.
	h, err := NewSyslogHook(network, address, severity|facility, tag, level)
	if err != nil {
		Fatal(DefaultSlogLogger, err.Error())
	}
	AddHandlers(h)
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"context"
"errors"
"log/slog"
"github.com/cilium/cilium/pkg/lock"
)
// NewMultiSlogHandler creates a slog.Handler that fans records out to any
// number of underlying handlers, such as to output text and json.
func NewMultiSlogHandler(handler slog.Handler) *multiSlogHandler {
	// The mutex starts at its usable zero value.
	h := &multiSlogHandler{
		handlers: []slog.Handler{handler},
	}
	return h
}
// multiSlogHandler is a slog.Handler that forwards each record to a
// mutable, mutex-guarded list of child handlers.
type multiSlogHandler struct {
	// mu guards handlers against concurrent AddHandlers/SetHandler calls.
	mu lock.RWMutex
	handlers []slog.Handler
}
// Enabled reports whether at least one child handler is enabled for level.
func (i *multiSlogHandler) Enabled(ctx context.Context, level slog.Level) bool {
	i.mu.RLock()
	defer i.mu.RUnlock()
	enabled := false
	for _, h := range i.handlers {
		if h.Enabled(ctx, level) {
			enabled = true
			break
		}
	}
	return enabled
}
// Handle forwards the record to every child handler and joins any errors.
func (i *multiSlogHandler) Handle(ctx context.Context, record slog.Record) error {
	i.mu.RLock()
	defer i.mu.RUnlock()
	var errs error
	for _, h := range i.handlers {
		if err := h.Handle(ctx, record); err != nil {
			errs = errors.Join(errs, err)
		}
	}
	return errs
}
// WithAttrs returns a new multi-handler whose children each carry attrs.
func (i *multiSlogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	i.mu.RLock()
	defer i.mu.RUnlock()
	children := make([]slog.Handler, len(i.handlers))
	for idx, h := range i.handlers {
		children[idx] = h.WithAttrs(attrs)
	}
	return &multiSlogHandler{handlers: children}
}
// WithGroup returns a new multi-handler whose children each open group name.
func (i *multiSlogHandler) WithGroup(name string) slog.Handler {
	i.mu.RLock()
	defer i.mu.RUnlock()
	children := make([]slog.Handler, len(i.handlers))
	for idx, h := range i.handlers {
		children[idx] = h.WithGroup(name)
	}
	return &multiSlogHandler{handlers: children}
}
// AddHandlers appends handlers to the fan-out list.
func (i *multiSlogHandler) AddHandlers(handlers ...slog.Handler) {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.handlers = append(i.handlers, handlers...)
}
// SetHandler replaces the entire fan-out list with the single given handler.
func (i *multiSlogHandler) SetHandler(handler slog.Handler) {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.handlers = []slog.Handler{handler}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"context"
"log/slog"
"os"
"strings"
"sync/atomic"
"time"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// logrErrorKey is the key used by the logr library for the error parameter.
const logrErrorKey = "err"
// slogHandlerOpts are the base handler options shared by the default handlers.
var slogHandlerOpts = &slog.HandlerOptions{
	AddSource: false,
	Level: slogLeveler,
	ReplaceAttr: replaceAttrFn,
}
// slogLeveler is the dynamic level shared by the default handlers; adjusted
// via SetLogLevel / SetSlogLevel.
var slogLeveler = func() *slog.LevelVar {
	var levelVar slog.LevelVar
	levelVar.Set(slog.LevelInfo)
	return &levelVar
}()
// defaultMultiSlogHandler initially writes text to stderr; initializeSlog
// swaps in the configured handler.
var defaultMultiSlogHandler = NewMultiSlogHandler(slog.NewTextHandler(
	os.Stderr,
	slogHandlerOpts,
))
// Default slog logger. Will be overwritten once initializeSlog is called.
var DefaultSlogLogger = slog.New(defaultMultiSlogHandler)
// initializeSlog approximates the logrus output via slog for job groups
// during the transition phase. It installs the configured handler on
// defaultMultiSlogHandler based on the user-supplied options.
func initializeSlog(logOpts LogOptions, loggers []string) {
	opts := *slogHandlerOpts
	opts.Level = logOpts.GetLogLevel()
	// Emit source locations only at debug verbosity.
	opts.AddSource = opts.Level == slog.LevelDebug
	// Default to stderr; fall back to stdout only when the writer option
	// does not force stderr and no extra log drivers are configured.
	writer := os.Stderr
	if logOpts[WriterOpt] != StdErrOpt && len(loggers) == 0 {
		writer = os.Stdout
	}
	format := logOpts.GetLogFormat()
	// Pick the attribute replacer: with or without timestamps.
	switch format {
	case LogFormatJSON, LogFormatText:
		opts.ReplaceAttr = ReplaceAttrFnWithoutTimestamp
	case LogFormatJSONTimestamp, LogFormatTextTimestamp:
		opts.ReplaceAttr = replaceAttrFn
	}
	// Install either the JSON or the text handler.
	switch format {
	case LogFormatJSON, LogFormatJSONTimestamp:
		defaultMultiSlogHandler.SetHandler(slog.NewJSONHandler(writer, &opts))
	case LogFormatText, LogFormatTextTimestamp:
		defaultMultiSlogHandler.SetHandler(slog.NewTextHandler(writer, &opts))
	}
}
// ReplaceAttrFn is the exported wrapper around replaceAttrFn.
func ReplaceAttrFn(groups []string, a slog.Attr) slog.Attr {
	return replaceAttrFn(groups, a)
}
// replaceAttrFn normalizes attributes: timestamps get RFC3339Nano format,
// levels are lower-cased (with the custom panic/fatal levels named), and
// logr's "err" key is renamed to the canonical error field.
func replaceAttrFn(groups []string, a slog.Attr) slog.Attr {
	switch a.Key {
	case slog.TimeKey:
		// Adjust to timestamp format that logrus uses; except that we can't
		// force slog to quote the value like logrus does...
		return slog.String(slog.TimeKey, a.Value.Time().Format(time.RFC3339Nano))
	case slog.LevelKey:
		switch level := a.Value; {
		case level.Equal(levelFatalValue):
			return slog.Attr{
				Key: a.Key,
				Value: slog.StringValue("fatal"),
			}
		case level.Equal(levelPanicValue):
			return slog.Attr{
				Key: a.Key,
				Value: slog.StringValue("panic"),
			}
		}
		// Lower-case the log level
		return slog.Attr{
			Key: a.Key,
			Value: slog.StringValue(strings.ToLower(a.Value.String())),
		}
	case logrErrorKey:
		// Uniform the attribute identifying the error
		return slog.Attr{
			Key: logfields.Error,
			Value: a.Value,
		}
	}
	return a
}
// ReplaceAttrFnWithoutTimestamp behaves like replaceAttrFn but removes the
// timestamp attribute entirely.
func ReplaceAttrFnWithoutTimestamp(groups []string, a slog.Attr) slog.Attr {
	if a.Key == slog.TimeKey {
		// Drop timestamps
		return slog.Attr{}
	}
	return replaceAttrFn(groups, a)
}
// FieldLogger is the subset of *slog.Logger's method set used by this
// package; *slog.Logger satisfies it.
type FieldLogger interface {
	Handler() slog.Handler
	With(args ...any) *slog.Logger
	WithGroup(name string) *slog.Logger
	Enabled(ctx context.Context, level slog.Level) bool
	Log(ctx context.Context, level slog.Level, msg string, args ...any)
	LogAttrs(ctx context.Context, level slog.Level, msg string, attrs ...slog.Attr)
	Debug(msg string, args ...any)
	DebugContext(ctx context.Context, msg string, args ...any)
	Info(msg string, args ...any)
	InfoContext(ctx context.Context, msg string, args ...any)
	Warn(msg string, args ...any)
	WarnContext(ctx context.Context, msg string, args ...any)
	Error(msg string, args ...any)
	ErrorContext(ctx context.Context, msg string, args ...any)
}
// init installs a no-op exit handler so Fatal/Panic never dereference nil
// before RegisterExitHandler is called.
func init() {
	// Set a no-op exit handler to avoid nil dereference
	a := func() {}
	exitHandler.Store(&a)
}
// exitHandler is invoked by Fatal and Panic before they log and terminate.
var (
	exitHandler atomic.Pointer[func()]
)
// Fatal runs the registered exit handler, logs msg at LevelFatal, then
// terminates the process.
// NOTE(review): os.Exit(-1) is reported as status 255 on POSIX — confirm a
// conventional code such as 1 was not intended.
func Fatal(logger FieldLogger, msg string, args ...any) {
	(*exitHandler.Load())()
	logger.Log(context.Background(), LevelFatal, msg, args...)
	os.Exit(-1)
}
// Panic runs the registered exit handler, logs msg at LevelPanic, then panics
// with msg.
func Panic(logger FieldLogger, msg string, args ...any) {
	(*exitHandler.Load())()
	logger.Log(context.Background(), LevelPanic, msg, args...)
	panic(msg)
}
// RegisterExitHandler sets the function invoked by Fatal and Panic before
// they terminate.
func RegisterExitHandler(handler func()) {
	exitHandler.Store(&handler)
}
// SetSlogLevel updates the DefaultSlogLogger with a new slog.Level
func SetSlogLevel(logLevel slog.Level) {
	slogLeveler.Set(logLevel)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !windows
package logging
import (
"context"
"fmt"
"log/slog"
"log/syslog"
"strings"
"time"
)
// SyslogHook to send logs via syslog. It implements slog.Handler, rendering
// each record to a single line and writing it at the matching syslog level.
type SyslogHook struct {
	Writer *syslog.Writer
	SyslogNetwork string
	SyslogRaddr string
	// handler is a text handler over the syslog writer, used for level
	// filtering and the WithAttrs/WithGroup derivations.
	handler slog.Handler
}
// Enabled defers to the wrapped handler's level filter.
func (hook *SyslogHook) Enabled(ctx context.Context, level slog.Level) bool {
	return hook.handler.Enabled(ctx, level)
}
// Handle renders the record as "[timestamp] [level] message key=value ..."
// and writes it to syslog at the priority matching the record's level.
func (hook *SyslogHook) Handle(ctx context.Context, r slog.Record) error {
	timestamp := r.Time.Format(time.RFC3339)
	var logStr strings.Builder
	logStr.WriteString(fmt.Sprintf("[%s] [%s] %s", timestamp, r.Level.String(), r.Message))
	r.Attrs(func(a slog.Attr) bool {
		logStr.WriteString(fmt.Sprintf(" %s=%v", a.Key, a.Value))
		return true
	})
	str := logStr.String()
	switch r.Level {
	case LevelPanic:
		// Emit panic at Alert, consistent with syslogLevelMap which assigns
		// LOG_ALERT to LevelPanic (previously both panic and fatal used Crit).
		return hook.Writer.Alert(str)
	case LevelFatal:
		return hook.Writer.Crit(str)
	case slog.LevelError:
		return hook.Writer.Err(str)
	case slog.LevelWarn:
		return hook.Writer.Warning(str)
	case slog.LevelInfo:
		return hook.Writer.Info(str)
	case slog.LevelDebug:
		return hook.Writer.Debug(str)
	default:
		return hook.Writer.Info(str)
	}
}
// WithAttrs derives a handler carrying attrs from the wrapped text handler.
func (hook *SyslogHook) WithAttrs(attrs []slog.Attr) slog.Handler {
	return hook.handler.WithAttrs(attrs)
}
// WithGroup derives a handler opening group name from the wrapped text handler.
func (hook *SyslogHook) WithGroup(name string) slog.Handler {
	return hook.handler.WithGroup(name)
}
// NewSyslogHook dials the syslog daemon at raddr over network (empty values
// select the local syslog socket) and returns a hook that filters records at
// slogLevel before emitting them with the given priority and tag.
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string, slogLevel slog.Level) (*SyslogHook, error) {
	w, err := syslog.Dial(network, raddr, priority, tag)
	if err != nil {
		return nil, err
	}
	return &SyslogHook{
		Writer: w,
		SyslogNetwork: network,
		SyslogRaddr: raddr,
		handler: slog.NewTextHandler(w, &slog.HandlerOptions{
			AddSource: false,
			Level: slogLevel,
		}),
	}, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package mac
import (
"bytes"
"crypto/rand"
"encoding/hex"
"fmt"
"net"
)
// Untagged ethernet (IEEE 802.3) frame header len:
// 6 bytes destination MAC + 6 bytes source MAC + 2 bytes ethertype.
const EthHdrLen = 14
// Uint64MAC is the __u64 representation of a MAC address.
// It corresponds to the C mac_t type used in bpf/.
type Uint64MAC uint64

// String renders the address as colon-separated uppercase hex octets, least
// significant byte first — the inverse of the little-endian packing performed
// by MAC.Uint64.
func (m Uint64MAC) String() string {
	return fmt.Sprintf("%02X:%02X:%02X:%02X:%02X:%02X",
		byte(m),
		byte(m>>8),
		byte(m>>16),
		byte(m>>24),
		byte(m>>32),
		byte(m>>40),
	)
}
// MAC address is an net.HardwareAddr encapsulation to force cilium to only use MAC-48.
type MAC net.HardwareAddr

// String returns the string representation of m.
func (m MAC) String() string {
	return net.HardwareAddr(m).String()
}

// As8 returns the MAC as an array of 8 bytes for use in datapath configuration
// structs. This is 8 bytes due to padding of union macaddr.
func (m MAC) As8() [8]byte {
	var out [8]byte
	copy(out[:], m)
	return out
}

// ParseMAC parses s only as an IEEE 802 MAC-48.
// Longer formats accepted by net.ParseMAC (EUI-64, InfiniBand) are rejected.
func ParseMAC(s string) (MAC, error) {
	hw, err := net.ParseMAC(s)
	if err != nil {
		return nil, err
	}
	if len(hw) != 6 {
		return nil, fmt.Errorf("invalid MAC address %s", s)
	}
	return MAC(hw), nil
}
// Uint64 returns the MAC in uint64 format. The MAC is represented as little-endian in
// the returned value.
// Example:
//
//	m := MAC([]{0x11, 0x12, 0x23, 0x34, 0x45, 0x56})
//	v, err := m.Uint64()
//	fmt.Printf("0x%X", v) // 0x564534231211
func (m MAC) Uint64() (Uint64MAC, error) {
	if len(m) != 6 {
		return 0, fmt.Errorf("invalid MAC address %s", m.String())
	}
	// Fold the octets most-significant-first so m[0] ends up in the low byte.
	var packed uint64
	for i := 5; i >= 0; i-- {
		packed = packed<<8 | uint64(m[i])
	}
	return Uint64MAC(packed), nil
}
// MarshalJSON encodes the MAC as a quoted, colon-separated, lowercase hex
// string. A zero-length MAC encodes as the empty JSON string ""; any other
// non-MAC-48 length is an error.
func (m MAC) MarshalJSON() ([]byte, error) {
	switch len(m) {
	case 0:
		return []byte(`""`), nil
	case 6:
		return fmt.Appendf(nil, "\"%02x:%02x:%02x:%02x:%02x:%02x\"", m[0], m[1], m[2], m[3], m[4], m[5]), nil
	default:
		return nil, fmt.Errorf("invalid MAC address length %s", string(m))
	}
}

// MarshalIndentJSON returns the same encoding as MarshalJSON; prefix and
// indent are ignored because the value is a single JSON string.
func (m MAC) MarshalIndentJSON(prefix, indent string) ([]byte, error) {
	return m.MarshalJSON()
}
// UnmarshalJSON decodes a quoted MAC-48 of the form "xx:xx:xx:xx:xx:xx";
// the empty JSON string "" yields a zero MAC. Input with the wrong length,
// wrong number of colons, or non-hex digits is rejected.
func (m *MAC) UnmarshalJSON(data []byte) error {
	if len(data) == len([]byte(`""`)) {
		if m == nil {
			// Preserve historical behavior: a nil receiver is tolerated and
			// the decoded (empty) value is simply discarded.
			m = new(MAC)
		}
		*m = MAC{}
		return nil
	}
	// 17 characters of address plus the two surrounding quotes.
	if len(data) != 19 {
		return fmt.Errorf("invalid MAC address length %s", string(data))
	}
	data = data[1 : len(data)-1]
	macStr := bytes.ReplaceAll(data, []byte(`:`), []byte(``))
	if len(macStr) != 12 {
		return fmt.Errorf("invalid MAC address format")
	}
	macByte := make([]byte, hex.DecodedLen(len(macStr)))
	// Fix: the hex.Decode error used to be ignored, silently accepting
	// non-hex input (e.g. "zz:zz:zz:zz:zz:zz") as a garbage MAC.
	if _, err := hex.Decode(macByte, macStr); err != nil {
		return fmt.Errorf("invalid MAC address: %w", err)
	}
	*m = MAC{macByte[0], macByte[1], macByte[2], macByte[3], macByte[4], macByte[5]}
	return nil
}
// GenerateRandMAC generates a random unicast and locally administered MAC address.
// crypto/rand is used as the entropy source; the read error is surfaced wrapped.
func GenerateRandMAC() (MAC, error) {
	buf := make([]byte, 6)
	if _, err := rand.Read(buf); err != nil {
		// Error strings are lowercase per Go convention (staticcheck ST1005);
		// the original message started with "Unable".
		return nil, fmt.Errorf("unable to retrieve 6 rnd bytes: %w", err)
	}
	// Set locally administered addresses bit and reset multicast bit
	buf[0] = (buf[0] | 0x02) & 0xfe
	return buf, nil
}
// HaveMACAddrs returns true if all given network interfaces have L2 addr.
// An empty list trivially satisfies the condition.
func HaveMACAddrs(ifaces []string) bool {
	for _, name := range ifaces {
		if !HasMacAddr(name) {
			return false
		}
	}
	return true
}
// CArrayString returns a string which can be used for assigning the given
// MAC addr to "union macaddr" in C. Anything that is not exactly 6 bytes
// long (including nil) yields the all-zero initializer.
func CArrayString(m net.HardwareAddr) string {
	if len(m) != 6 {
		return "{0x0,0x0,0x0,0x0,0x0,0x0}"
	}
	return fmt.Sprintf("{0x%x,0x%x,0x%x,0x%x,0x%x,0x%x}",
		m[0], m[1], m[2], m[3], m[4], m[5])
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package mac
import (
"errors"
"net"
"github.com/vishvananda/netlink"
"github.com/cilium/cilium/pkg/datapath/linux/safenetlink"
)
// HasMacAddr returns true if the given network interface has L2 addr.
// A lookup failure (e.g. the interface does not exist) reports false.
func HasMacAddr(iface string) bool {
	link, err := safenetlink.LinkByName(iface)
	return err == nil && LinkHasMacAddr(link)
}
// LinkHasMacAddr returns true if the given network interface has L2 addr.
// An absent hardware address shows up as an empty HardwareAddr slice.
func LinkHasMacAddr(link netlink.Link) bool {
	return len(link.Attrs().HardwareAddr) != 0
}
// ReplaceMacAddressWithLinkName replaces the MAC address of the given link.
// A link that no longer exists is not an error: the call becomes a no-op.
func ReplaceMacAddressWithLinkName(ifName, macAddress string) error {
	link, err := safenetlink.LinkByName(ifName)
	if err != nil {
		// The interface disappearing underneath us is fine; nothing to update.
		if errors.As(err, &netlink.LinkNotFoundError{}) {
			return nil
		}
		return err
	}
	parsed, err := net.ParseMAC(macAddress)
	if err != nil {
		return err
	}
	return netlink.LinkSetHardwareAddr(link, parsed)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package eventsmap
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/hive/cell"
"github.com/cilium/cilium/pkg/bpf"
)
// Cell provides eventsmap.Map, which is the hive representation of the cilium
// events perf event ring buffer. The map itself is created by the lifecycle
// hook registered in newEventsMap.
var Cell = cell.Module(
	"events-map",
	"eBPF ring buffer of cilium events",
	cell.Provide(newEventsMap),
)
var (
	// MaxEntries is exported package state; nothing in this file assigns or
	// reads it — presumably set elsewhere at configuration time (TODO confirm
	// at the call sites).
	MaxEntries int
)

// Map is the opaque, hive-facing handle for the cilium events map.
type Map any
// newEventsMap constructs the events map cell value and registers a lifecycle
// hook that creates the underlying BPF perf event array on start, sized by
// the number of possible CPUs.
func newEventsMap(lifecycle cell.Lifecycle) bpf.MapOut[Map] {
	em := &eventsMap{}
	lifecycle.Append(cell.Hook{
		OnStart: func(ctx cell.HookContext) error {
			cpus, err := ebpf.PossibleCPU()
			if err != nil {
				return fmt.Errorf("failed to get number of possible CPUs: %w", err)
			}
			if err := em.init(cpus); err != nil {
				return fmt.Errorf("initializing events map: %w", err)
			}
			return nil
		},
		OnStop: func(ctx cell.HookContext) error {
			// We don't currently care for cleaning up.
			return nil
		},
	})
	return bpf.NewMapOut(Map(em))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package eventsmap
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/option"
)
const (
	// MapName is the BPF map name. The map is created as a perf event array
	// (see eventsMap.init) with one ring per possible CPU.
	MapName = "cilium_events"
)
// Key is the index into the prog array map.
type Key struct {
	index uint32
}

// Value is the program ID in the prog array map.
type Value struct {
	progID uint32
}

// String converts the key into a human readable string format.
func (k *Key) String() string { return fmt.Sprint(k.index) }

// New returns a fresh, zero-valued Key.
func (k *Key) New() bpf.MapKey { return &Key{} }

// String converts the value into a human readable string format.
func (v *Value) String() string { return fmt.Sprint(v.progID) }

// New returns a fresh, zero-valued Value.
func (v *Value) New() bpf.MapValue { return &Value{} }
// eventsMap wraps the bpf.Map backing the cilium events perf event array.
type eventsMap struct {
	m *bpf.Map
}

// init creates the events map in the kernel with maxEntries rings
// (the caller passes the possible-CPU count) and attaches the configured
// event buffer.
func (e *eventsMap) init(maxEntries int) error {
	eventsBPFMap := bpf.NewMap(MapName,
		ebpf.PerfEventArray,
		&Key{},
		&Value{},
		maxEntries,
		0)
	e.m = eventsBPFMap.WithEvents(option.Config.GetEventBufferConfig(MapName))
	return e.m.Create()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"errors"
"fmt"
"log/slog"
"os"
"slices"
"strings"
"github.com/cilium/ebpf"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/sync/singleflight"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// This file contains a Prometheus collector that collects the memory usage of
// BPF programs and maps. It iterates all BPF programs in the kernel, filters
// them by their name prefixes, and collects their memory usage and that of all
// related maps.
//
// While the approach taken may seem naive, the other obvious approach (finding
// entrypoints, recursively jumping through prog arrays to find tail calls)
// proved prohibitively slow beyond a small test cluster. Iterating prog arrays
// is expensive, and beyond a few dozen pods, syscall overhead started
// dominating and becoming slower than the bpftool-based implementation that was
// here before. Batch ops aren't implemented for prog arrays, so that's wasn't
// an option either.
//
// For now, settle on matching both the entrypoint and the tail call name
// prefixes and collecting associated maps.
// bpfUsage accumulates counts of BPF programs and maps visited, together with
// their kernel-reported memlock usage in bytes.
type bpfUsage struct {
	programs     uint64 // number of programs matched and accounted
	programBytes uint64 // sum of program memlock bytes
	maps         uint64 // number of distinct maps accounted
	mapBytes     uint64 // sum of map memlock bytes
}
// newBPFVisitor returns a visitor that accounts only for programs whose name
// starts with one of progPrefixes (all programs when the slice is empty).
func newBPFVisitor(progPrefixes []string) *bpfVisitor {
	v := bpfVisitor{progPrefixes: progPrefixes}
	v.programsVisited = make(map[ebpf.ProgramID]struct{})
	v.mapsVisited = make(map[ebpf.MapID]struct{})
	return &v
}
// bpfVisitor walks all BPF programs in the kernel and tallies their memory
// usage into the embedded bpfUsage, deduplicating programs and maps already
// seen (maps can be shared between programs).
type bpfVisitor struct {
	bpfUsage
	progPrefixes    []string                    // program name prefixes to match; empty matches all
	programsVisited map[ebpf.ProgramID]struct{} // guards against double-counting programs
	mapsVisited     map[ebpf.MapID]struct{}     // guards against double-counting shared maps
}
// Usage returns the memory usage of all BPF programs matching the filter
// specified in the constructor, as well as the memory usage of all maps
// associated with those programs.
func (v *bpfVisitor) Usage() (*bpfUsage, error) {
	// Walk program IDs until the kernel reports that there is no next one.
	for id := ebpf.ProgramID(0); ; {
		next, err := ebpf.ProgramGetNextID(id)
		if errors.Is(err, os.ErrNotExist) {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("get next program: %w", err)
		}
		id = next
		if err := v.visitProgram(id, v.progPrefixes); err != nil {
			return nil, fmt.Errorf("check program %d: %w", id, err)
		}
	}
	return &v.bpfUsage, nil
}
// visitProgram opens the given program by id and collects its memory usage and
// that of all maps it uses.
//
// If prefixes are specified, the program is only checked if its name starts
// with one of the prefixes. This is useful to omit programs that are not
// relevant for the caller.
func (v *bpfVisitor) visitProgram(id ebpf.ProgramID, prefixes []string) error {
	if _, seen := v.programsVisited[id]; seen {
		return nil
	}
	v.programsVisited[id] = struct{}{}
	prog, err := ebpf.NewProgramFromID(id)
	if errors.Is(err, os.ErrNotExist) {
		// The program was unloaded between enumeration and open; skip it.
		return nil
	}
	if err != nil {
		return fmt.Errorf("open program by id: %w", err)
	}
	defer prog.Close()
	info, err := prog.Info()
	if err != nil {
		return fmt.Errorf("get program info: %w", err)
	}
	// If a prefix is specified, check if the program name starts with at least
	// one of the prefixes. If not, skip the program.
	if len(prefixes) > 0 {
		matches := func(prefix string) bool { return strings.HasPrefix(info.Name, prefix) }
		if !slices.ContainsFunc(prefixes, matches) {
			return nil
		}
	}
	mem, ok := info.Memlock()
	if !ok {
		return fmt.Errorf("program %s has zero memlock", info.Name)
	}
	v.programs++
	v.programBytes += mem
	// NOTE(review): the MapIDs error is deliberately ignored here —
	// presumably best-effort on kernels where it is unavailable; confirm.
	maps, _ := info.MapIDs()
	for _, mapID := range maps {
		if err := v.visitMap(mapID); err != nil {
			return fmt.Errorf("check map id %d for program %s: %w", mapID, info.Name, err)
		}
	}
	return nil
}
// visitMap opens the given map by id and collects its memory usage.
func (v *bpfVisitor) visitMap(id ebpf.MapID) error {
	if _, seen := v.mapsVisited[id]; seen {
		return nil
	}
	v.mapsVisited[id] = struct{}{}
	bpfMap, err := ebpf.NewMapFromID(id)
	if errors.Is(err, os.ErrNotExist) {
		// The map went away between enumeration and open; skip it.
		return nil
	}
	if err != nil {
		return fmt.Errorf("open map by id: %w", err)
	}
	defer bpfMap.Close()
	info, err := bpfMap.Info()
	if err != nil {
		return fmt.Errorf("get map info: %w", err)
	}
	// Maps with BPF_F_NO_PREALLOC set (like LPMTrie) report a size of 0 when
	// empty. Zero memory usage can be valid for a map.
	usage, _ := info.Memlock()
	v.maps++
	v.mapBytes += usage
	return nil
}
// bpfCollector is a prometheus.Collector reporting the count and memlock
// usage of Cilium's BPF programs and maps. Collection walks kernel BPF
// state, so concurrent scrapes are deduplicated via the singleflight group.
type bpfCollector struct {
	logger *slog.Logger
	sfg    singleflight.Group // collapses concurrent Collect calls into one BPF walk

	bpfMapsCount      *prometheus.Desc
	bpfMapsMemory     *prometheus.Desc
	bpfProgramsCount  *prometheus.Desc
	bpfProgramsMemory *prometheus.Desc
}
// newbpfCollector builds the metric descriptors for BPF map/program count and
// memory usage. All metrics are plain gauges without labels.
func newbpfCollector(logger *slog.Logger) *bpfCollector {
	desc := func(name, help string) *prometheus.Desc {
		return prometheus.NewDesc(prometheus.BuildFQName(Namespace, "", name), help, nil, nil)
	}
	return &bpfCollector{
		logger:            logger,
		bpfMapsCount:      desc("bpf_maps", "Total count of BPF maps."),
		bpfMapsMemory:     desc("bpf_maps_virtual_memory_max_bytes", "BPF maps kernel max memory usage size in bytes."),
		bpfProgramsCount:  desc("bpf_progs", "Total count of BPF programs."),
		bpfProgramsMemory: desc("bpf_progs_virtual_memory_max_bytes", "BPF programs kernel max memory usage size in bytes."),
	}
}
// Describe implements prometheus.Collector by deriving the descriptors from
// an actual collection run.
func (s *bpfCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(s, ch)
}
// Collect implements prometheus.Collector. It walks the kernel's BPF programs
// with the "cil_"/"tail_" name prefixes and emits four gauges; errors are
// logged and result in no samples for this scrape.
func (s *bpfCollector) Collect(ch chan<- prometheus.Metric) {
	// Avoid querying BPF multiple times concurrently, if it happens, additional callers will wait for the
	// first one to finish and reuse its resulting values.
	res, err, _ := s.sfg.Do("collect", func() (any, error) {
		return newBPFVisitor([]string{"cil_", "tail_"}).Usage()
	})
	if err != nil {
		s.logger.Error("retrieving BPF maps & programs usage", logfields.Error, err)
		return
	}
	usage := res.(*bpfUsage)
	for _, sample := range []struct {
		desc  *prometheus.Desc
		value uint64
	}{
		{s.bpfMapsCount, usage.maps},
		{s.bpfMapsMemory, usage.mapBytes},
		{s.bpfProgramsCount, usage.programs},
		{s.bpfProgramsMemory, usage.programBytes},
	} {
		ch <- prometheus.MustNewConstMetric(sample.desc, prometheus.GaugeValue, float64(sample.value))
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"fmt"
"log/slog"
"reflect"
"github.com/cilium/hive/cell"
"github.com/prometheus/client_golang/prometheus"
pkgmetric "github.com/cilium/cilium/pkg/metrics/metric"
)
// Cell provides metrics registry and the 'metrics*' shell commands.
var Cell = cell.Module("metrics", "Metrics",
	// Provide the registry to the hive; it is also invoked in case no other
	// cell decides to use it as a dependency.
	cell.Provide(NewAgentRegistry),
	cell.Config(defaultRegistryConfig),
	cell.Config(defaultSamplerConfig),
	cell.Provide(
		metricsCommands,
		newSampler,
	),
)
// AgentCell provides metrics for the Cilium Agent. Includes [Cell] and sets up the global registry
// variable for legacy uses. The separation allows use of [Cell] without data race issues in
// parallel tests and without pulling in the legacy metrics.
var AgentCell = cell.Group(
	Cell,
	Metric(NewLegacyMetrics),
	cell.Invoke(
		func(logger *slog.Logger, reg *Registry) {
			// Register the agent status and BPF metrics.
			// Don't register status and BPF collectors into the [r.collectors] as it is
			// expensive to sample and currently not terribly useful to keep data on.
			reg.inner.MustRegister(pkgmetric.EnabledCollector{C: newStatusCollector(logger)})
			reg.inner.MustRegister(pkgmetric.EnabledCollector{C: newbpfCollector(logger)})
			// Resolve the global registry variable for as long as we still have global functions
			registryResolver.Resolve(reg)
			// This is a hack to ensure that errors/warnings collected in the pre hive initialization
			// phase are emitted as metrics.
			FlushLoggingMetrics()
		},
	),
)
// OperatorCell provides the metrics registry, sampler and 'metrics*' shell
// commands for the Cilium Operator (without the agent-only legacy metrics).
var OperatorCell = cell.Module("operator-metrics", "Operator Metrics",
	cell.Config(defaultSamplerConfig),
	cell.Provide(NewRegistry),
	cell.Provide(metricsCommands, newSampler),
)
// Metric constructs a new metric cell.
//
// This cell type provides `S` to the hive as returned by `ctor`, it also makes each individual field
// value available via the `hive-metrics` value group. Infrastructure components such as a registry,
// inspection tool, or documentation generator can collect all metrics in the hive via this value group.
//
// The `ctor` constructor must return a struct or pointer to a struct of type `S`. The returned struct
// must only contain public fields. All field types should implement the
// `github.com/cilium/cilium/pkg/metrics/metric.WithMetadata`
// and `github.com/prometheus/client_golang/prometheus.Collector` interfaces.
//
// All validation is done via reflection on the type S at cell-construction
// time; violations panic so that a misconfigured hive fails fast on startup.
func Metric[S any](ctor func() S) cell.Cell {
	// Interface "witness" values used below to obtain the reflect.Type of the
	// two required interfaces.
	var (
		withMeta  pkgmetric.WithMetadata
		collector prometheus.Collector
	)
	// Inspect the zero value of S; deref a pointer to get at the struct type.
	var nilOut S
	outTyp := reflect.TypeOf(nilOut)
	if outTyp.Kind() == reflect.Ptr {
		outTyp = outTyp.Elem()
	}
	if outTyp.Kind() != reflect.Struct {
		panic(fmt.Errorf(
			"metrics.Metric must be invoked with a constructor function that returns a struct or pointer to a struct, "+
				"a constructor which returns a %s was supplied",
			outTyp.Kind(),
		))
	}
	// Let's be strict for now, could lift this in the future if we ever need to
	if outTyp.NumField() == 0 {
		panic(fmt.Errorf(
			"metrics.Metric must be invoked with a constructor function that returns exactly a struct with at least 1 " +
				"metric, a constructor which returns a struct with zero fields was supplied",
		))
	}
	withMetaTyp := reflect.TypeOf(&withMeta).Elem()
	collectorTyp := reflect.TypeOf(&collector).Elem()
	// Every field must be exported and implement both interfaces.
	for i := range outTyp.NumField() {
		field := outTyp.Field(i)
		if !field.IsExported() {
			panic(fmt.Errorf(
				"The struct returned by the constructor passed to metrics.Metric has a private field '%s', which "+
					"is not allowed. All fields on the returning struct must be exported",
				field.Name,
			))
		}
		if !field.Type.Implements(withMetaTyp) {
			panic(fmt.Errorf(
				"The struct returned by the constructor passed to metrics.Metric has a field '%s', which is not metric.WithMetadata.",
				field.Name,
			))
		}
		if !field.Type.Implements(collectorTyp) {
			panic(fmt.Errorf(
				"The struct returned by the constructor passed to metrics.Metric has a field '%s', which is not prometheus.Collector.",
				field.Name,
			))
		}
	}
	// provideMetrics[S] fans the struct's fields out into the value group.
	return cell.Provide(ctor, provideMetrics[S])
}
// hiveMetricOut is the cell output struct that flattens the individual
// metrics of a metric struct into the 'hive-metrics' value group.
type hiveMetricOut struct {
	cell.Out
	Metrics []pkgmetric.WithMetadata `group:"hive-metrics,flatten"`
}
// provideMetrics extracts every field of the metric struct S that implements
// metric.WithMetadata and publishes them into the 'hive-metrics' value group.
// Non-struct values (already ruled out by Metric's validation) yield an empty
// output.
func provideMetrics[S any](metricSet S) hiveMetricOut {
	v := reflect.ValueOf(metricSet)
	t := v.Type()
	if t.Kind() == reflect.Pointer {
		v, t = v.Elem(), t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return hiveMetricOut{}
	}
	var collected []pkgmetric.WithMetadata
	for i := range t.NumField() {
		if m, ok := v.Field(i).Interface().(pkgmetric.WithMetadata); ok {
			collected = append(collected, m)
		}
	}
	return hiveMetricOut{Metrics: collected}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"cmp"
_ "embed"
"encoding/json"
"fmt"
"html/template"
"io"
"maps"
"math"
"os"
"regexp"
"slices"
"sort"
"strings"
"text/tabwriter"
"github.com/cilium/hive"
"github.com/cilium/hive/script"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/spf13/pflag"
"go.yaml.in/yaml/v3"
"k8s.io/apimachinery/pkg/util/duration"
"github.com/cilium/cilium/api/v1/models"
)
// metricsCommands bundles the three metrics-related shell/script commands so
// they can be provided to the hive in one go.
func metricsCommands(r *Registry, dc *sampler) hive.ScriptCmdsOut {
	return hive.NewScriptCmds(map[string]script.Cmd{
		"metrics":      metricsCommand(r, dc),
		"metrics/plot": plotCommand(dc),
		"metrics/html": htmlCommand(dc),
	})
}
// metricsCommand implements the "metrics" script command. This can be accessed
// in script tests, via "cilium-dbg shell" or indirectly via 'cilium-dbg metrics list'.
// It lists either the currently registered metrics or (with --sampled) the
// sampler's recorded history, optionally filtered by a regex and written to a file.
func metricsCommand(r *Registry, dc *sampler) script.Cmd {
	return script.Command(
		script.CmdUsage{
			Summary: "List registered metrics",
			Args:    "[match regex]",
			Flags: func(fs *pflag.FlagSet) {
				fs.StringP("out", "o", "", "Output file")
				fs.BoolP("sampled", "s", false, "Show sampled metrics")
				fs.StringP("format", "f", "table", "Output format, one of: table, json or yaml")
			},
			// Tell the script engine which positional argument is a regexp:
			// the first non-flag argument, or the one following a "--".
			RegexpArgs: func(rawArgs ...string) []int {
				for i, arg := range rawArgs {
					if !strings.HasPrefix(arg, "-") {
						return []int{i}
					}
					if arg == "--" {
						return []int{i + 1}
					}
				}
				return nil
			},
			Detail: []string{
				"To write the metrics to a file: 'metrics --out=/path/to/file'",
				"To show enabled metrics matching a regex: 'metrics foo.*'",
				"To show sampled metrics (enabled and disabled): 'metrics --sampled'",
				"",
				"The metric samples can be plotted with 'metrics/plot' command.",
				"",
				"Run 'metrics -h' for extended help of the flags.",
				"",
				"Metrics can be filtered with a regexp. The match is made",
				"against the metric name and its labels.",
				"For example 'metrics regen.*scope=total' would match the",
				"regenerations metric with one of the labels being scope=total",
				"",
				"In the sample output the 50th, 90th and 99th quantiles are shown",
				"for histograms, e.g. in '15ms / 30ms / 60ms' 50th is 15ms and so on.",
			},
		},
		func(s *script.State, args ...string) (script.WaitFunc, error) {
			file, err := s.Flags.GetString("out")
			if err != nil {
				return nil, err
			}
			sampled, err := s.Flags.GetBool("sampled")
			if err != nil {
				return nil, err
			}
			format, err := s.Flags.GetString("format")
			if err != nil {
				return nil, err
			}
			// Optional filter regex from the first positional argument.
			var re *regexp.Regexp
			if len(args) > 0 {
				var err error
				re, err = regexp.Compile(args[0])
				if err != nil {
					return nil, fmt.Errorf("regex: %w", err)
				}
			}
			// Write either to the requested file or to the script log.
			var w io.Writer
			if file != "" {
				f, err := os.OpenFile(s.Path(file), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
				if err != nil {
					return nil, err
				}
				w = f
				defer f.Close()
			} else {
				w = s.LogWriter()
			}
			if sampled {
				return nil, writeMetricsFromSamples(w, format, re, dc)
			}
			return nil, writeMetricsFromRegistry(w, format, re, r.inner)
		},
	)
}
// plotCommand implements the "metrics/plot" script command. This can be accessed
// in script tests, via "cilium-dbg shell" or indirectly via 'cilium-dbg metrics list'.
// It renders the first sampled metric matching the given regex as an ASCII
// line graph, optionally as a rate of change.
func plotCommand(dc *sampler) script.Cmd {
	return script.Command(
		script.CmdUsage{
			Summary: "Plot sampled metrics as a line graph",
			Args:    "[match regex]",
			Flags: func(fs *pflag.FlagSet) {
				fs.StringP("out", "o", "", "Output file")
				fs.Bool("rate", false, "Plot the rate of change")
			},
			// Same regexp-argument detection as the "metrics" command.
			RegexpArgs: func(rawArgs ...string) []int {
				for i, arg := range rawArgs {
					if !strings.HasPrefix(arg, "-") {
						return []int{i}
					}
					if arg == "--" {
						return []int{i + 1}
					}
				}
				return nil
			},
			Detail: []string{
				"The sampled metric is specified with the regex argument.",
				"Both the metric name and its labels are matched against.",
				"Use the 'metrics' command to search for the right regex.",
				"",
				"For example to plot the 'go_sched_latencies_seconds':",
				"",
				"cilium> metrics/plot go_sched_lat",
				"",
				"Or to plot the sysctl reconciliation durations:",
				"",
				"cilium> metrics/plot reconciler_duration.*sysctl",
				"",
				"Specify '-rate' to show the rate of change for a counter,",
				"for example to plot how many bytes are allocated per minute:",
				"",
				"cilium> metrics/plot --rate go.*heap_alloc_bytes",
			},
		},
		func(s *script.State, args ...string) (script.WaitFunc, error) {
			file, err := s.Flags.GetString("out")
			if err != nil {
				return nil, err
			}
			rate, err := s.Flags.GetBool("rate")
			if err != nil {
				return nil, err
			}
			var re *regexp.Regexp
			if len(args) > 0 {
				var err error
				re, err = regexp.Compile(args[0])
				if err != nil {
					return nil, fmt.Errorf("regex: %w", err)
				}
			}
			var w io.Writer
			if file != "" {
				f, err := os.OpenFile(s.Path(file), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
				if err != nil {
					return nil, err
				}
				w = f
				defer f.Close()
			} else {
				w = s.LogWriter()
			}
			// The sampler's state is shared with the collection loop.
			dc.mu.Lock()
			defer dc.mu.Unlock()
			if re == nil {
				fmt.Fprintln(w, "regexp needed to find metric")
				return nil, nil
			}
			// Sort for a deterministic choice when several metrics match.
			sampledMetrics := slices.Collect(maps.Values(dc.metrics))
			slices.SortFunc(sampledMetrics, func(a, b debugSamples) int {
				return cmp.Or(
					cmp.Compare(a.getName(), b.getName()),
					cmp.Compare(a.getLabels(), b.getLabels()),
				)
			})
			// Pick the first metric whose name+labels match the regex.
			var ds debugSamples
			matched := true
			for _, ds = range sampledMetrics {
				matched = re.MatchString(ds.getName() + ds.getLabels())
				if matched {
					break
				}
			}
			if !matched {
				fmt.Fprintf(w, "no metric found matching regexp %q", re.String())
				return nil, nil
			}
			samplingTimeSpan := dc.cfg.timeSpan()
			samplingInterval := dc.cfg.MetricsSamplingInterval
			// Histograms are plotted as three graphs (p50/p90/p99).
			switch ds := ds.(type) {
			case *gaugeOrCounterSamples:
				PlotSamples(w, rate, ds.getName(), ds.getLabels(), samplingTimeSpan, samplingInterval, ds.samples.grab(), ds.bits)
			case *histogramSamples:
				PlotSamples(w, rate, ds.getName()+" (p50)", ds.getLabels(), samplingTimeSpan, samplingInterval, ds.p50.grab(), ds.bits)
				fmt.Fprintln(w)
				PlotSamples(w, rate, ds.getName()+" (p90)", ds.getLabels(), samplingTimeSpan, samplingInterval, ds.p90.grab(), ds.bits)
				fmt.Fprintln(w)
				PlotSamples(w, rate, ds.getName()+" (p99)", ds.getLabels(), samplingTimeSpan, samplingInterval, ds.p99.grab(), ds.bits)
			}
			return nil, nil
		},
	)
}
// htmlTemplate holds the HTML page used by the 'metrics/html' command,
// embedded from dump.html.tmpl at build time.
//
//go:embed dump.html.tmpl
var htmlTemplate string
// htmlCommand implements the "metrics/html" script command: it renders all
// sampled metrics into a self-contained HTML page using htmlTemplate.
func htmlCommand(dc *sampler) script.Cmd {
	return script.Command(
		script.CmdUsage{
			Summary: "Produce a HTML file from the sampled metrics",
			Args:    "",
			Flags: func(fs *pflag.FlagSet) {
				fs.StringP("out", "o", "", "Output file")
			},
			Detail: []string{},
		},
		func(s *script.State, args ...string) (script.WaitFunc, error) {
			file, err := s.Flags.GetString("out")
			if err != nil {
				return nil, err
			}
			// Write either to the requested file or to the script log.
			var w io.Writer
			if file != "" {
				f, err := os.OpenFile(s.Path(file), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
				if err != nil {
					return nil, err
				}
				w = f
				defer f.Close()
			} else {
				w = s.LogWriter()
			}
			dc.mu.Lock()
			defer dc.mu.Unlock()
			dump := JSONSampleDump{
				NumSamples:      numSamples,
				IntervalSeconds: int(dc.cfg.MetricsSamplingInterval.Seconds()),
			}
			for _, ds := range dc.metrics {
				dump.Samples = append(dump.Samples, ds.getJSON())
			}
			// Deterministic ordering by name, then labels.
			slices.SortFunc(dump.Samples, func(a, b JSONSamples) int {
				return cmp.Or(
					cmp.Compare(a.Name, b.Name),
					cmp.Compare(a.Labels, b.Labels),
				)
			})
			tmpl, err := template.New("metrics.html").Parse(htmlTemplate)
			if err != nil {
				return nil, err
			}
			return nil, tmpl.Execute(w, &dump)
		},
	)
}
// writeMetricsFromSamples dumps the sampler's recorded history to outw in the
// requested format ("json", "yaml" or "table"), optionally filtered by a
// regex matched against name+labels. The table columns show the newest sample
// and samples a quarter, half and the full sampling window back in time.
func writeMetricsFromSamples(outw io.Writer, format string, re *regexp.Regexp, dc *sampler) error {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	samplingInterval := dc.cfg.MetricsSamplingInterval
	// Deterministic ordering by name, then labels.
	sampledMetrics := slices.Collect(maps.Values(dc.metrics))
	slices.SortFunc(sampledMetrics, func(a, b debugSamples) int {
		return cmp.Or(
			cmp.Compare(a.getName(), b.getName()),
			cmp.Compare(a.getLabels(), b.getLabels()),
		)
	})
	switch format {
	case "json", "yaml":
		dump := JSONSampleDump{
			NumSamples:      numSamples,
			IntervalSeconds: int(dc.cfg.MetricsSamplingInterval.Seconds()),
		}
		for _, ds := range sampledMetrics {
			if re != nil && !re.MatchString(ds.getName()+ds.getLabels()) {
				continue
			}
			dump.Samples = append(dump.Samples, ds.getJSON())
		}
		if format == "json" {
			enc := json.NewEncoder(outw)
			enc.SetIndent("", " ")
			return enc.Encode(dump)
		} else {
			enc := yaml.NewEncoder(outw)
			return enc.Encode(dump)
		}
	case "table":
		w := tabwriter.NewWriter(outw, 5, 0, 3, ' ', 0)
		defer w.Flush()
		// Header shows the age of each sample column as a human duration.
		_, err := fmt.Fprintf(w, "Metric\tLabels\t%s\t%s\t%s\t%s\n",
			duration.HumanDuration(samplingInterval),
			duration.HumanDuration((1+quarterIndex)*samplingInterval),
			duration.HumanDuration((1+halfIndex)*samplingInterval),
			duration.HumanDuration((1+lastIndex)*samplingInterval),
		)
		if err != nil {
			return err
		}
		for _, ds := range sampledMetrics {
			if re != nil && !re.MatchString(ds.getName()+ds.getLabels()) {
				continue
			}
			sZero, sQuarter, sHalf, sLast := ds.get()
			_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", ds.getName(), ds.getLabels(), sZero, sQuarter, sHalf, sLast)
			if err != nil {
				return err
			}
		}
		return nil
	default:
		return fmt.Errorf("unknown format %q", format)
	}
}
// writeMetricsFromRegistry gathers the current values from the Prometheus
// registry and writes them to w in the requested format ("json", "yaml" or
// "table"), optionally filtered by a regex matched against name+labels.
func writeMetricsFromRegistry(w io.Writer, format string, re *regexp.Regexp, reg *prometheus.Registry) error {
	metrics, err := reg.Gather()
	if err != nil {
		return fmt.Errorf("gather: %w", err)
	}
	var (
		// Since Gather() collects the metrics in unsorted order, we need
		// to collect the lines we want to write and then sort them.
		lines       []string
		jsonMetrics []models.Metric
	)
	for _, val := range metrics {
		metricName := val.GetName()
		metricType := val.GetType()
		for _, metric := range val.Metric {
			value, valueS := getMetricValue(metricName, metricType, metric)
			label := joinLabels(metric.GetLabel())
			if re != nil && !re.MatchString(metricName+label) {
				continue
			}
			if format == "table" {
				lines = append(lines, fmt.Sprintf("%s\t%s\t%s\n", metricName, label, valueS))
			} else {
				jsonMetrics = append(jsonMetrics,
					models.Metric{
						Name:   metricName,
						Labels: labelsMap(metric.GetLabel()),
						Value:  value,
					})
			}
		}
	}
	switch format {
	case "json":
		enc := json.NewEncoder(w)
		enc.SetIndent("", " ")
		return enc.Encode(jsonMetrics)
	case "yaml":
		enc := yaml.NewEncoder(w)
		return enc.Encode(jsonMetrics)
	case "table":
		sort.Strings(lines)
		tw := tabwriter.NewWriter(w, 5, 0, 3, ' ', 0)
		defer tw.Flush()
		if _, err := fmt.Fprintln(tw, "Metric\tLabels\tValue"); err != nil {
			return err
		}
		for _, l := range lines {
			_, err := tw.Write([]byte(l))
			if err != nil {
				return err
			}
		}
		return nil
	default:
		return fmt.Errorf("unknown format %q", format)
	}
}
// getMetricValue produces a single representative value out of the metric.
// For histograms the numeric value is the p90; summaries return 0 since
// only their textual quantile listing is meaningful. Metrics whose name
// ends in "seconds" get an "s" suffix on the pretty-printed values.
func getMetricValue(name string, typ dto.MetricType, m *dto.Metric) (float64, string) {
	suffix := ""
	if strings.HasSuffix(name, "seconds") {
		suffix = "s"
	}
	switch typ {
	case dto.MetricType_COUNTER:
		v := m.Counter.GetValue()
		return v, fmt.Sprintf("%f", v)
	case dto.MetricType_GAUGE:
		v := m.Gauge.GetValue()
		return v, fmt.Sprintf("%f", v)
	case dto.MetricType_SUMMARY:
		quantiles := m.Summary.Quantile
		parts := make([]string, 0, len(quantiles))
		for _, q := range quantiles {
			parts = append(parts, fmt.Sprintf("p%d(%s%s)", int(100.0*(*q.Quantile)), prettyValue(*q.Value), suffix))
		}
		return 0.0, strings.Join(parts, " ")
	case dto.MetricType_HISTOGRAM:
		buckets := convertHistogram(m.Histogram)
		p50 := getHistogramQuantile(buckets, 0.50)
		p90 := getHistogramQuantile(buckets, 0.90)
		p99 := getHistogramQuantile(buckets, 0.99)
		return p90, fmt.Sprintf("%s%s / %s%s / %s%s",
			prettyValue(p50), suffix, prettyValue(p90), suffix, prettyValue(p99), suffix)
	default:
		return -1, fmt.Sprintf("(?%s)", typ)
	}
}
// joinLabels renders label pairs as a space-separated "name=value" list.
func joinLabels(labels []*dto.LabelPair) string {
	parts := make([]string, len(labels))
	for i, lp := range labels {
		parts[i] = lp.GetName() + "=" + lp.GetValue()
	}
	return strings.Join(parts, " ")
}
// labelsMap converts label pairs into a plain name→value map.
func labelsMap(labels []*dto.LabelPair) map[string]string {
	out := make(map[string]string, len(labels))
	for _, lp := range labels {
		out[lp.GetName()] = lp.GetValue()
	}
	return out
}
// prettyValue formats v with four significant digits and an SI magnitude
// suffix, e.g. 1500 -> "1.5k", 0.002 -> "2m".
func prettyValue(v float64) string {
	unit, multp := chooseUnit(v)
	return fmt.Sprintf("%.4g%s", v*multp, unit)
}

// chooseUnit picks an SI suffix and the multiplier that scales v into it.
// The decision is made on the absolute value, so negative numbers scale the
// same way as positive ones; exactly zero keeps no suffix.
func chooseUnit(v float64) (string, float64) {
	v = math.Abs(v)
	switch {
	case v == 0.0:
		return "", 1.0
	case v > 1_000_000_000_000:
		return "T", 0.000_000_000_001
	case v > 1_000_000_000:
		return "G", 0.000_000_001
	case v > 1_000_000:
		return "M", 0.000_001
	case v > 1000:
		return "k", 0.001
	case v < 0.000_000_001:
		return "p", 1_000_000_000_000
	case v < 0.000_001:
		return "n", 1_000_000_000
	case v < 0.001:
		return "µ", 1_000_000
	case v < 1:
		return "m", 1000
	}
	return "", 1.0
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"cmp"
"math"
"slices"
"sort"
dto "github.com/prometheus/client_model/go"
)
// histogramBucket is one cumulative bucket of a classic Prometheus histogram:
// cumulativeCount observations fell at or below upperBound.
type histogramBucket struct {
	cumulativeCount uint64
	upperBound      float64
}

// convertHistogram flattens the protobuf histogram into a slice of buckets
// sorted by ascending upper bound.
func convertHistogram(h *dto.Histogram) []histogramBucket {
	histogram := make([]histogramBucket, len(h.GetBucket()))
	for i, b := range h.GetBucket() {
		histogram[i] = histogramBucket{b.GetCumulativeCount(), b.GetUpperBound()}
	}
	slices.SortFunc(histogram,
		func(a, b histogramBucket) int {
			return cmp.Compare(a.upperBound, b.upperBound)
		})
	return histogram
}
// subtractHistogram removes from 'a' the observations from 'b'.
// Both arguments must describe the same metric so the buckets line up
// exactly; any mismatch is a programming error and panics.
func subtractHistogram(a, b []histogramBucket) {
	if len(a) != len(b) {
		panic("impossible: histogram bucket sizes do not match")
	}
	for i, sub := range b {
		if a[i].upperBound != sub.upperBound {
			panic("impossible: different upper bounds")
		}
		a[i].cumulativeCount -= sub.cumulativeCount
	}
}
// histogramSampleCount returns the total number of observations, i.e. the
// cumulative count of the highest bucket (presumably the +Inf bucket when
// present — confirm that callers always include it).
func histogramSampleCount(histogram []histogramBucket) uint64 {
	if len(histogram) == 0 {
		return 0
	}
	return histogram[len(histogram)-1].cumulativeCount
}
// getHistogramQuantile calculates quantile from the Prometheus Histogram message.
// For example: getHistogramQuantile(h, 0.95) returns the 95th quantile.
// The buckets must be sorted by ascending upper bound (as produced by
// convertHistogram). Out-of-range quantiles clamp to -Inf/+Inf; an empty
// histogram or one with no observations yields 0.
func getHistogramQuantile(histogram []histogramBucket, quantile float64) float64 {
	if len(histogram) < 1 {
		return 0.0
	}
	if quantile < 0.0 {
		return math.Inf(-1)
	} else if quantile > 1.0 {
		return math.Inf(+1)
	}
	totalCount := histogram[len(histogram)-1].cumulativeCount
	if totalCount == 0 {
		return 0.0
	}
	// Find the bucket onto which the quantile falls
	// NOTE(review): the search space excludes the last bucket, so a rank past
	// the second-to-last bucket resolves to index len-1 and interpolates
	// against the last bucket's upper bound — confirm behavior when that
	// bound is +Inf.
	rank := quantile * float64(totalCount)
	index := sort.Search(
		len(histogram)-1,
		func(i int) bool {
			return float64(histogram[i].cumulativeCount) >= rank
		})
	if index == 0 {
		// Sample in first bucket, interpolate between 0.0..UpperBound within the bucket.
		return histogram[0].upperBound * (rank / float64(histogram[0].cumulativeCount))
	}
	// Return the linearly interpolated value between the upper bounds of the
	// two buckets in between which the quantile falls.
	start := histogram[index-1].upperBound
	end := histogram[index].upperBound
	relativeCount := float64(histogram[index].cumulativeCount - histogram[index-1].cumulativeCount)
	relativeRank := rank - float64(histogram[index-1].cumulativeCount)
	return start + (end-start)*(relativeRank/relativeCount)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/cilium/cilium/api/v1/client/daemon"
"github.com/cilium/cilium/api/v1/health/client/connectivity"
metricpkg "github.com/cilium/cilium/pkg/metrics/metric"
)
// daemonHealthGetter is the subset of the generated daemon API client needed
// here to fetch the agent health status.
type daemonHealthGetter interface {
	GetHealthz(params *daemon.GetHealthzParams, opts ...daemon.ClientOption) (*daemon.GetHealthzOK, error)
}

// connectivityStatusGetter is the subset of the generated health API client
// needed here to fetch connectivity status.
type connectivityStatusGetter interface {
	GetStatus(params *connectivity.GetStatusParams, opts ...connectivity.ClientOption) (*connectivity.GetStatusOK, error)
}
// No-op implementations of the metric interfaces defined in
// pkg/metrics/metric. All operations are discarded and every metric reports
// itself as disabled; usable wherever a metric is required but collection is
// not desired.
var (
	NoOpMetric    prometheus.Metric    = &mockMetric{}
	NoOpCollector prometheus.Collector = &collector{}

	NoOpCounter           metricpkg.Counter                       = &counter{NoOpMetric, NoOpCollector}
	NoOpCounterVec        metricpkg.Vec[metricpkg.Counter]        = &counterVec{NoOpCollector}
	NoOpObserver          metricpkg.Observer                      = &observer{}
	NoOpHistogram         metricpkg.Histogram                     = &histogram{NoOpCollector}
	NoOpObserverVec       metricpkg.Vec[metricpkg.Observer]       = &observerVec{NoOpCollector}
	NoOpGauge             metricpkg.Gauge                         = &gauge{NoOpMetric, NoOpCollector}
	NoOpGaugeVec          metricpkg.Vec[metricpkg.Gauge]          = &gaugeVec{NoOpCollector}
	NoOpGaugeDeletableVec metricpkg.DeletableVec[metricpkg.Gauge] = &gaugeDeletableVec{gaugeVec{NoOpCollector}}
)
// Metric
//
// mockMetric is a no-op prometheus.Metric.
type mockMetric struct{}

// *WARNING*: Desc returns nil so do not register this metric into prometheus
// default register.
func (m *mockMetric) Desc() *prometheus.Desc  { return nil }
func (m *mockMetric) Write(*dto.Metric) error { return nil }

// Collector
//
// collector is a no-op prometheus.Collector: it describes and collects
// nothing.
type collector struct{}

func (c *collector) Describe(chan<- *prometheus.Desc) {}
func (c *collector) Collect(chan<- prometheus.Metric) {}

// Counter
//
// counter is a no-op metricpkg.Counter: mutations are discarded, Get always
// returns 0, and the metric always reports itself as disabled.
type counter struct {
	prometheus.Metric
	prometheus.Collector
}

func (cv *counter) Add(float64)          {}
func (cv *counter) Get() float64         { return 0 }
func (cv *counter) Inc()                 {}
func (cv *counter) IsEnabled() bool      { return false }
func (cv *counter) SetEnabled(bool)      {}
func (cv *counter) Opts() metricpkg.Opts { return metricpkg.Opts{} }
// CounterVec
//
// counterVec is a no-op metricpkg.Vec[metricpkg.Counter]: every lookup
// succeeds and returns the no-op counter, and the vector always reports
// itself as disabled.
type counterVec struct{ prometheus.Collector }

// With and WithLabelValues return NoOpCounter. (They previously returned
// NoOpGauge, which only compiled because the no-op gauge happens to satisfy
// the Counter interface; NoOpCounter is the consistent value, matching
// GetMetricWith/GetMetricWithLabelValues below.)
func (cv *counterVec) With(prometheus.Labels) metricpkg.Counter    { return NoOpCounter }
func (cv *counterVec) WithLabelValues(...string) metricpkg.Counter { return NoOpCounter }
func (cv *counterVec) CurryWith(prometheus.Labels) (metricpkg.Vec[metricpkg.Counter], error) {
	return NoOpCounterVec, nil
}
func (cv *counterVec) MustCurryWith(prometheus.Labels) metricpkg.Vec[metricpkg.Counter] {
	return NoOpCounterVec
}
func (cv *counterVec) GetMetricWith(prometheus.Labels) (metricpkg.Counter, error) {
	return NoOpCounter, nil
}
func (cv *counterVec) GetMetricWithLabelValues(...string) (metricpkg.Counter, error) {
	return NoOpCounter, nil
}
func (cv *counterVec) IsEnabled() bool      { return false }
func (cv *counterVec) SetEnabled(bool)      {}
func (cv *counterVec) Opts() metricpkg.Opts { return metricpkg.Opts{} }
// Observer
//
// observer is a no-op metricpkg.Observer: observations are discarded and the
// metric always reports itself as disabled.
type observer struct{}

func (o *observer) Observe(float64)       {}
func (o *observer) IsEnabled() bool       { return false }
func (o *observer) SetEnabled(bool)       {}
func (o *observer) Opts() metricpkg.Opts  { return metricpkg.Opts{} }

// Histogram
//
// histogram is a no-op metricpkg.Histogram: observations are discarded and
// Desc returns nil (do not register with the default prometheus registry).
type histogram struct {
	prometheus.Collector
}

func (h *histogram) Observe(float64)        {}
func (h *histogram) Desc() *prometheus.Desc { return nil }
func (h *histogram) Write(*dto.Metric) error { return nil }
func (h *histogram) IsEnabled() bool        { return false }
func (h *histogram) SetEnabled(bool)        {}
func (h *histogram) Opts() metricpkg.Opts   { return metricpkg.Opts{} }
// ObserverVec
//
// observerVec is a no-op metricpkg.Vec[metricpkg.Observer]: every lookup
// succeeds and returns the no-op observer.
type observerVec struct {
	prometheus.Collector
}

func (ov *observerVec) GetMetricWith(prometheus.Labels) (metricpkg.Observer, error) {
	return NoOpObserver, nil
}
func (ov *observerVec) GetMetricWithLabelValues(lvs ...string) (metricpkg.Observer, error) {
	return NoOpObserver, nil
}

func (ov *observerVec) With(prometheus.Labels) metricpkg.Observer    { return NoOpObserver }
func (ov *observerVec) WithLabelValues(...string) metricpkg.Observer { return NoOpObserver }

func (ov *observerVec) CurryWith(prometheus.Labels) (metricpkg.Vec[metricpkg.Observer], error) {
	return NoOpObserverVec, nil
}
func (ov *observerVec) MustCurryWith(prometheus.Labels) metricpkg.Vec[metricpkg.Observer] {
	return NoOpObserverVec
}

func (ov *observerVec) IsEnabled() bool      { return false }
func (ov *observerVec) SetEnabled(bool)      {}
func (ov *observerVec) Opts() metricpkg.Opts { return metricpkg.Opts{} }
// Gauge
//
// gauge is a no-op metricpkg.Gauge: mutations are discarded, Get always
// returns 0, and the metric always reports itself as disabled.
type gauge struct {
	prometheus.Metric
	prometheus.Collector
}

func (g *gauge) Set(float64)          {}
func (g *gauge) Get() float64         { return 0 }
func (g *gauge) Inc()                 {}
func (g *gauge) Dec()                 {}
func (g *gauge) Add(float64)          {}
func (g *gauge) Sub(float64)          {}
func (g *gauge) SetToCurrentTime()    {}
func (g *gauge) IsEnabled() bool      { return false }
func (g *gauge) SetEnabled(bool)      {}
func (g *gauge) Opts() metricpkg.Opts { return metricpkg.Opts{} }

// GaugeDeletableVec
//
// gaugeDeletableVec is a no-op metricpkg.DeletableVec[metricpkg.Gauge]:
// deletions never match and Reset does nothing.
type gaugeDeletableVec struct {
	gaugeVec
}

func (*gaugeDeletableVec) Delete(ll prometheus.Labels) bool {
	return false
}

func (*gaugeDeletableVec) DeleteLabelValues(lvs ...string) bool {
	return false
}

func (*gaugeDeletableVec) DeletePartialMatch(labels prometheus.Labels) int {
	return 0
}

func (*gaugeDeletableVec) Reset() {}

// GaugeVec
//
// gaugeVec is a no-op metricpkg.Vec[metricpkg.Gauge]: every lookup succeeds
// and returns the no-op gauge.
type gaugeVec struct {
	prometheus.Collector
}

func (gv *gaugeVec) With(prometheus.Labels) metricpkg.Gauge    { return NoOpGauge }
func (gv *gaugeVec) WithLabelValues(...string) metricpkg.Gauge { return NoOpGauge }
func (gv *gaugeVec) CurryWith(prometheus.Labels) (metricpkg.Vec[metricpkg.Gauge], error) {
	return NoOpGaugeVec, nil
}
func (gv *gaugeVec) MustCurryWith(prometheus.Labels) metricpkg.Vec[metricpkg.Gauge] {
	return NoOpGaugeVec
}
func (gv *gaugeVec) GetMetricWith(prometheus.Labels) (metricpkg.Gauge, error) {
	return NoOpGauge, nil
}
func (gv *gaugeVec) GetMetricWithLabelValues(...string) (metricpkg.Gauge, error) {
	return NoOpGauge, nil
}
func (gv *gaugeVec) IsEnabled() bool      { return false }
func (gv *gaugeVec) SetEnabled(bool)      {}
func (gv *gaugeVec) Opts() metricpkg.Opts { return metricpkg.Opts{} }
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"context"
"fmt"
"io"
"log/slog"
"maps"
"os"
"strconv"
"sync"
"sync/atomic"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// slogDupAttrDetection enables a debug-only check that panics when the same
// attribute key is attached twice to the logging hook (see Handle/WithAttrs).
var slogDupAttrDetection = false

func init() {
	// Detector to check if we have duplicate attributes in slog logging hook.
	// Enabled via the CILIUM_SLOG_DUP_ATTR_DETECTOR environment variable;
	// a parse error leaves it disabled.
	slogDupAttrDetection, _ = strconv.ParseBool(os.Getenv("CILIUM_SLOG_DUP_ATTR_DETECTOR"))
}

var (
	// metricsInitialized is closed once the metrics registry exists; this
	// releases the flush goroutine started in NewLoggingHook. The goroutine
	// also sets the variable to nil after use.
	// NOTE(review): that nil-write is unsynchronized with the read below —
	// confirm it cannot race with FlushLoggingMetrics.
	metricsInitialized = make(chan struct{})
	// flushMetrics ensures the channel is closed at most once.
	flushMetrics = sync.Once{}
)

// FlushLoggingMetrics will cause all logging hook metrics accumulated prior
// to the errors_warnings metrics being registered with the Prometheus collector
// to be incremented to their respective errors_warnings metrics tuple.
func FlushLoggingMetrics() {
	flushMetrics.Do(func() {
		if metricsInitialized != nil {
			close(metricsInitialized)
		}
	})
}
// LoggingHook is a hook for logrus which counts error and warning messages as a
// Prometheus metric.
type LoggingHook struct {
	// errs and warn count messages observed before the metrics registry is
	// ready; they are flushed into ErrorsWarnings with source "init" once
	// metricsInitialized is closed.
	errs, warn *atomic.Uint64
	// th is an underlying slog handler used only for level filtering
	// (Enabled) and attr/group bookkeeping.
	th slog.Handler
	// attrs accumulates attributes added via WithAttrs, keyed by attribute
	// key; Handle reads the 'subsys' field from it.
	attrs map[string]slog.Value
}

// NewLoggingHook returns a new instance of LoggingHook for the given Cilium
// component.
func NewLoggingHook() *LoggingHook {
	lh := &LoggingHook{
		errs: &atomic.Uint64{},
		warn: &atomic.Uint64{},
		// We want to override the default level with slog.LevelWarn
		//nolint:sloglint
		th: slog.NewTextHandler(io.Discard, &slog.HandlerOptions{
			AddSource: false,
			Level:     slog.LevelWarn,
		}),
		attrs: make(map[string]slog.Value),
	}
	go func() {
		// This channel is closed after registry is created. At this point if the errs/warnings metric
		// is enabled we flush counts of errors/warnings we collected before the registry was created.
		// This is a hack to ensure that errors/warnings collected in the pre hive initialization
		// phase are emitted as metrics.
		// Because the ErrorsWarnings metric is a counter, this means that the rate of these errors won't be
		// accurate, however init errors can only happen during initialization so it probably doesn't make
		// a big difference in practice.
		<-metricsInitialized
		// NOTE(review): unsynchronized write to a package-level variable that
		// FlushLoggingMetrics also reads — confirm this cannot race.
		metricsInitialized = nil
		ErrorsWarnings.WithLabelValues(slog.LevelError.String(), "init").Add(float64(lh.errs.Load()))
		ErrorsWarnings.WithLabelValues(slog.LevelWarn.String(), "init").Add(float64(lh.warn.Load()))
	}()
	return lh
}
// Levels returns the list of logging levels on which the hook is triggered:
// only errors and warnings are counted.
func (h *LoggingHook) Levels() []slog.Level {
	return []slog.Level{
		slog.LevelError,
		slog.LevelWarn,
	}
}

// Enabled delegates to the underlying text handler, which is configured with
// slog.LevelWarn as its minimum level (see NewLoggingHook).
func (h *LoggingHook) Enabled(ctx context.Context, level slog.Level) bool {
	return h.th.Enabled(ctx, level)
}
// Handle counts the record as an error or warning and increments the
// ErrorsWarnings metric labeled by level and the 'subsys' attribute.
// It returns an error when no string-valued 'subsys' attribute is available.
func (h *LoggingHook) Handle(ctx context.Context, record slog.Record) error {
	// Get information about subsystem from logging entry field.
	// NOTE(review): when slogDupAttrDetection is off, only attributes
	// registered via WithAttrs are consulted — a 'subsys' attribute present
	// solely on the record itself is not found. Confirm intended.
	logSysValue, logSysPresent := h.attrs[logfields.LogSubsys]
	if slogDupAttrDetection {
		// Debug-only pass over the record's own attributes: panic on any key
		// that duplicates one registered via WithAttrs, and on more than one
		// 'subsys' attribute overall.
		var i int
		if logSysPresent {
			i = 1
		}
		record.Attrs(func(attr slog.Attr) bool {
			if attr.Key == logfields.LogSubsys {
				logSysPresent = true
				logSysValue = attr.Value
				i++
			}
			if v, ok := h.attrs[attr.Key]; ok {
				panic(fmt.Sprintf("duplicate attribute: %q. existing-value=%s, new-value=%s", attr.Key, v, attr.Value))
			}
			// NOTE(review): this duplicates the check after the loop; the
			// in-loop variant merely panics as soon as the count exceeds one.
			if i > 1 {
				panic(fmt.Sprintf("more than one subsys found in %s", record.Message))
			}
			return true
		})
		if i > 1 {
			panic(fmt.Sprintf("more than one subsys found in %s", record.Message))
		}
	}
	if !logSysPresent {
		return fmt.Errorf("log entry doesn't contain 'subsys' field: %s", record.Message)
	}
	if logSysValue.Kind() != slog.KindString {
		return fmt.Errorf("type of the 'subsystem' log entry field is not string but %s", logSysValue)
	}
	// We count errors/warnings outside of the prometheus metric.
	switch record.Level {
	case slog.LevelError:
		h.errs.Add(1)
	case slog.LevelWarn:
		h.warn.Add(1)
	}
	// Increment the metric.
	ErrorsWarnings.WithLabelValues(record.Level.String(), logSysValue.String()).Inc()
	return nil
}
// WithAttrs returns a copy of the hook whose attribute map additionally
// contains the given attributes. The error/warning counters are shared with
// the receiver; the attribute map is cloned so the receiver is unmodified.
func (h *LoggingHook) WithAttrs(attrs []slog.Attr) slog.Handler {
	lh := &LoggingHook{errs: h.errs, warn: h.warn, th: h.th.WithAttrs(attrs)}
	lh.attrs = maps.Clone(h.attrs)
	for _, attr := range attrs {
		if slogDupAttrDetection {
			// Debug-only: panic if the key was already registered.
			// NOTE(review): the check runs against h.attrs while writes go to
			// lh.attrs, so duplicates *within* the attrs slice itself are not
			// detected — only clashes with previously registered keys.
			if v, ok := h.attrs[attr.Key]; ok {
				panic(fmt.Sprintf("duplicate attribute: %q. existing-value=%s, new-value=%s", attr.Key, v, attr.Value))
			}
		}
		lh.attrs[attr.Key] = attr.Value
	}
	return lh
}
// WithGroup returns a copy of the hook with the group applied to the
// underlying handler, sharing the error/warning counters.
//
// NOTE(review): unlike WithAttrs, this does not carry over h.attrs, so a
// 'subsys' attribute registered before WithGroup is lost on the returned
// handler (Handle would then return an error) — confirm this is intended.
func (h *LoggingHook) WithGroup(name string) slog.Handler {
	return &LoggingHook{errs: h.errs, warn: h.warn, th: h.th.WithGroup(name)}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package collections
// CartesianProduct returns the cartesian product of the input vectors as
// a vector of vectors, each with length the same as the number of input vectors.
//
// The first input vector varies fastest in the output. For example:
//
//	CartesianProduct([]string{"a", "b"}, []string{"x", "y", "z"})
//
// yields, in order:
//
//	["a" "x"] ["b" "x"] ["a" "y"] ["b" "y"] ["a" "z"] ["b" "z"]
func CartesianProduct[T any](vs ...[]T) [][]T {
	if len(vs) == 0 {
		return [][]T{}
	}
	width := len(vs) // Each output row has one element per input vector.
	// The number of output rows is the product of all input lengths.
	rows := 1
	for _, v := range vs {
		rows *= len(v)
	}
	out := make([][]T, rows)
	for r := range out {
		out[r] = make([]T, width)
	}
	// Fill the output column by column. blockSize is the number of rows
	// generated by permuting all columns to the left of the current one;
	// each value of the current vector is repeated across one such block.
	blockSize := 1
	for col, v := range vs {
		permuteColumn(out, col, blockSize, v)
		blockSize *= len(v)
	}
	return out
}

// permuteColumn fills in the col-th column of the output rows of the
// cartesian product of the input vectors.
//
// leftPermSize is the number of rows produced by permuting columns
// 0,..,col-1. Each element of vec is repeated for leftPermSize consecutive
// rows so that every previous permutation is paired with every value of vec,
// cycling through vec until all rows are filled.
func permuteColumn[T any](dst [][]T, col int, leftPermSize int, vec []T) {
	row := 0
	for row < len(dst) {
		// Pick the vec element for this block of rows.
		val := vec[(row/leftPermSize)%len(vec)]
		for end := row + leftPermSize; row < end; row++ {
			dst[row][col] = val
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metric
import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
// NewCounter creates a new Counter wrapping a prometheus.Counter and carrying
// the provided options as metadata. The metric starts enabled unless
// opts.Disabled is set.
func NewCounter(opts CounterOpts) Counter {
	return &counter{
		Counter: prometheus.NewCounter(opts.toPrometheus()),
		metric: metric{
			enabled: !opts.Disabled,
			opts:    Opts(opts),
		},
	}
}

// Counter is a prometheus.Counter extended with Cilium metric metadata and a
// Get accessor for the current value.
type Counter interface {
	prometheus.Counter
	WithMetadata

	Get() float64
}

// counter implements Counter by embedding the prometheus counter and the
// shared metadata base.
type counter struct {
	prometheus.Counter
	metric
}
// Get returns the counter's current value, or 0 when the underlying metric
// cannot be serialized.
func (c *counter) Get() float64 {
	var pm dto.Metric
	if err := c.Counter.Write(&pm); err != nil {
		return 0
	}
	return *pm.Counter.Value
}
// NewCounterVec creates a new DeletableVec[Counter] based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *counterVec {
	return &counterVec{
		CounterVec: prometheus.NewCounterVec(opts.toPrometheus(), labelNames),
		metric: metric{
			enabled: !opts.Disabled,
			opts:    Opts(opts),
		},
	}
}

// NewCounterVecWithLabels creates a new DeletableVec[Counter] based on the provided CounterOpts and
// partitioned by the given labels.
// This will also initialize the labels with the provided values so that metrics with known label value
// ranges can be pre-initialized to zero upon init.
//
// This should only be used when all label values are known at init, otherwise use of the
// metric vector with uninitialized labels will result in warnings.
//
// Note: Disabled metrics will not have their label values initialized.
//
// For example:
//
//	NewCounterVecWithLabels(CounterOpts{
//		Namespace: "cilium",
//		Subsystem: "subsystem",
//		Name:      "cilium_test",
//		Disabled:  false,
//	}, Labels{
//		{Name: "foo", Values: NewValues("0", "1")},
//		{Name: "bar", Values: NewValues("a", "b")},
//	})
//
// Will initialize the following metrics to:
//
//	cilium_subsystem_cilium_test{foo="0", bar="a"} 0
//	cilium_subsystem_cilium_test{foo="0", bar="b"} 0
//	cilium_subsystem_cilium_test{foo="1", bar="a"} 0
//	cilium_subsystem_cilium_test{foo="1", bar="b"} 0
func NewCounterVecWithLabels(opts CounterOpts, labels Labels) *counterVec {
	cv := NewCounterVec(opts, labels.labelNames())
	initLabels[Counter](&cv.metric, labels, cv, opts.Disabled)
	return cv
}

// counterVec wraps a prometheus.CounterVec with the shared metadata base.
type counterVec struct {
	*prometheus.CounterVec
	metric
}
// CurryWith returns a counter vector curried with the provided labels,
// sharing this vector's metadata. Label constraints are checked (debug-only)
// before delegating.
func (cv *counterVec) CurryWith(labels prometheus.Labels) (Vec[Counter], error) {
	cv.checkLabels(labels)
	vec, err := cv.CounterVec.CurryWith(labels)
	if err != nil {
		return nil, err
	}
	return &counterVec{CounterVec: vec, metric: cv.metric}, nil
}

// GetMetricWith returns the counter for the given labels, or an error.
func (cv *counterVec) GetMetricWith(labels prometheus.Labels) (Counter, error) {
	promCounter, err := cv.CounterVec.GetMetricWith(labels)
	if err != nil {
		return nil, err
	}
	return &counter{Counter: promCounter, metric: cv.metric}, nil
}

// GetMetricWithLabelValues returns the counter for the given label values,
// or an error.
func (cv *counterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
	promCounter, err := cv.CounterVec.GetMetricWithLabelValues(lvs...)
	if err != nil {
		return nil, err
	}
	return &counter{Counter: promCounter, metric: cv.metric}, nil
}

// With returns the counter for the given labels, after the (debug-only)
// label constraint check.
func (cv *counterVec) With(labels prometheus.Labels) Counter {
	cv.checkLabels(labels)
	return &counter{Counter: cv.CounterVec.With(labels), metric: cv.metric}
}

// WithLabelValues returns the counter for the given label values, after the
// (debug-only) label constraint check.
func (cv *counterVec) WithLabelValues(lvs ...string) Counter {
	cv.checkLabelValues(lvs...)
	return &counter{Counter: cv.CounterVec.WithLabelValues(lvs...), metric: cv.metric}
}

// SetEnabled toggles collection; disabling also resets all label children.
func (cv *counterVec) SetEnabled(e bool) {
	if !e {
		cv.Reset()
	}
	cv.metric.SetEnabled(e)
}
// CounterOpts are the options for creating a Counter; they mirror Opts.
type CounterOpts Opts

// toPrometheus converts the options to prometheus.CounterOpts. ConfigName
// and Disabled are Cilium-side metadata and are not forwarded.
func (co CounterOpts) toPrometheus() prometheus.CounterOpts {
	return prometheus.CounterOpts{
		Name:        co.Name,
		Namespace:   co.Namespace,
		Subsystem:   co.Subsystem,
		Help:        co.Help,
		ConstLabels: co.ConstLabels,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metric
import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
// NewGauge creates a new Gauge wrapping a prometheus.Gauge and carrying the
// provided options as metadata. The metric starts enabled unless
// opts.Disabled is set.
func NewGauge(opts GaugeOpts) Gauge {
	return &gauge{
		Gauge: prometheus.NewGauge(opts.toPrometheus()),
		metric: metric{
			enabled: !opts.Disabled,
			opts:    Opts(opts),
		},
	}
}

// Gauge is a prometheus.Gauge extended with Cilium metric metadata and a Get
// accessor for the current value.
type Gauge interface {
	prometheus.Gauge
	WithMetadata

	Get() float64
}

// gauge implements Gauge by embedding the prometheus gauge and the shared
// metadata base.
type gauge struct {
	prometheus.Gauge
	metric
}
// Get returns the gauge's current value, or 0 when the underlying metric
// cannot be serialized.
func (g *gauge) Get() float64 {
	var pm dto.Metric
	if err := g.Gauge.Write(&pm); err != nil {
		return 0
	}
	return *pm.Gauge.Value
}
// NewGaugeVec creates a new DeletableVec[Gauge] based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *gaugeVec {
	gv := &gaugeVec{
		GaugeVec: prometheus.NewGaugeVec(opts.toPrometheus(), labelNames),
		metric: metric{
			enabled: !opts.Disabled,
			opts:    Opts(opts),
		},
	}
	return gv
}

// NewGaugeVecWithLabels creates a new DeletableVec[Gauge] based on the provided GaugeOpts and
// partitioned by the given labels.
// This will also initialize the labels with the provided values so that metrics with known label value
// ranges can be pre-initialized to zero upon init.
//
// This should only be used when all label values are known at init, otherwise use of the
// metric vector with uninitialized labels will result in warnings.
//
// Note: Disabled metrics will not have their label values initialized.
//
// For example:
//
//	NewGaugeVecWithLabels(GaugeOpts{
//		Namespace: "cilium",
//		Subsystem: "subsystem",
//		Name:      "cilium_test",
//		Disabled:  false,
//	}, Labels{
//		{Name: "foo", Values: NewValues("0", "1")},
//		{Name: "bar", Values: NewValues("a", "b")},
//	})
//
// Will initialize the following metrics to:
//
//	cilium_subsystem_cilium_test{foo="0", bar="a"} 0
//	cilium_subsystem_cilium_test{foo="0", bar="b"} 0
//	cilium_subsystem_cilium_test{foo="1", bar="a"} 0
//	cilium_subsystem_cilium_test{foo="1", bar="b"} 0
func NewGaugeVecWithLabels(opts GaugeOpts, labels Labels) *gaugeVec {
	gv := NewGaugeVec(opts, labels.labelNames())
	initLabels[Gauge](&gv.metric, labels, gv, opts.Disabled)
	return gv
}

// gaugeVec wraps a prometheus.GaugeVec with the shared metadata base.
type gaugeVec struct {
	*prometheus.GaugeVec
	metric
}
// CurryWith returns a gauge vector curried with the provided labels, sharing
// this vector's metadata. Label constraints are checked (debug-only) before
// delegating.
func (gv *gaugeVec) CurryWith(labels prometheus.Labels) (Vec[Gauge], error) {
	gv.checkLabels(labels)
	vec, err := gv.GaugeVec.CurryWith(labels)
	if err != nil {
		return nil, err
	}
	return &gaugeVec{GaugeVec: vec, metric: gv.metric}, nil
}

// GetMetricWith returns the gauge for the given labels, or an error.
func (gv *gaugeVec) GetMetricWith(labels prometheus.Labels) (Gauge, error) {
	promGauge, err := gv.GaugeVec.GetMetricWith(labels)
	if err != nil {
		return nil, err
	}
	return &gauge{Gauge: promGauge, metric: gv.metric}, nil
}

// GetMetricWithLabelValues returns the gauge for the given label values, or
// an error.
func (gv *gaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
	promGauge, err := gv.GaugeVec.GetMetricWithLabelValues(lvs...)
	if err != nil {
		return nil, err
	}
	return &gauge{Gauge: promGauge, metric: gv.metric}, nil
}

// With returns the gauge for the given labels, after the (debug-only) label
// constraint check.
func (gv *gaugeVec) With(labels prometheus.Labels) Gauge {
	gv.checkLabels(labels)
	return &gauge{Gauge: gv.GaugeVec.With(labels), metric: gv.metric}
}

// WithLabelValues returns the gauge for the given label values, after the
// (debug-only) label constraint check.
func (gv *gaugeVec) WithLabelValues(lvs ...string) Gauge {
	gv.checkLabelValues(lvs...)
	return &gauge{Gauge: gv.GaugeVec.WithLabelValues(lvs...), metric: gv.metric}
}

// SetEnabled toggles collection; disabling also resets all label children.
func (gv *gaugeVec) SetEnabled(e bool) {
	if !e {
		gv.Reset()
	}
	gv.metric.SetEnabled(e)
}
// GaugeFunc is a prometheus.GaugeFunc extended with Cilium metric metadata.
type GaugeFunc interface {
	prometheus.GaugeFunc
	WithMetadata
}

// NewGaugeFunc creates a GaugeFunc whose value is provided by the given
// function, carrying the provided options as metadata.
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
	return &gaugeFunc{
		GaugeFunc: prometheus.NewGaugeFunc(opts.toPrometheus(), function),
		metric: metric{
			enabled: !opts.Disabled,
			opts:    Opts(opts),
		},
	}
}

// gaugeFunc implements GaugeFunc.
type gaugeFunc struct {
	prometheus.GaugeFunc
	metric
}

// GaugeOpts are the options for creating a Gauge; they mirror Opts.
type GaugeOpts Opts

// toPrometheus converts the options to prometheus.GaugeOpts. ConfigName and
// Disabled are Cilium-side metadata and are not forwarded.
func (o GaugeOpts) toPrometheus() prometheus.GaugeOpts {
	return prometheus.GaugeOpts{
		Namespace:   o.Namespace,
		Subsystem:   o.Subsystem,
		Name:        o.Name,
		Help:        o.Help,
		ConstLabels: o.ConstLabels,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metric
import (
"time"
"github.com/prometheus/client_golang/prometheus"
)
// NewHistogram creates a new Histogram wrapping a prometheus.Histogram and
// carrying the provided options as metadata. The metric starts enabled
// unless opts.Disabled is set.
func NewHistogram(opts HistogramOpts) Histogram {
	return &histogram{
		Histogram: prometheus.NewHistogram(opts.toPrometheus()),
		metric: metric{
			enabled: !opts.Disabled,
			opts:    opts.opts(),
		},
	}
}

// Histogram is a prometheus.Histogram extended with Cilium metric metadata.
type Histogram interface {
	prometheus.Histogram
	WithMetadata
}

// histogram implements Histogram.
type histogram struct {
	prometheus.Histogram
	metric
}

// Observer is a prometheus.Observer extended with Cilium metric metadata.
type Observer interface {
	prometheus.Observer
	WithMetadata
}

// observer implements Observer.
type observer struct {
	prometheus.Observer
	metric
}
// NewHistogramVec creates a new Vec[Observer] (i.e. Histogram Vec) based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *histogramVec {
	return &histogramVec{
		ObserverVec: prometheus.NewHistogramVec(opts.toPrometheus(), labelNames),
		metric: metric{
			enabled: !opts.Disabled,
			opts:    opts.opts(),
		},
	}
}

// NewHistogramVecWithLabels creates a new Vec[Observer] based on the provided HistogramOpts and
// partitioned by the given labels.
// This will also initialize the labels with the provided values so that metrics with known label value
// ranges can be pre-initialized to zero upon init.
//
// This should only be used when all label values are known at init, otherwise use of the
// metric vector with uninitialized labels will result in warnings.
//
// Note: Disabled metrics will not have their label values initialized.
func NewHistogramVecWithLabels(opts HistogramOpts, labels Labels) *histogramVec {
	hv := NewHistogramVec(opts, labels.labelNames())
	initLabels(&hv.metric, labels, hv, opts.Disabled)
	return hv
}

// histogramVec wraps a prometheus histogram vector (held as an ObserverVec)
// with the shared metadata base.
type histogramVec struct {
	prometheus.ObserverVec
	metric
}
// CurryWith returns a histogram vector curried with the provided labels,
// sharing this vector's metadata. Label constraints are checked (debug-only)
// before delegating.
func (hv *histogramVec) CurryWith(labels prometheus.Labels) (Vec[Observer], error) {
	hv.checkLabels(labels)
	vec, err := hv.ObserverVec.CurryWith(labels)
	if err != nil {
		return nil, err
	}
	return &histogramVec{ObserverVec: vec, metric: hv.metric}, nil
}

// GetMetricWith returns the observer for the given labels, or an error.
func (hv *histogramVec) GetMetricWith(labels prometheus.Labels) (Observer, error) {
	promObserver, err := hv.ObserverVec.GetMetricWith(labels)
	if err != nil {
		return nil, err
	}
	return &observer{Observer: promObserver, metric: hv.metric}, nil
}

// GetMetricWithLabelValues returns the observer for the given label values,
// or an error.
func (hv *histogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
	promObserver, err := hv.ObserverVec.GetMetricWithLabelValues(lvs...)
	if err != nil {
		return nil, err
	}
	return &observer{Observer: promObserver, metric: hv.metric}, nil
}

// With returns the observer for the given labels, after the (debug-only)
// label constraint check.
func (hv *histogramVec) With(labels prometheus.Labels) Observer {
	hv.checkLabels(labels)
	return &observer{Observer: hv.ObserverVec.With(labels), metric: hv.metric}
}

// WithLabelValues returns the observer for the given label values, after the
// (debug-only) label constraint check.
func (hv *histogramVec) WithLabelValues(lvs ...string) Observer {
	hv.checkLabelValues(lvs...)
	return &observer{Observer: hv.ObserverVec.WithLabelValues(lvs...), metric: hv.metric}
}

// SetEnabled toggles collection; disabling also resets the label children
// when the underlying vector is the concrete *prometheus.HistogramVec
// (Reset is not available on the ObserverVec interface).
func (hv *histogramVec) SetEnabled(e bool) {
	if !e {
		if histVec, ok := hv.ObserverVec.(*prometheus.HistogramVec); ok {
			histVec.Reset()
		}
	}
	hv.metric.SetEnabled(e)
}
// HistogramOpts are a modified and expanded version of the prometheus.HistogramOpts.
// https://pkg.go.dev/github.com/prometheus/client_golang/prometheus#HistogramOpts
type HistogramOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Histogram (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Histogram must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Histogram.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
	ConstLabels prometheus.Labels

	// Buckets defines the buckets into which observations are counted. Each
	// element in the slice is the upper inclusive bound of a bucket. The
	// values must be sorted in strictly increasing order. There is no need
	// to add a highest bucket with +Inf bound, it will be added
	// implicitly. If Buckets is left as nil or set to a slice of length
	// zero, it is replaced by default buckets. The default buckets are
	// DefBuckets if no buckets for a native histogram (see below) are used,
	// otherwise the default is no buckets. (In other words, if you want to
	// use both regular buckets and buckets for a native histogram, you have
	// to define the regular buckets here explicitly.)
	Buckets []float64

	// If NativeHistogramBucketFactor is greater than one, so-called sparse
	// buckets are used (in addition to the regular buckets, if defined
	// above). A Histogram with sparse buckets will be ingested as a Native
	// Histogram by a Prometheus server with that feature enabled (requires
	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
	// the whole float64 range (with the exception of the “zero” bucket, see
	// SparseBucketsZeroThreshold below). From any one bucket to the next,
	// the width of the bucket grows by a constant
	// factor. NativeHistogramBucketFactor provides an upper bound for this
	// factor (exception see below). The smaller
	// NativeHistogramBucketFactor, the more buckets will be used and thus
	// the more costly the histogram will become. A generally good trade-off
	// between cost and accuracy is a value of 1.1 (each bucket is at most
	// 10% wider than the previous one), which will result in each power of
	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
	// and 2, same as between 2 and 4, and 4 and 8, etc.).
	//
	// Details about the actually used factor: The factor is calculated as
	// 2^(2^n), where n is an integer number between (and including) -8 and
	// 4. n is chosen so that the resulting factor is the largest that is
	// still smaller or equal to NativeHistogramBucketFactor. Note that the
	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
	// it is larger than the provided NativeHistogramBucketFactor.
	//
	// NOTE: Native Histograms are still an experimental feature. Their
	// behavior might still change without a major version
	// bump. Subsequently, all NativeHistogram... options here might still
	// change their behavior or name (or might completely disappear) without
	// a major version bump.
	NativeHistogramBucketFactor float64
	// All observations with an absolute value of less or equal
	// NativeHistogramZeroThreshold are accumulated into a “zero”
	// bucket. For best results, this should be close to a bucket
	// boundary. This is usually the case if picking a power of two. If
	// NativeHistogramZeroThreshold is left at zero,
	// DefSparseBucketsZeroThreshold is used as the threshold. To configure
	// a zero bucket with an actual threshold of zero (i.e. only
	// observations of precisely zero will go into the zero bucket), set
	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
	// constant (or any negative float value).
	NativeHistogramZeroThreshold float64

	// The remaining fields define a strategy to limit the number of
	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
	// at zero, the number of buckets is not limited. (Note that this might
	// lead to unbounded memory consumption if the values observed by the
	// Histogram are sufficiently wide-spread. In particular, this could be
	// used as a DoS attack vector. Where the observed values depend on
	// external inputs, it is highly recommended to set a
	// NativeHistogramMaxBucketNumber.) Once the set
	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
	// enacted: First, if the last reset (or the creation) of the histogram
	// is at least NativeHistogramMinResetDuration ago, then the whole
	// histogram is reset to its initial state (including regular
	// buckets). If less time has passed, or if
	// NativeHistogramMinResetDuration is zero, no reset is
	// performed. Instead, the zero threshold is increased sufficiently to
	// reduce the number of buckets to or below
	// NativeHistogramMaxBucketNumber, but not to more than
	// NativeHistogramMaxZeroThreshold. Thus, if
	// NativeHistogramMaxZeroThreshold is already at or below the current
	// zero threshold, nothing happens at this step. After that, if the
	// number of buckets still exceeds NativeHistogramMaxBucketNumber, the
	// resolution of the histogram is reduced by doubling the width of the
	// sparse buckets (up to a growth factor between one bucket to the next
	// of 2^(2^4) = 65536, see above).
	NativeHistogramMaxBucketNumber  uint32
	NativeHistogramMinResetDuration time.Duration
	NativeHistogramMaxZeroThreshold float64

	// ConfigName is Cilium-side metadata carried into the metric's Opts
	// (see HistogramOpts.opts); it is not forwarded to prometheus.
	ConfigName string

	// If true, the metric has to be explicitly enabled via config or flags
	Disabled bool
}
// opts converts HistogramOpts to the generic Opts metadata, dropping the
// histogram-specific bucket configuration.
func (ho HistogramOpts) opts() Opts {
	return Opts{
		Namespace:   ho.Namespace,
		Subsystem:   ho.Subsystem,
		Name:        ho.Name,
		Help:        ho.Help,
		ConstLabels: ho.ConstLabels,
		ConfigName:  ho.ConfigName,
		Disabled:    ho.Disabled,
	}
}

// toPrometheus converts the options to prometheus.HistogramOpts. ConfigName
// and Disabled are Cilium-side metadata and are not forwarded.
func (ho HistogramOpts) toPrometheus() prometheus.HistogramOpts {
	return prometheus.HistogramOpts{
		Namespace:                       ho.Namespace,
		Subsystem:                       ho.Subsystem,
		Name:                            ho.Name,
		Help:                            ho.Help,
		ConstLabels:                     ho.ConstLabels,
		Buckets:                         ho.Buckets,
		NativeHistogramBucketFactor:     ho.NativeHistogramBucketFactor,
		NativeHistogramZeroThreshold:    ho.NativeHistogramZeroThreshold,
		NativeHistogramMaxBucketNumber:  ho.NativeHistogramMaxBucketNumber,
		NativeHistogramMinResetDuration: ho.NativeHistogramMinResetDuration,
		NativeHistogramMaxZeroThreshold: ho.NativeHistogramMaxZeroThreshold,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metric
import (
"fmt"
"maps"
"os"
"slices"
"strconv"
"github.com/prometheus/client_golang/prometheus"
"github.com/cilium/cilium/pkg/metrics/metric/collections"
)
// invalidMetricValueDetectionEnabled guards the debug-only label constraint
// checks in checkLabels/checkLabelValues.
var invalidMetricValueDetectionEnabled = false

func init() {
	// Enabled via the CILIUM_INVALID_METRIC_VALUE_DETECTOR environment
	// variable; a parse error leaves the detector disabled.
	invalidMetricValueDetectionEnabled, _ = strconv.ParseBool(os.Getenv("CILIUM_INVALID_METRIC_VALUE_DETECTOR"))
}

// WithMetadata is the interface implemented by any metric defined in this package. These typically embed existing
// prometheus metric types and add additional metadata. In addition, these metrics have the concept of being enabled
// or disabled which is used in place of conditional registration so all metric types can always be registered.
type WithMetadata interface {
	IsEnabled() bool
	SetEnabled(bool)
	Opts() Opts
}

// metric is a "base" structure which can be embedded to provide common functionality.
type metric struct {
	// enabled reports whether the metric should currently be collected.
	enabled bool
	// opts are the options the metric was constructed with.
	opts Opts
	// labels, when non-nil, constrains the allowed label values (consulted
	// by checkLabels/checkLabelValues and forEachLabelVector).
	labels *labelSet
}
// forEachLabelVector performs a product of all possible label value combinations
// and calls the provided function for each combination. It is a no-op when no
// label constraints were configured.
func (b *metric) forEachLabelVector(fn func(lvls []string)) {
	if b.labels == nil {
		return
	}
	valueSets := make([][]string, 0, len(b.labels.lbls))
	for _, label := range b.labels.lbls {
		valueSets = append(valueSets, slices.Collect(maps.Keys(label.Values)))
	}
	for _, combination := range collections.CartesianProduct(valueSets...) {
		fn(combination)
	}
}
// checkLabelValues checks that the provided label values are within the range
// of provided label values, if labels where defined using the Labels type.
// Validation only runs when the CILIUM_INVALID_METRIC_VALUE_DETECTOR
// environment variable enables it, in which case a violation panics with the
// offending metric name so it is caught during development/testing. With the
// detector disabled (the default), no validation occurs and metrics are
// collected as-is.
func (b *metric) checkLabelValues(lvs ...string) {
	if b.labels == nil {
		return
	}
	if invalidMetricValueDetectionEnabled {
		if err := b.labels.checkLabelValues(lvs); err != nil {
			panic("metric label constraints violated for metric " + b.opts.Name + ": " + err.Error())
		}
	}
}
// checkLabels validates the provided label map against the label ranges this
// metric was declared with. Validation only runs when the
// CILIUM_INVALID_METRIC_VALUE_DETECTOR environment variable enables it, in
// which case a violation panics with the offending metric name.
func (b *metric) checkLabels(labels prometheus.Labels) {
	if b.labels == nil || !invalidMetricValueDetectionEnabled {
		return
	}
	if err := b.labels.checkLabels(labels); err != nil {
		panic("metric label constraints violated for metric " + b.opts.Name + ": " + err.Error())
	}
}
// IsEnabled implements WithMetadata; it reports whether the metric is
// currently being collected.
func (b *metric) IsEnabled() bool {
	return b.enabled
}

// SetEnabled implements WithMetadata; it toggles collection of the metric.
func (b *metric) SetEnabled(e bool) {
	b.enabled = e
}

// Opts implements WithMetadata; it returns the metadata the metric was
// declared with.
func (b *metric) Opts() Opts {
	return b.opts
}
// collectorWithMetadata combines a prometheus collector with this package's
// enable/disable metadata; every metric type in this package satisfies it.
type collectorWithMetadata interface {
	prometheus.Collector
	WithMetadata
}
// EnabledCollector collects the underlying metric only when it's enabled.
type EnabledCollector struct {
	// C is the wrapped collector; if it also implements WithMetadata, its
	// enabled state controls whether Collect forwards any metrics.
	C prometheus.Collector
}
// Collect implements prometheus.Collector. A wrapped collector that carries
// metadata and is currently disabled produces nothing; everything else is
// forwarded unchanged.
func (e EnabledCollector) Collect(ch chan<- prometheus.Metric) {
	if m, ok := e.C.(WithMetadata); !ok || m.IsEnabled() {
		e.C.Collect(ch)
	}
}
// Describe implements prometheus.Collector. Descriptions are always forwarded
// to the wrapped collector, regardless of the enabled state.
func (e EnabledCollector) Describe(ch chan<- *prometheus.Desc) {
	e.C.Describe(ch)
}

// Compile-time check that EnabledCollector satisfies prometheus.Collector.
var _ prometheus.Collector = &EnabledCollector{}
// Vec is a generic type to describe the vectorized version of another metric type, for example Vec[Counter] would be
// our version of a prometheus.CounterVec.
type Vec[T any] interface {
	collectorWithMetadata
	// CurryWith returns a vector curried with the provided labels, i.e. the
	// returned vector has those labels pre-set for all labeled operations performed
	// on it. The cardinality of the curried vector is reduced accordingly. The
	// order of the remaining labels stays the same (just with the curried labels
	// taken out of the sequence – which is relevant for the
	// (GetMetric)WithLabelValues methods). It is possible to curry a curried
	// vector, but only with labels not yet used for currying before.
	//
	// The metrics contained in the `Vec[T]` are shared between the curried and
	// uncurried vectors. They are just accessed differently. Curried and uncurried
	// vectors behave identically in terms of collection. Only one must be
	// registered with a given registry (usually the uncurried version). The Reset
	// method deletes all metrics, even if called on a curried vector.
	CurryWith(labels prometheus.Labels) (Vec[T], error)
	// GetMetricWith returns the `T` for the given Labels map (the label names
	// must match those of the variable labels in Desc). If that label map is
	// accessed for the first time, a new `T` is created. Implications of
	// creating a `T` without using it and keeping the `T` for later use are
	// the same as for GetMetricWithLabelValues.
	//
	// An error is returned if the number and names of the Labels are inconsistent
	// with those of the variable labels in Desc (minus any curried labels).
	//
	// This method is used for the same purpose as
	// GetMetricWithLabelValues(...string). See there for pros and cons of the two
	// methods.
	GetMetricWith(labels prometheus.Labels) (T, error)
	// GetMetricWithLabelValues returns the `T` for the given slice of label
	// values (same order as the variable labels in Desc). If that combination of
	// label values is accessed for the first time, a new `T` is created.
	//
	// It is possible to call this method without using the returned `T` to only
	// create the new `T` but leave it at its starting value 0.
	//
	// Keeping the `T` for later use is possible (and should be considered if
	// performance is critical), but keep in mind that Reset, DeleteLabelValues and
	// Delete can be used to delete the `T` from the `Vec[T]`, assuming it also
	// implements `DeletableVec[T]`. In that case,
	// the `T` will still exist, but it will not be exported anymore, even if a
	// `T` with the same label values is created later.
	//
	// An error is returned if the number of label values is not the same as the
	// number of variable labels in Desc (minus any curried labels).
	//
	// Note that for more than one label value, this method is prone to mistakes
	// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
	// an alternative to avoid that type of mistake. For higher label numbers, the
	// latter has a much more readable (albeit more verbose) syntax, but it comes
	// with a performance overhead (for creating and processing the Labels map).
	GetMetricWithLabelValues(lvs ...string) (T, error)
	// With works as GetMetricWith, but panics where GetMetricWith would have
	// returned an error. Not returning an error allows shortcuts like
	//
	//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
	With(labels prometheus.Labels) T
	// WithLabelValues works as GetMetricWithLabelValues, but panics where
	// GetMetricWithLabelValues would have returned an error. Not returning an
	// error allows shortcuts like
	//
	//	myVec.WithLabelValues("404", "GET").Add(42)
	WithLabelValues(lvs ...string) T
}
// DeletableVec is a generic type to describe a vectorized version of another metric type, like Vec[T], but with the
// additional ability to remove labels without re-creating the metric.
type DeletableVec[T any] interface {
	// Vec[T] provides all the non-deleting vector operations.
	Vec[T]
	// Delete deletes the metric where the variable labels are the same as those
	// passed in as labels. It returns true if a metric was deleted.
	//
	// It is not an error if the number and names of the Labels are inconsistent
	// with those of the VariableLabels in Desc. However, such inconsistent Labels
	// can never match an actual metric, so the method will always return false in
	// that case.
	//
	// This method is used for the same purpose as DeleteLabelValues(...string). See
	// there for pros and cons of the two methods.
	Delete(labels prometheus.Labels) bool
	// DeleteLabelValues removes the metric where the variable labels are the same
	// as those passed in as labels (same order as the VariableLabels in Desc). It
	// returns true if a metric was deleted.
	//
	// It is not an error if the number of label values is not the same as the
	// number of VariableLabels in Desc. However, such inconsistent label count can
	// never match an actual metric, so the method will always return false in that
	// case.
	//
	// Note that for more than one label value, this method is prone to mistakes
	// caused by an incorrect order of arguments. Consider Delete(Labels) as an
	// alternative to avoid that type of mistake. For higher label numbers, the
	// latter has a much more readable (albeit more verbose) syntax, but it comes
	// with a performance overhead (for creating and processing the Labels map).
	// See also the CounterVec example.
	DeleteLabelValues(lvs ...string) bool
	// DeletePartialMatch deletes all metrics where the variable labels contain all of those
	// passed in as labels. The order of the labels does not matter.
	// It returns the number of metrics deleted.
	//
	// Note that curried labels will never be matched if deleting from the curried vector.
	// To match curried labels with DeletePartialMatch, it must be called on the base vector.
	DeletePartialMatch(labels prometheus.Labels) int
	// Reset deletes all metrics in this vector.
	Reset()
}
// Opts are a modified and extended version of the prometheus.Opts
// https://pkg.go.dev/github.com/prometheus/client_golang/prometheus#Opts
type Opts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Metric (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the metric must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string
	// Help provides information about this metric.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string
	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
	ConstLabels prometheus.Labels
	// ConfigName is the name used to enable/disable this metric via the
	// config/flags. When empty, GetConfigName falls back to the
	// fully-qualified metric name.
	ConfigName string
	// Disabled, if true, means the metric has to be explicitly enabled via
	// config or flags.
	Disabled bool
}
// GetConfigName returns the name under which this metric can be enabled or
// disabled via config/flags: the explicit ConfigName when set, otherwise the
// fully-qualified Prometheus name built from Namespace, Subsystem and Name.
func (b Opts) GetConfigName() string {
	if name := b.ConfigName; name != "" {
		return name
	}
	return prometheus.BuildFQName(b.Namespace, b.Subsystem, b.Name)
}
// Label represents a metric label with a pre-defined range of values.
// This is used with the NewxxxVecWithLabels metrics constructors to initialize
// vector metrics with known label value ranges, avoiding empty metrics.
type Label struct {
	// Name is the label name as it appears on the metric.
	Name string
	// If defined, only these values are allowed.
	Values Values
}
// Values is a distinct set of possible label values for a particular Label.
type Values map[string]struct{}

// NewValues constructs a Values type from a set of strings; duplicates
// collapse into a single entry.
func NewValues(vs ...string) Values {
	vals := make(Values, len(vs))
	for _, v := range vs {
		vals[v] = struct{}{}
	}
	return vals
}
// Labels is a slice of labels that represents a label set for a vector type
// metric.
type Labels []Label

// labelNames returns the label names in declaration order.
func (lbls Labels) labelNames() []string {
	names := make([]string, 0, len(lbls))
	for _, l := range lbls {
		names = append(names, l.Name)
	}
	return names
}
// labelSet wraps a Labels declaration and lazily maintains a name-to-values
// lookup table for validation.
type labelSet struct {
	// lbls is the declared label range, in order.
	lbls Labels
	// m caches the name -> allowed-values lookup built by namesToValues.
	m map[string]map[string]struct{}
}
// namesToValues lazily builds and caches a lookup table from label name to
// its set of allowed values.
func (l *labelSet) namesToValues() map[string]map[string]struct{} {
	if l.m == nil {
		l.m = make(map[string]map[string]struct{}, len(l.lbls))
		for _, lbl := range l.lbls {
			l.m[lbl.Name] = lbl.Values
		}
	}
	return l.m
}
// checkLabels returns an error if any label in the provided map is either not
// declared in this label set, or carries a value outside its declared range.
// Labels declared with an empty value set accept any value.
func (l *labelSet) checkLabels(labels prometheus.Labels) error {
	for name, value := range labels {
		lvs, ok := l.namesToValues()[name]
		if !ok {
			return fmt.Errorf("invalid label name: %s", name)
		}
		// An empty value set means the label is unconstrained.
		if len(lvs) == 0 {
			continue
		}
		if _, ok := lvs[value]; !ok {
			// slices.Sorted materializes the iter.Seq returned by the stdlib
			// maps.Keys; formatting the iterator directly with %v would print
			// a function value instead of the allowed range, and sorting makes
			// the message deterministic.
			return fmt.Errorf("unexpected label vector value for label %q: value %q not defined in label range %v",
				name, value, slices.Sorted(maps.Keys(lvs)))
		}
	}
	return nil
}
// checkLabelValues returns an error if the label value vector has a different
// length than the declared label set, or if any positional value falls outside
// the corresponding label's declared range. Labels declared with an empty
// value set accept any value.
func (l *labelSet) checkLabelValues(lvs []string) error {
	if len(l.lbls) != len(lvs) {
		return fmt.Errorf("unexpected label vector length: expected %d, got %d", len(l.lbls), len(lvs))
	}
	for i, label := range l.lbls {
		// An empty value set means the label is unconstrained.
		if len(label.Values) == 0 {
			continue
		}
		if _, ok := label.Values[lvs[i]]; !ok {
			// slices.Sorted materializes the iter.Seq returned by the stdlib
			// maps.Keys; formatting the iterator directly with %v would print
			// a function value instead of the allowed range, and sorting makes
			// the message deterministic.
			return fmt.Errorf("unexpected label vector value for label %q: value %q not defined in label range %v",
				label.Name, lvs[i], slices.Sorted(maps.Keys(label.Values)))
		}
	}
	return nil
}
// initLabels is a helper function to initialize the labels of a metric.
// It is used by xxxVecWithLabels metrics constructors to initialize the
// labels of the metric and the vector (i.e. registering all possible label
// value combinations), so that vectors start out fully populated instead of
// empty. No-op when the metric is disabled.
func initLabels[T any](m *metric, labels Labels, vec Vec[T], disabled bool) {
	if disabled {
		return
	}
	// The label set must be assigned before forEachLabelVector, which
	// iterates over m.labels.
	m.labels = &labelSet{lbls: labels}
	m.forEachLabelVector(func(vs []string) {
		vec.WithLabelValues(vs...)
	})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package metrics holds prometheus metrics objects and related utility functions. It
// does not abstract away the prometheus client but the caller rarely needs to
// refer to prometheus directly.
package metrics
// Adding a metric
// - Add a metric object of the appropriate type as an exported variable
// - Register the new object in the init function
import (
"context"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics/metric"
"github.com/cilium/cilium/pkg/promise"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
"github.com/cilium/cilium/pkg/version"
)
const (
	// ErrorTimeout is the value used to notify timeout errors.
	ErrorTimeout = "timeout"
	// ErrorProxy is the value used to notify errors on Proxy.
	ErrorProxy = "proxy"
	// L7DNS is the value used to report DNS label on metrics
	L7DNS = "dns"
	// SubsystemBPF is the subsystem to scope metrics related to the bpf syscalls.
	SubsystemBPF = "bpf"
	// SubsystemDatapath is the subsystem to scope metrics related to management of
	// the datapath. It is prepended to metric names and separated with a '_'.
	SubsystemDatapath = "datapath"
	// SubsystemAgent is the subsystem to scope metrics related to the cilium agent itself.
	SubsystemAgent = "agent"
	// SubsystemIPCache is the subsystem to scope metrics related to the ipcache.
	SubsystemIPCache = "ipcache"
	// SubsystemK8s is the subsystem to scope metrics related to Kubernetes
	SubsystemK8s = "k8s"
	// SubsystemK8sClient is the subsystem to scope metrics related to the kubernetes client.
	SubsystemK8sClient = "k8s_client"
	// SubsystemWorkQueue is the subsystem to scope metrics related to the workqueue.
	SubsystemWorkQueue = "k8s_workqueue"
	// SubsystemKVStore is the subsystem to scope metrics related to the kvstore.
	SubsystemKVStore = "kvstore"
	// SubsystemFQDN is the subsystem to scope metrics related to the FQDN proxy.
	SubsystemFQDN = "fqdn"
	// SubsystemNodes is the subsystem to scope metrics related to the node manager.
	SubsystemNodes = "nodes"
	// SubsystemTriggers is the subsystem to scope metrics related to the trigger package.
	SubsystemTriggers = "triggers"
	// SubsystemAPILimiter is the subsystem to scope metrics related to the API limiter package.
	SubsystemAPILimiter = "api_limiter"
	// CiliumAgentNamespace is used to scope metrics from the Cilium Agent
	CiliumAgentNamespace = "cilium"
	// CiliumClusterMeshAPIServerNamespace is used to scope metrics from the
	// Cilium Cluster Mesh API Server
	CiliumClusterMeshAPIServerNamespace = "cilium_clustermesh_apiserver"
	// CiliumKVStoreMeshNamespace is used to scope metrics from
	// Cilium KVStoreMesh
	CiliumKVStoreMeshNamespace = "cilium_kvstoremesh"
	// CiliumOperatorNamespace is used to scope metrics from the Cilium Operator
	CiliumOperatorNamespace = "cilium_operator"
	// LabelError indicates the type of error (string)
	LabelError = "error"
	// LabelOutcome indicates whether the outcome of the operation was successful or not
	LabelOutcome = "outcome"
	// LabelAttempts is the number of attempts it took to complete the operation
	LabelAttempts = "attempts"
	// Labels
	// LabelValueTrue is the string value for true metric label values.
	LabelValueTrue = "true"
	// LabelValueFalse is the string value for false metric label values.
	LabelValueFalse = "false"
	// LabelValueOutcomeSuccess is used as a successful outcome of an operation
	LabelValueOutcomeSuccess = "success"
	// LabelValueOutcomeFail is used as an unsuccessful outcome of an operation
	LabelValueOutcomeFail = "fail"
	// LabelValueOutcomeFailure is used as an unsuccessful outcome of an operation.
	// NOTE: This should only be used for existing metrics, new metrics should use LabelValueOutcomeFail.
	LabelValueOutcomeFailure = "failure"
	// LabelDropReason is used to describe the reason for dropping packets/bytes
	LabelDropReason = "reason"
	// LabelEventSourceAPI marks event-related metrics that come from the API
	LabelEventSourceAPI = "api"
	// LabelEventSourceK8s marks event-related metrics that come from k8s
	LabelEventSourceK8s = "k8s"
	// LabelEventSourceFQDN marks event-related metrics that come from pkg/fqdn
	LabelEventSourceFQDN = "fqdn"
	// LabelEventSourceContainerd marks event-related metrics that come from docker
	LabelEventSourceContainerd = "docker"
	// LabelDatapathArea marks which area the metrics are related to (eg, which BPF map)
	LabelDatapathArea = "area"
	// LabelDatapathName marks a unique identifier for this metric.
	// The name should be defined once for a given type of error.
	LabelDatapathName = "name"
	// LabelDatapathFamily marks which protocol family (IPv4, IPV6) the metric is related to.
	LabelDatapathFamily = "family"
	// LabelProtocol marks the L4 protocol (TCP, ANY) for the metric.
	LabelProtocol = "protocol"
	// LabelSignalType marks the signal name
	LabelSignalType = "signal"
	// LabelSignalData marks the signal data
	LabelSignalData = "data"
	// LabelStatus the label from completed task
	LabelStatus = "status"
	// LabelPolicyEnforcement is the label used to see the enforcement status
	LabelPolicyEnforcement = "enforcement"
	// LabelPolicySource is the label used to describe the source of a policy
	LabelPolicySource = "source"
	// LabelSource is a generic label for the source of an event or entity
	LabelSource = "source"
	// LabelScope is the label used to defined multiples scopes in the same
	// metric. For example, one counter may measure a metric over the scope of
	// the entire event (scope=global), or just part of an event
	// (scope=slow_path)
	LabelScope = "scope"
	// LabelProtocolL7 is the label used when working with layer 7 protocols.
	LabelProtocolL7 = "protocol_l7"
	// LabelBuildState is the state a build queue entry is in
	LabelBuildState = "state"
	// LabelBuildQueueName is the name of the build queue
	LabelBuildQueueName = "name"
	// LabelAction is the label used to defined what kind of action was performed in a metric
	LabelAction = "action"
	// LabelSubsystem is the label used to refer to any of the child process
	// started by cilium (Envoy, monitor, etc..)
	LabelSubsystem = "subsystem"
	// LabelKind is the kind of a label
	LabelKind = "kind"
	// LabelEventSource is the source of a label for event metrics
	// i.e. k8s, containerd, api.
	LabelEventSource = "source"
	// LabelPath is the label for the API path
	LabelPath = "path"
	// LabelMethod is the label for the HTTP method
	LabelMethod = "method"
	// LabelAPIReturnCode is the HTTP code returned for that API path
	LabelAPIReturnCode = "return_code"
	// LabelOperation is the label for BPF maps operations
	LabelOperation = "operation"
	// LabelMapName is the label for the BPF map name
	LabelMapName = "map_name"
	// LabelMapGroup is the label for the BPF map group classification
	LabelMapGroup = "map_group"
	// LabelVersion is the label for the version number
	LabelVersion = "version"
	// LabelVersionRevision is the label for the version revision
	LabelVersionRevision = "revision"
	// LabelArch is the label for the platform architecture (e.g. linux/amd64)
	LabelArch = "arch"
	// LabelDirection is the label for traffic direction
	LabelDirection = "direction"
	// LabelSourceCluster is the label for source cluster name
	LabelSourceCluster = "source_cluster"
	// LabelSourceNodeName is the label for source node name
	LabelSourceNodeName = "source_node_name"
	// LabelTargetCluster is the label for target cluster name
	LabelTargetCluster = "target_cluster"
	// LabelLeaderElectionName is the name of leader election
	LabelLeaderElectionName = "name"
	// LabelL7Rule is a label for a L7 rule name.
	LabelL7Rule = "rule"
	// LabelL7ProxyType is the label for denoting a L7 proxy type.
	LabelL7ProxyType = "proxy_type"
	// LabelType is the label for type in general (e.g. endpoint, node)
	LabelType = "type"
	// LabelPeerEndpoint is the label value used when the peer is an endpoint
	LabelPeerEndpoint = "endpoint"
	// LabelPeerNode is the label value used when the peer is a node
	LabelPeerNode = "node"
	// LabelTrafficHTTP is the label value for HTTP traffic
	LabelTrafficHTTP = "http"
	// LabelTrafficICMP is the label value for ICMP traffic
	LabelTrafficICMP = "icmp"
	// LabelAddressType is the label for the address type
	LabelAddressType = "address_type"
	// LabelAddressTypePrimary is the label value for primary addresses
	LabelAddressTypePrimary = "primary"
	// LabelAddressTypeSecondary is the label value for secondary addresses
	LabelAddressTypeSecondary = "secondary"
	// LabelConnectivityStatus is the label for connectivity statuses
	LabelConnectivityStatus = "status"
	// LabelReachable is the connectivity status value for reachable peers
	LabelReachable = "reachable"
	// LabelUnreachable is the connectivity status value for unreachable peers
	LabelUnreachable = "unreachable"
	// LabelUnknown is the connectivity status value for peers in unknown state
	LabelUnknown = "unknown"
)
var (
	// LabelValuesBool is metric label value set for boolean type.
	LabelValuesBool = metric.NewValues(LabelValueTrue, LabelValueFalse)
	// Namespace is used to scope metrics from cilium. It is prepended to metric
	// names and separated with a '_'
	Namespace = CiliumAgentNamespace
	// registryResolver resolves the registry promise once the Registry has
	// been constructed.
	registryResolver, registry = promise.New[*Registry]()
	// BPFMapPressure toggles BPF map pressure reporting.
	// NOTE(review): the consumer of this flag is not visible in this file — confirm.
	BPFMapPressure = true
	// BootstrapTimes is the durations of cilium-agent bootstrap sequence.
	BootstrapTimes = NoOpGaugeVec
	// APIInteractions is the total time taken to process an API call made
	// to the cilium-agent
	APIInteractions = NoOpObserverVec
	// Status
	// NodeHealthConnectivityStatus is the number of connections with connectivity status
	// between local node to other node intra or inter cluster.
	NodeHealthConnectivityStatus = NoOpGaugeVec
	// NodeHealthConnectivityLatency is the histogram connectivity latency between local node to
	// other node intra or inter cluster.
	NodeHealthConnectivityLatency = NoOpObserverVec
	// Endpoint
	// Endpoint is a function used to collect this metric.
	// It must be thread-safe.
	Endpoint metric.GaugeFunc
	// EndpointMaxIfindex is the maximum observed interface index for existing endpoints
	EndpointMaxIfindex = NoOpGauge
	// EndpointRegenerationTotal is a count of the number of times any endpoint
	// has been regenerated and success/fail outcome
	EndpointRegenerationTotal = NoOpCounterVec
	// EndpointStateCount is the total count of the endpoints in various states.
	EndpointStateCount = NoOpGaugeVec
	// EndpointRegenerationTimeStats is the total time taken to regenerate
	// endpoints, labeled by span name and status ("success" or "failure")
	EndpointRegenerationTimeStats = NoOpObserverVec
	// EndpointPropagationDelay is the delay between creation of local CiliumEndpoint
	// and update for that CiliumEndpoint received through CiliumEndpointSlice.
	// Measure of local CEP roundtrip time with CiliumEndpointSlice feature enabled.
	EndpointPropagationDelay = NoOpObserverVec
	// Policy
	// Policy is the number of policies loaded into the agent
	Policy = NoOpGauge
	// PolicyRevision is the current policy revision number for this agent
	PolicyRevision = NoOpGauge
	// PolicyChangeTotal is a count of policy changes by outcome ("success" or
	// "failure")
	PolicyChangeTotal = NoOpCounterVec
	// PolicyEndpointStatus is the number of endpoints with policy labeled by enforcement type
	PolicyEndpointStatus = NoOpGaugeVec
	// PolicyImplementationDelay is a distribution of times taken from adding a
	// policy (and incrementing the policy revision) to seeing it in the datapath
	// per Endpoint. This reflects the actual delay perceived by traffic flowing
	// through the datapath. The longest times will roughly correlate with the
	// time taken to fully deploy an endpoint.
	PolicyImplementationDelay = NoOpObserverVec
	// PolicyIncrementalUpdateDuration is the time it takes to apply an incremental update
	// to the policy engine. An incremental update is a newly-learned identity that can be
	// directly added to policy maps without a full policy recalculation.
	PolicyIncrementalUpdateDuration = NoOpObserverVec
	// Identity
	// Identity is the number of identities currently in use on the node by type
	Identity = NoOpGaugeVec
	// IdentityLabelSources is the number of identities in use on the node that
	// have a particular label source. Note that an identity may contain labels
	// from multiple sources and thus might be counted in multiple buckets
	IdentityLabelSources = NoOpGaugeVec
	// Events
	// EventTS is the time in seconds since epoch that we last received an
	// event that was handled by Cilium. This metric tracks the source of the
	// event which can be one of K8s or Cilium's API.
	EventTS = NoOpGaugeVec
	// EventLagK8s is the lag calculation for k8s Pod events.
	EventLagK8s = NoOpGauge
	// L7 statistics
	// ProxyRedirects is the number of redirects labeled by protocol
	ProxyRedirects = NoOpGaugeVec
	// ProxyPolicyL7Total is a count of all l7 requests handled by proxy
	ProxyPolicyL7Total = NoOpCounterVec
	// ProxyUpstreamTime is how long the upstream server took to reply labeled
	// by error, protocol and span time
	ProxyUpstreamTime = NoOpObserverVec
	// ProxyDatapathUpdateTimeout is a count of all the timeouts encountered while
	// updating the datapath due to an FQDN IP update
	ProxyDatapathUpdateTimeout = NoOpCounter
	// L3-L4 statistics
	// Datapath statistics
	// ConntrackGCRuns is the number of times that the conntrack GC
	// process was run.
	ConntrackGCRuns = NoOpCounterVec
	// ConntrackGCKeyFallbacks number of times that the conntrack key fallback was invalid.
	ConntrackGCKeyFallbacks = NoOpCounterVec
	// ConntrackGCSize the number of entries in the conntrack table
	ConntrackGCSize = NoOpGaugeVec
	// NatGCSize the number of entries in the nat table
	NatGCSize = NoOpGaugeVec
	// ConntrackGCDuration the duration of the conntrack GC process in milliseconds.
	ConntrackGCDuration = NoOpObserverVec
	// ConntrackInterval is the interval in seconds between conntrack GC runs
	ConntrackInterval = NoOpGaugeVec
	// ConntrackDumpResets marks the count for conntrack dump resets
	ConntrackDumpResets = NoOpCounterVec
	// Signals
	// SignalsHandled is the number of signals received.
	SignalsHandled = NoOpCounterVec
	// Services
	// ServicesEventsCount counts the number of services
	ServicesEventsCount = NoOpCounterVec
	// ServiceImplementationDelay the execution duration of the service handler in milliseconds.
	// The metric reflects the time it took to program the service excluding the event queue latency.
	ServiceImplementationDelay = NoOpObserverVec
	// Errors and warnings
	// ErrorsWarnings is the number of errors and warnings in cilium-agent instances
	ErrorsWarnings = NoOpCounterVec
	// ControllerRuns is the number of times that a controller process runs.
	ControllerRuns = NoOpCounterVec
	// ControllerRunsDuration the duration of the controller process in seconds
	ControllerRunsDuration = NoOpObserverVec
	// SubprocessStart is the number of times that Cilium has started a
	// subprocess, labeled by Subsystem
	SubprocessStart = NoOpCounterVec
	// Kubernetes Events
	// KubernetesEventProcessed is the number of Kubernetes events
	// processed labeled by scope, action and execution result
	KubernetesEventProcessed = NoOpCounterVec
	// KubernetesEventReceived is the number of Kubernetes events received
	// labeled by scope, action, valid data and equalness.
	KubernetesEventReceived = NoOpCounterVec
	// Kubernetes interactions
	// KubernetesAPIInteractions is the total time taken to process an API call made
	// to the kube-apiserver
	KubernetesAPIInteractions = NoOpObserverVec
	// KubernetesAPIRateLimiterLatency is the client side rate limiter latency metric
	KubernetesAPIRateLimiterLatency = NoOpObserverVec
	// KubernetesAPICallsTotal is the counter for all API calls made to
	// kube-apiserver.
	KubernetesAPICallsTotal = NoOpCounterVec
	// TerminatingEndpointsEvents is the number of terminating endpoint events received from kubernetes.
	TerminatingEndpointsEvents = NoOpCounter
	// IPAM events
	// IPAMEvent is the number of IPAM events received labeled by action and
	// datapath family type
	IPAMEvent = NoOpCounterVec
	// IPAMCapacity tracks the total number of IPs that could be allocated. To
	// get the current number of available IPs, it would be this metric
	// subtracted by IPAMEvent{allocated}.
	IPAMCapacity = NoOpGaugeVec
	// KVstore events
	// KVStoreOperationsDuration records the duration of kvstore operations
	KVStoreOperationsDuration = NoOpObserverVec
	// KVStoreEventsQueueDuration records the duration in seconds of time
	// received event was blocked before it could be queued
	KVStoreEventsQueueDuration = NoOpObserverVec
	// KVStoreQuorumErrors records the number of kvstore quorum errors
	KVStoreQuorumErrors = NoOpCounterVec
	// FQDNGarbageCollectorCleanedTotal is the number of domains cleaned by the
	// GC job.
	FQDNGarbageCollectorCleanedTotal = NoOpCounter
	// FQDNActiveNames is the number of domains inside the DNS cache that have
	// not expired (by TTL), per endpoint.
	FQDNActiveNames = NoOpGaugeVec
	// FQDNActiveIPs is the number of IPs inside the DNS cache associated with
	// a domain that has not expired (by TTL) and are currently active, per
	// endpoint.
	FQDNActiveIPs = NoOpGaugeVec
	// FQDNAliveZombieConnections is the number of IPs associated with domains
	// that have expired (by TTL) yet still associated with an active
	// connection (aka zombie), per endpoint.
	FQDNAliveZombieConnections = NoOpGaugeVec
	// FQDNSelectors is the total number of registered ToFQDN selectors
	FQDNSelectors = NoOpGauge
	// FQDNSemaphoreRejectedTotal is the total number of DNS requests rejected
	// by the DNS proxy because too many requests were in flight, as enforced by
	// the admission semaphore.
	FQDNSemaphoreRejectedTotal = NoOpCounter
	// IPCacheErrorsTotal is the total number of IPCache events handled in
	// the IPCache subsystem that resulted in errors.
	IPCacheErrorsTotal = NoOpCounterVec
	// IPCacheEventsTotal is the total number of IPCache events handled in
	// the IPCache subsystem.
	IPCacheEventsTotal = NoOpCounterVec
	// BPFSyscallDuration is the metric for bpf syscalls duration.
	BPFSyscallDuration = NoOpObserverVec
	// BPFMapOps is the metric to measure the number of operations done to a
	// bpf map.
	BPFMapOps = NoOpCounterVec
	// BPFMapCapacity is the max capacity of bpf maps, labelled by map group classification.
	BPFMapCapacity = NoOpGaugeVec
	// VersionMetric labelled by Cilium version
	VersionMetric = NoOpGaugeVec
	// APILimiterWaitHistoryDuration is a histogram that measures the
	// individual wait durations of API limiters
	APILimiterWaitHistoryDuration = NoOpObserverVec
	// APILimiterWaitDuration is the gauge of the current mean, min, and
	// max wait duration
	APILimiterWaitDuration = NoOpGaugeVec
	// APILimiterProcessingDuration is the gauge of the mean and estimated
	// processing duration
	APILimiterProcessingDuration = NoOpGaugeVec
	// APILimiterRequestsInFlight is the gauge of the current and max
	// requests in flight
	APILimiterRequestsInFlight = NoOpGaugeVec
	// APILimiterRateLimit is the gauge of the current rate limiting
	// configuration including limit and burst
	APILimiterRateLimit = NoOpGaugeVec
	// APILimiterAdjustmentFactor is the gauge representing the latest
	// adjustment factor that was applied
	APILimiterAdjustmentFactor = NoOpGaugeVec
	// APILimiterProcessedRequests is the counter of the number of
	// processed (successful and failed) requests
	APILimiterProcessedRequests = NoOpCounterVec
	// WorkQueueDepth is the depth of the workqueue
	//
	// We set actual metrics here instead of NoOp for the workqueue metrics
	// because these metrics will be registered with workqueue.SetProvider
	// by init function in watcher.go. Otherwise, we will register NoOps.
	//
	WorkQueueDepth = metric.NewGaugeVec(metric.GaugeOpts{
		ConfigName: Namespace + "_" + SubsystemWorkQueue + "_depth",
		Namespace:  Namespace,
		Subsystem:  SubsystemWorkQueue,
		Name:       "depth",
		Help:       "Current depth of workqueue.",
	}, []string{"name"})
	// WorkQueueAddsTotal is the total number of adds to the workqueue
	WorkQueueAddsTotal = metric.NewCounterVec(metric.CounterOpts{
		ConfigName: Namespace + "_" + SubsystemWorkQueue + "_adds_total",
		Namespace:  Namespace,
		Subsystem:  SubsystemWorkQueue,
		Name:       "adds_total",
		Help:       "Total number of adds handled by workqueue.",
	}, []string{"name"})
	// WorkQueueLatency is the latency of how long an item stays in the workqueue
	WorkQueueLatency = metric.NewHistogramVec(metric.HistogramOpts{
		ConfigName: Namespace + "_" + SubsystemWorkQueue + "_queue_duration_seconds",
		Namespace:  Namespace,
		Subsystem:  SubsystemWorkQueue,
		Name:       "queue_duration_seconds",
		Help:       "How long in seconds an item stays in workqueue before being requested.",
		Buckets:    prometheus.ExponentialBuckets(10e-9, 10, 10),
	}, []string{"name"})
	// WorkQueueDuration is the duration of how long processing an item for the workqueue
	WorkQueueDuration = metric.NewHistogramVec(metric.HistogramOpts{
		ConfigName: Namespace + "_" + SubsystemWorkQueue + "_work_duration_seconds",
		Namespace:  Namespace,
		Subsystem:  SubsystemWorkQueue,
		Name:       "work_duration_seconds",
		Help:       "How long in seconds processing an item from workqueue takes.",
		Buckets:    prometheus.ExponentialBuckets(10e-9, 10, 10),
	}, []string{"name"})
	// WorkQueueUnfinishedWork is how many seconds of work has been done that is in progress
	WorkQueueUnfinishedWork = metric.NewGaugeVec(metric.GaugeOpts{
		ConfigName: Namespace + "_" + SubsystemWorkQueue + "_unfinished_work_seconds",
		Namespace:  Namespace,
		Subsystem:  SubsystemWorkQueue,
		Name:       "unfinished_work_seconds",
		Help: "How many seconds of work has been done that " +
			"is in progress and hasn't been observed by work_duration. Large " +
			"values indicate stuck threads. One can deduce the number of stuck " +
			"threads by observing the rate at which this increases.",
	}, []string{"name"})
	// WorkQueueLongestRunningProcessor is the longest running processor in the workqueue
	WorkQueueLongestRunningProcessor = metric.NewGaugeVec(metric.GaugeOpts{
		ConfigName: Namespace + "_" + SubsystemWorkQueue + "_longest_running_processor_seconds",
		Namespace:  Namespace,
		Subsystem:  SubsystemWorkQueue,
		Name:       "longest_running_processor_seconds",
		Help: "How many seconds has the longest running " +
			"processor for workqueue been running.",
	}, []string{"name"})
	// WorkQueueRetries is the number of retries handled by the workqueue
	WorkQueueRetries = metric.NewCounterVec(metric.CounterOpts{
		ConfigName: Namespace + "_" + SubsystemWorkQueue + "_retries_total",
		Namespace:  Namespace,
		Subsystem:  SubsystemWorkQueue,
		Name:       "retries_total",
		Help:       "Total number of retries handled by workqueue.",
	}, []string{"name"})
)
// LegacyMetrics bundles the historically package-global Cilium agent metrics
// into one struct so they can be provided via dependency injection.
// NewLegacyMetrics constructs all fields and also mirrors them back into the
// package-level variables for callers that still use the globals.
type LegacyMetrics struct {
	// Agent bootstrap and API handling.
	BootstrapTimes metric.Vec[metric.Gauge]
	APIInteractions metric.Vec[metric.Observer]
	// Node-to-node health connectivity.
	NodeHealthConnectivityStatus metric.Vec[metric.Gauge]
	NodeHealthConnectivityLatency metric.Vec[metric.Observer]
	// Endpoint lifecycle and regeneration.
	Endpoint metric.GaugeFunc
	EndpointMaxIfindex metric.Gauge
	EndpointRegenerationTotal metric.Vec[metric.Counter]
	EndpointStateCount metric.Vec[metric.Gauge]
	EndpointRegenerationTimeStats metric.Vec[metric.Observer]
	EndpointPropagationDelay metric.Vec[metric.Observer]
	// Policy state and propagation.
	Policy metric.Gauge
	PolicyRevision metric.Gauge
	PolicyChangeTotal metric.Vec[metric.Counter]
	PolicyEndpointStatus metric.Vec[metric.Gauge]
	PolicyImplementationDelay metric.Vec[metric.Observer]
	PolicyIncrementalUpdateDuration metric.Vec[metric.Observer]
	// Identity allocation.
	Identity metric.Vec[metric.Gauge]
	IdentityLabelSources metric.Vec[metric.Gauge]
	// Control-plane event timestamps/lag.
	EventTS metric.Vec[metric.Gauge]
	EventLagK8s metric.Gauge
	// L7 proxy.
	ProxyRedirects metric.Vec[metric.Gauge]
	ProxyPolicyL7Total metric.Vec[metric.Counter]
	ProxyUpstreamTime metric.Vec[metric.Observer]
	ProxyDatapathUpdateTimeout metric.Counter
	// Datapath: conntrack/NAT garbage collection and signals.
	ConntrackGCRuns metric.Vec[metric.Counter]
	ConntrackGCKeyFallbacks metric.Vec[metric.Counter]
	ConntrackGCSize metric.Vec[metric.Gauge]
	NatGCSize metric.Vec[metric.Gauge]
	ConntrackGCDuration metric.Vec[metric.Observer]
	ConntrackInterval metric.Vec[metric.Gauge]
	ConntrackDumpResets metric.Vec[metric.Counter]
	SignalsHandled metric.Vec[metric.Counter]
	// Services.
	ServicesEventsCount metric.Vec[metric.Counter]
	ServiceImplementationDelay metric.Vec[metric.Observer]
	// Logging/controller/subprocess bookkeeping.
	ErrorsWarnings metric.Vec[metric.Counter]
	ControllerRuns metric.Vec[metric.Counter]
	ControllerRunsDuration metric.Vec[metric.Observer]
	SubprocessStart metric.Vec[metric.Counter]
	// Kubernetes client and event processing.
	KubernetesEventProcessed metric.Vec[metric.Counter]
	KubernetesEventReceived metric.Vec[metric.Counter]
	KubernetesAPIInteractions metric.Vec[metric.Observer]
	KubernetesAPIRateLimiterLatency metric.Vec[metric.Observer]
	KubernetesAPICallsTotal metric.Vec[metric.Counter]
	TerminatingEndpointsEvents metric.Counter
	// IPAM.
	IPAMEvent metric.Vec[metric.Counter]
	IPAMCapacity metric.Vec[metric.Gauge]
	// KVStore.
	KVStoreOperationsDuration metric.Vec[metric.Observer]
	KVStoreEventsQueueDuration metric.Vec[metric.Observer]
	KVStoreQuorumErrors metric.Vec[metric.Counter]
	// FQDN / DNS proxy.
	FQDNGarbageCollectorCleanedTotal metric.Counter
	FQDNActiveNames metric.Vec[metric.Gauge]
	FQDNActiveIPs metric.Vec[metric.Gauge]
	FQDNAliveZombieConnections metric.Vec[metric.Gauge]
	FQDNSelectors metric.Gauge
	FQDNSemaphoreRejectedTotal metric.Counter
	// IPCache.
	IPCacheErrorsTotal metric.Vec[metric.Counter]
	IPCacheEventsTotal metric.Vec[metric.Counter]
	// BPF maps and syscalls.
	BPFSyscallDuration metric.Vec[metric.Observer]
	BPFMapOps metric.Vec[metric.Counter]
	BPFMapCapacity metric.Vec[metric.Gauge]
	// Build/version info.
	VersionMetric metric.Vec[metric.Gauge]
	// API rate limiter.
	APILimiterWaitHistoryDuration metric.Vec[metric.Observer]
	APILimiterWaitDuration metric.Vec[metric.Gauge]
	APILimiterProcessingDuration metric.Vec[metric.Gauge]
	APILimiterRequestsInFlight metric.Vec[metric.Gauge]
	APILimiterRateLimit metric.Vec[metric.Gauge]
	APILimiterAdjustmentFactor metric.Vec[metric.Gauge]
	APILimiterProcessedRequests metric.Vec[metric.Counter]
	// client-go workqueue metrics (shared with the package-level vars).
	WorkQueueDepth metric.Vec[metric.Gauge]
	WorkQueueAddsTotal metric.Vec[metric.Counter]
	WorkQueueLatency metric.Vec[metric.Observer]
	WorkQueueDuration metric.Vec[metric.Observer]
	WorkQueueUnfinishedWork metric.Vec[metric.Gauge]
	WorkQueueLongestRunningProcessor metric.Vec[metric.Gauge]
	WorkQueueRetries metric.Vec[metric.Counter]
}
// NewLegacyMetrics constructs every legacy agent metric, pre-populates a few
// derived values (Cilium version labels, default BPF map capacity) and then
// mirrors each field into the corresponding package-level variable so that
// existing callers using the globals keep observing the same metric objects.
func NewLegacyMetrics() *LegacyMetrics {
	lm := &LegacyMetrics{
		BootstrapTimes: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemAgent + "_bootstrap_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemAgent,
			Name: "bootstrap_seconds",
			Help: "Duration of bootstrap sequence",
		}, []string{LabelScope, LabelOutcome}),
		APIInteractions: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_" + SubsystemAgent + "_api_process_time_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemAgent,
			Name: "api_process_time_seconds",
			Help: "Duration of processed API calls labeled by path, method and return code.",
		}, []string{LabelPath, LabelMethod, LabelAPIReturnCode}),
		EndpointRegenerationTotal: metric.NewCounterVecWithLabels(metric.CounterOpts{
			ConfigName: Namespace + "_endpoint_regenerations_total",
			Namespace: Namespace,
			Name: "endpoint_regenerations_total",
			Help: "Count of all endpoint regenerations that have completed, tagged by outcome",
		}, metric.Labels{
			{
				Name: LabelOutcome,
				Values: metric.NewValues(LabelValueOutcomeSuccess, LabelValueOutcomeFail),
			},
		}),
		EndpointStateCount: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_endpoint_state",
			Namespace: Namespace,
			Name: "endpoint_state",
			Help: "Count of all endpoints, tagged by different endpoint states",
		},
			[]string{"endpoint_state"},
		),
		EndpointRegenerationTimeStats: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_endpoint_regeneration_time_stats_seconds",
			Namespace: Namespace,
			Name: "endpoint_regeneration_time_stats_seconds",
			Help: "Endpoint regeneration time stats labeled by the scope",
		}, []string{LabelScope, LabelStatus}),
		Policy: metric.NewGauge(metric.GaugeOpts{
			ConfigName: Namespace + "_policy",
			Namespace: Namespace,
			Name: "policy",
			Help: "Number of policies currently loaded",
		}),
		PolicyRevision: metric.NewGauge(metric.GaugeOpts{
			ConfigName: Namespace + "_policy_max_revision",
			Namespace: Namespace,
			Name: "policy_max_revision",
			Help: "Highest policy revision number in the agent",
		}),
		// NOTE(review): uses LabelValueOutcomeFailure here while
		// EndpointRegenerationTotal above uses LabelValueOutcomeFail —
		// presumably intentional (different metric contracts); confirm.
		PolicyChangeTotal: metric.NewCounterVecWithLabels(metric.CounterOpts{
			ConfigName: Namespace + "_policy_change_total",
			Namespace: Namespace,
			Name: "policy_change_total",
			Help: "Number of policy changes by outcome",
		}, metric.Labels{
			{
				Name: LabelOutcome,
				Values: metric.NewValues(LabelValueOutcomeSuccess, LabelValueOutcomeFailure),
			},
		}),
		PolicyEndpointStatus: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_policy_endpoint_enforcement_status",
			Namespace: Namespace,
			Name: "policy_endpoint_enforcement_status",
			Help: "Number of endpoints labeled by policy enforcement status",
		}, []string{LabelPolicyEnforcement}),
		PolicyImplementationDelay: metric.NewHistogramVecWithLabels(metric.HistogramOpts{
			ConfigName: Namespace + "_policy_implementation_delay",
			Namespace: Namespace,
			Name: "policy_implementation_delay",
			Help: "Time between a policy change and it being fully deployed into the datapath",
		}, metric.Labels{
			{
				Name: LabelPolicySource,
				Values: metric.NewValues(string(source.Kubernetes), string(source.CustomResource), string(source.LocalAPI)),
			},
		}),
		PolicyIncrementalUpdateDuration: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_policy_incremental_update_duration",
			Namespace: Namespace,
			Name: "policy_incremental_update_duration",
			Help: "Time between learning about a new identity and it being fully added to all policies.",
			// 10µs .. 100s in decades.
			Buckets: prometheus.ExponentialBuckets(10e-6, 10, 8),
		}, []string{"scope"}),
		Identity: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_identity",
			Namespace: Namespace,
			Name: "identity",
			Help: "Number of identities currently allocated",
		}, []string{LabelType}),
		IdentityLabelSources: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_identity_label_sources",
			Namespace: Namespace,
			Name: "identity_label_sources",
			Help: "Number of identities which contain at least one label of the given label source",
		}, []string{LabelSource}),
		EventTS: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_event_ts",
			Namespace: Namespace,
			Name: "event_ts",
			Help: "Last timestamp when Cilium received an event from a control plane source, per resource and per action",
		}, []string{LabelEventSource, LabelScope, LabelAction}),
		EventLagK8s: metric.NewGauge(metric.GaugeOpts{
			ConfigName: Namespace + "_k8s_event_lag_seconds",
			Disabled: true,
			Namespace: Namespace,
			Name: "k8s_event_lag_seconds",
			Help: "Lag for Kubernetes events - computed value between receiving a CNI ADD event from kubelet and a Pod event received from kube-api-server",
			ConstLabels: prometheus.Labels{"source": LabelEventSourceK8s},
		}),
		ProxyRedirects: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_proxy_redirects",
			Namespace: Namespace,
			Name: "proxy_redirects",
			Help: "Number of redirects installed for endpoints, labeled by protocol",
		}, []string{LabelProtocolL7}),
		ProxyPolicyL7Total: metric.NewCounterVecWithLabels(metric.CounterOpts{
			ConfigName: Namespace + "_policy_l7_total",
			Namespace: Namespace,
			Name: "policy_l7_total",
			Help: "Number of total proxy requests handled",
		}, metric.Labels{
			{
				Name: LabelL7Rule,
				Values: metric.NewValues("received", "forwarded", "denied", "parse_errors"),
			},
			{
				Name: LabelL7ProxyType,
				Values: metric.NewValues("fqdn", "envoy"),
			},
		}),
		ProxyUpstreamTime: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_proxy_upstream_reply_seconds",
			Namespace: Namespace,
			Name: "proxy_upstream_reply_seconds",
			Help: "Seconds waited to get a reply from a upstream server",
		}, []string{"error", LabelProtocolL7, LabelScope}),
		ProxyDatapathUpdateTimeout: metric.NewCounter(metric.CounterOpts{
			ConfigName: Namespace + "_proxy_datapath_update_timeout_total",
			Disabled: true,
			Namespace: Namespace,
			Name: "proxy_datapath_update_timeout_total",
			Help: "Number of total datapath update timeouts due to FQDN IP updates",
		}),
		ConntrackGCRuns: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_runs_total",
			Namespace: Namespace,
			Subsystem: SubsystemDatapath,
			Name: "conntrack_gc_runs_total",
			Help: "Number of times that the conntrack garbage collector process was run " +
				"labeled by completion status",
		}, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}),
		ConntrackGCKeyFallbacks: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_key_fallbacks_total",
			Namespace: Namespace,
			Subsystem: SubsystemDatapath,
			Name: "conntrack_gc_key_fallbacks_total",
			Help: "Number of times a key fallback was needed when iterating over the BPF map",
		}, []string{LabelDatapathFamily, LabelProtocol}),
		ConntrackGCSize: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_entries",
			Namespace: Namespace,
			Subsystem: SubsystemDatapath,
			Name: "conntrack_gc_entries",
			Help: "The number of alive and deleted conntrack entries at the end " +
				"of a garbage collector run labeled by datapath family.",
		}, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}),
		NatGCSize: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemDatapath + "_nat_gc_entries",
			Disabled: true,
			Namespace: Namespace,
			Subsystem: SubsystemDatapath,
			Name: "nat_gc_entries",
			Help: "The number of alive and deleted nat entries at the end " +
				"of a garbage collector run labeled by datapath family.",
		}, []string{LabelDatapathFamily, LabelDirection, LabelStatus}),
		ConntrackGCDuration: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_duration_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemDatapath,
			Name: "conntrack_gc_duration_seconds",
			Help: "Duration in seconds of the garbage collector process " +
				"labeled by datapath family and completion status",
		}, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}),
		ConntrackInterval: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_interval_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemDatapath,
			Name: "conntrack_gc_interval_seconds",
			Help: "Interval in seconds between conntrack garbage collector runs",
		}, []string{"global"}),
		ConntrackDumpResets: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_dump_resets_total",
			Namespace: Namespace,
			Subsystem: SubsystemDatapath,
			Name: "conntrack_dump_resets_total",
			Help: "Number of conntrack dump resets. Happens when a BPF entry gets removed while dumping the map is in progress",
		}, []string{LabelDatapathArea, LabelDatapathName, LabelDatapathFamily}),
		SignalsHandled: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemDatapath + "_signals_handled_total",
			Namespace: Namespace,
			Subsystem: SubsystemDatapath,
			Name: "signals_handled_total",
			Help: "Number of times that the datapath signal handler process was run " +
				"labeled by signal type, data and completion status",
		}, []string{LabelSignalType, LabelSignalData, LabelStatus}),
		ServicesEventsCount: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_services_events_total",
			Namespace: Namespace,
			Name: "services_events_total",
			Help: "Number of services events labeled by action type",
		}, []string{LabelAction}),
		ServiceImplementationDelay: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_service_implementation_delay",
			Namespace: Namespace,
			Name: "service_implementation_delay",
			Help: "Duration in seconds to propagate the data plane programming of a service, its network and endpoints " +
				"from the time the service or the service pod was changed excluding the event queue latency",
		}, []string{LabelAction}),
		// Shared constructor so InitOperatorMetrics can build the same metric.
		ErrorsWarnings: newErrorsWarningsMetric(),
		ControllerRuns: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_controllers_runs_total",
			Namespace: Namespace,
			Name: "controllers_runs_total",
			Help: "Number of times that a controller process was run labeled by completion status",
		}, []string{LabelStatus}),
		ControllerRunsDuration: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_controllers_runs_duration_seconds",
			Namespace: Namespace,
			Name: "controllers_runs_duration_seconds",
			Help: "Duration in seconds of the controller process labeled by completion status",
		}, []string{LabelStatus}),
		SubprocessStart: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_subprocess_start_total",
			Namespace: Namespace,
			Name: "subprocess_start_total",
			Help: "Number of times that Cilium has started a subprocess, labeled by subsystem",
		}, []string{LabelSubsystem}),
		KubernetesEventProcessed: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_kubernetes_events_total",
			Namespace: Namespace,
			Name: "kubernetes_events_total",
			Help: "Number of Kubernetes events processed labeled by scope, action and execution result",
		}, []string{LabelScope, LabelAction, LabelStatus}),
		KubernetesEventReceived: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_kubernetes_events_received_total",
			Namespace: Namespace,
			Name: "kubernetes_events_received_total",
			Help: "Number of Kubernetes events received labeled by scope, action, valid data and equalness",
		}, []string{LabelScope, LabelAction, "valid", "equal"}),
		KubernetesAPIInteractions: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_" + SubsystemK8sClient + "_api_latency_time_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemK8sClient,
			Name: "api_latency_time_seconds",
			Help: "Duration of processed API calls labeled by path and method.",
		}, []string{LabelPath, LabelMethod}),
		KubernetesAPIRateLimiterLatency: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_" + SubsystemK8sClient + "_rate_limiter_duration_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemK8sClient,
			Name: "rate_limiter_duration_seconds",
			Help: "Kubernetes client rate limiter latency in seconds.",
			Buckets: []float64{0.005, 0.025, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 15.0, 30.0, 60.0},
		}, []string{}),
		KubernetesAPICallsTotal: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemK8sClient + "_api_calls_total",
			Namespace: Namespace,
			Subsystem: SubsystemK8sClient,
			Name: "api_calls_total",
			Help: "Number of API calls made to kube-apiserver labeled by host, method and return code.",
		}, []string{"host", LabelMethod, LabelAPIReturnCode}),
		TerminatingEndpointsEvents: metric.NewCounter(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemK8s + "_terminating_endpoints_events_total",
			Namespace: Namespace,
			Subsystem: SubsystemK8s,
			Name: "terminating_endpoints_events_total",
			Help: "Number of terminating endpoint events received from Kubernetes",
		}),
		IPAMEvent: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_ipam_events_total",
			Namespace: Namespace,
			Name: "ipam_events_total",
			Help: "Number of IPAM events received labeled by action and datapath family type",
		}, []string{LabelAction, LabelDatapathFamily}),
		IPAMCapacity: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_ipam_capacity",
			Namespace: Namespace,
			Name: "ipam_capacity",
			Help: "Total number of IPs in the IPAM pool labeled by family",
		}, []string{LabelDatapathFamily}),
		KVStoreOperationsDuration: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_" + SubsystemKVStore + "_operations_duration_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemKVStore,
			Name: "operations_duration_seconds",
			Help: "Duration in seconds of kvstore operations",
		}, []string{LabelScope, LabelKind, LabelAction, LabelOutcome}),
		KVStoreEventsQueueDuration: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_" + SubsystemKVStore + "_events_queue_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemKVStore,
			Name: "events_queue_seconds",
			Help: "Seconds waited before a received event was queued",
			Buckets: []float64{.002, .005, .01, .015, .025, .05, .1, .25, .5, .75, 1},
		}, []string{LabelScope, LabelAction}),
		KVStoreQuorumErrors: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemKVStore + "_quorum_errors_total",
			Namespace: Namespace,
			Subsystem: SubsystemKVStore,
			Name: "quorum_errors_total",
			Help: "Number of quorum errors",
		}, []string{LabelError}),
		IPCacheErrorsTotal: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemIPCache + "_errors_total",
			Namespace: Namespace,
			Subsystem: SubsystemIPCache,
			Name: "errors_total",
			Help: "Number of errors interacting with the IP to Identity cache",
		}, []string{LabelType, LabelError}),
		IPCacheEventsTotal: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemIPCache + "_events_total",
			Disabled: true,
			Namespace: Namespace,
			Subsystem: SubsystemIPCache,
			Name: "events_total",
			Help: "Number of events interacting with the IP to Identity cache",
		}, []string{LabelType}),
		FQDNGarbageCollectorCleanedTotal: metric.NewCounter(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemFQDN + "_gc_deletions_total",
			Namespace: Namespace,
			Subsystem: SubsystemFQDN,
			Name: "gc_deletions_total",
			Help: "Number of FQDNs that have been cleaned on FQDN Garbage collector job",
		}),
		FQDNActiveNames: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemFQDN + "_active_names",
			Disabled: true,
			Namespace: Namespace,
			Subsystem: SubsystemFQDN,
			Name: "active_names",
			Help: "Number of domains inside the DNS cache that have not expired (by TTL), per endpoint",
		}, []string{LabelPeerEndpoint}),
		FQDNActiveIPs: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemFQDN + "_active_ips",
			Disabled: true,
			Namespace: Namespace,
			Subsystem: SubsystemFQDN,
			Name: "active_ips",
			Help: "Number of IPs inside the DNS cache associated with a domain that has not expired (by TTL), per endpoint",
		}, []string{LabelPeerEndpoint}),
		FQDNAliveZombieConnections: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemFQDN + "_alive_zombie_connections",
			Disabled: true,
			Namespace: Namespace,
			Subsystem: SubsystemFQDN,
			Name: "alive_zombie_connections",
			Help: "Number of IPs associated with domains that have expired (by TTL) yet still associated with an active connection (aka zombie), per endpoint",
		}, []string{LabelPeerEndpoint}),
		FQDNSelectors: metric.NewGauge(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemFQDN + "_selectors",
			Namespace: Namespace,
			Subsystem: SubsystemFQDN,
			Name: "selectors",
			Help: "Number of registered ToFQDN selectors",
		}),
		FQDNSemaphoreRejectedTotal: metric.NewCounter(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemFQDN + "_semaphore_rejected_total",
			Disabled: true,
			Namespace: Namespace,
			Subsystem: SubsystemFQDN,
			Name: "semaphore_rejected_total",
			Help: "Number of DNS request rejected by the DNS Proxy's admission semaphore",
		}),
		BPFSyscallDuration: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_" + SubsystemBPF + "_syscall_duration_seconds",
			Disabled: true,
			Namespace: Namespace,
			Subsystem: SubsystemBPF,
			Name: "syscall_duration_seconds",
			Help: "Duration of BPF system calls",
		}, []string{LabelOperation, LabelOutcome}),
		BPFMapOps: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemBPF + "_map_ops_total",
			Namespace: Namespace,
			Subsystem: SubsystemBPF,
			Name: "map_ops_total",
			Help: "Total operations on map, tagged by map name",
		}, []string{LabelMapName, LabelOperation, LabelOutcome}),
		BPFMapCapacity: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemBPF + "_map_capacity",
			Namespace: Namespace,
			Subsystem: SubsystemBPF,
			Name: "map_capacity",
			Help: "Capacity of map, tagged by map group. All maps with a capacity of 65536 are grouped under 'default'",
		}, []string{LabelMapGroup}),
		VersionMetric: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_version",
			Namespace: Namespace,
			Name: "version",
			Help: "Cilium version",
		}, []string{LabelVersion, LabelVersionRevision, LabelArch}),
		APILimiterWaitHistoryDuration: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_wait_history_duration_seconds",
			Disabled: true,
			Namespace: Namespace,
			Subsystem: SubsystemAPILimiter,
			Name: "wait_history_duration_seconds",
			Help: "Histogram over duration of waiting period for API calls subjects to rate limiting",
		}, []string{"api_call"}),
		APILimiterWaitDuration: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_wait_duration_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemAPILimiter,
			Name: "wait_duration_seconds",
			Help: "Current wait time for api calls",
		}, []string{"api_call", "value"}),
		APILimiterProcessingDuration: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_processing_duration_seconds",
			Namespace: Namespace,
			Subsystem: SubsystemAPILimiter,
			Name: "processing_duration_seconds",
			Help: "Current processing time of api call",
		}, []string{"api_call", "value"}),
		APILimiterRequestsInFlight: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_requests_in_flight",
			Namespace: Namespace,
			Subsystem: SubsystemAPILimiter,
			Name: "requests_in_flight",
			Help: "Current requests in flight",
		}, []string{"api_call", "value"}),
		APILimiterRateLimit: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_rate_limit",
			Namespace: Namespace,
			Subsystem: SubsystemAPILimiter,
			Name: "rate_limit",
			Help: "Current rate limiting configuration",
		}, []string{"api_call", "value"}),
		APILimiterAdjustmentFactor: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_adjustment_factor",
			Namespace: Namespace,
			Subsystem: SubsystemAPILimiter,
			Name: "adjustment_factor",
			Help: "Current adjustment factor while auto adjusting",
		}, []string{"api_call"}),
		APILimiterProcessedRequests: metric.NewCounterVec(metric.CounterOpts{
			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_processed_requests_total",
			Namespace: Namespace,
			Subsystem: SubsystemAPILimiter,
			Name: "processed_requests_total",
			Help: "Total number of API requests processed",
		}, []string{"api_call", LabelOutcome, LabelAPIReturnCode}),
		EndpointPropagationDelay: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_endpoint_propagation_delay_seconds",
			Namespace: Namespace,
			Name: "endpoint_propagation_delay_seconds",
			Help: "CiliumEndpoint roundtrip propagation delay in seconds",
			Buckets: []float64{.05, .1, 1, 5, 30, 60, 120, 240, 300, 600},
		}, []string{}),
		NodeHealthConnectivityStatus: metric.NewGaugeVec(metric.GaugeOpts{
			ConfigName: Namespace + "_node_health_connectivity_status",
			Namespace: Namespace,
			Name: "node_health_connectivity_status",
			Help: "The number of endpoints with last observed status of both ICMP and HTTP connectivity between the current Cilium agent and other Cilium nodes",
		}, []string{
			LabelSourceCluster,
			LabelSourceNodeName,
			LabelType,
			LabelConnectivityStatus,
		}),
		NodeHealthConnectivityLatency: metric.NewHistogramVec(metric.HistogramOpts{
			ConfigName: Namespace + "_node_health_connectivity_latency_seconds",
			Namespace: Namespace,
			Name: "node_health_connectivity_latency_seconds",
			Help: "The histogram for last observed latency between the current Cilium agent and other Cilium nodes in seconds",
			Buckets: []float64{0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0},
		}, []string{
			LabelSourceCluster,
			LabelSourceNodeName,
			LabelType,
			LabelProtocol,
			LabelAddressType,
		}),
		// The workqueue metrics are shared with the package-level vars
		// declared above; reuse the existing instances rather than
		// constructing new ones.
		WorkQueueDepth: WorkQueueDepth,
		WorkQueueAddsTotal: WorkQueueAddsTotal,
		WorkQueueLatency: WorkQueueLatency,
		WorkQueueDuration: WorkQueueDuration,
		WorkQueueUnfinishedWork: WorkQueueUnfinishedWork,
		WorkQueueLongestRunningProcessor: WorkQueueLongestRunningProcessor,
		WorkQueueRetries: WorkQueueRetries,
	}
	// EndpointMaxIfindex is constructed outside the literal; it is disabled
	// by default.
	ifindexOpts := metric.GaugeOpts{
		ConfigName: Namespace + "_endpoint_max_ifindex",
		Disabled: true,
		Namespace: Namespace,
		Name: "endpoint_max_ifindex",
		Help: "Maximum interface index observed for existing endpoints",
	}
	lm.EndpointMaxIfindex = metric.NewGauge(ifindexOpts)
	// Pre-populate static label values so these series exist immediately.
	v := version.GetCiliumVersion()
	lm.VersionMetric.WithLabelValues(v.Version, v.Revision, v.Arch)
	lm.BPFMapCapacity.WithLabelValues("default").Set(DefaultMapCapacity)
	// Mirror every field into the package-level variables for backwards
	// compatibility with callers that still use the globals.
	BootstrapTimes = lm.BootstrapTimes
	APIInteractions = lm.APIInteractions
	NodeHealthConnectivityStatus = lm.NodeHealthConnectivityStatus
	NodeHealthConnectivityLatency = lm.NodeHealthConnectivityLatency
	Endpoint = lm.Endpoint
	EndpointMaxIfindex = lm.EndpointMaxIfindex
	EndpointRegenerationTotal = lm.EndpointRegenerationTotal
	EndpointStateCount = lm.EndpointStateCount
	EndpointRegenerationTimeStats = lm.EndpointRegenerationTimeStats
	EndpointPropagationDelay = lm.EndpointPropagationDelay
	Policy = lm.Policy
	PolicyRevision = lm.PolicyRevision
	PolicyChangeTotal = lm.PolicyChangeTotal
	PolicyEndpointStatus = lm.PolicyEndpointStatus
	PolicyImplementationDelay = lm.PolicyImplementationDelay
	PolicyIncrementalUpdateDuration = lm.PolicyIncrementalUpdateDuration
	Identity = lm.Identity
	IdentityLabelSources = lm.IdentityLabelSources
	EventTS = lm.EventTS
	EventLagK8s = lm.EventLagK8s
	ProxyRedirects = lm.ProxyRedirects
	ProxyPolicyL7Total = lm.ProxyPolicyL7Total
	ProxyUpstreamTime = lm.ProxyUpstreamTime
	ProxyDatapathUpdateTimeout = lm.ProxyDatapathUpdateTimeout
	ConntrackGCRuns = lm.ConntrackGCRuns
	ConntrackGCKeyFallbacks = lm.ConntrackGCKeyFallbacks
	ConntrackGCSize = lm.ConntrackGCSize
	NatGCSize = lm.NatGCSize
	ConntrackGCDuration = lm.ConntrackGCDuration
	ConntrackInterval = lm.ConntrackInterval
	ConntrackDumpResets = lm.ConntrackDumpResets
	SignalsHandled = lm.SignalsHandled
	ServicesEventsCount = lm.ServicesEventsCount
	ServiceImplementationDelay = lm.ServiceImplementationDelay
	ErrorsWarnings = lm.ErrorsWarnings
	ControllerRuns = lm.ControllerRuns
	ControllerRunsDuration = lm.ControllerRunsDuration
	SubprocessStart = lm.SubprocessStart
	KubernetesEventProcessed = lm.KubernetesEventProcessed
	KubernetesEventReceived = lm.KubernetesEventReceived
	KubernetesAPIInteractions = lm.KubernetesAPIInteractions
	KubernetesAPIRateLimiterLatency = lm.KubernetesAPIRateLimiterLatency
	KubernetesAPICallsTotal = lm.KubernetesAPICallsTotal
	TerminatingEndpointsEvents = lm.TerminatingEndpointsEvents
	IPAMEvent = lm.IPAMEvent
	IPAMCapacity = lm.IPAMCapacity
	KVStoreOperationsDuration = lm.KVStoreOperationsDuration
	KVStoreEventsQueueDuration = lm.KVStoreEventsQueueDuration
	KVStoreQuorumErrors = lm.KVStoreQuorumErrors
	FQDNGarbageCollectorCleanedTotal = lm.FQDNGarbageCollectorCleanedTotal
	FQDNActiveNames = lm.FQDNActiveNames
	FQDNActiveIPs = lm.FQDNActiveIPs
	FQDNAliveZombieConnections = lm.FQDNAliveZombieConnections
	FQDNSelectors = lm.FQDNSelectors
	FQDNSemaphoreRejectedTotal = lm.FQDNSemaphoreRejectedTotal
	IPCacheErrorsTotal = lm.IPCacheErrorsTotal
	IPCacheEventsTotal = lm.IPCacheEventsTotal
	BPFSyscallDuration = lm.BPFSyscallDuration
	BPFMapOps = lm.BPFMapOps
	BPFMapCapacity = lm.BPFMapCapacity
	VersionMetric = lm.VersionMetric
	APILimiterWaitHistoryDuration = lm.APILimiterWaitHistoryDuration
	APILimiterWaitDuration = lm.APILimiterWaitDuration
	APILimiterProcessingDuration = lm.APILimiterProcessingDuration
	APILimiterRequestsInFlight = lm.APILimiterRequestsInFlight
	APILimiterRateLimit = lm.APILimiterRateLimit
	APILimiterAdjustmentFactor = lm.APILimiterAdjustmentFactor
	APILimiterProcessedRequests = lm.APILimiterProcessedRequests
	return lm
}
// InitOperatorMetrics is used to init legacy metrics necessary during operator init.
// Only the errors/warnings counter is needed that early; the remaining legacy
// metrics are created later by NewLegacyMetrics.
func InitOperatorMetrics() {
	ErrorsWarnings = newErrorsWarningsMetric()
}
// newErrorsWarningsMetric builds the counter vector that tracks logged
// errors and warnings, labeled by log level and subsystem. It is shared by
// NewLegacyMetrics and InitOperatorMetrics.
func newErrorsWarningsMetric() metric.Vec[metric.Counter] {
	opts := metric.CounterOpts{
		ConfigName: Namespace + "_errors_warnings_total",
		Namespace:  Namespace,
		Name:       "errors_warnings_total",
		Help:       "Number of total errors in cilium-agent instances",
	}
	labels := []string{"level", "subsystem"}
	return metric.NewCounterVec(opts, labels)
}
// GaugeWithThreshold is a prometheus gauge that registers itself with
// prometheus if over a threshold value and unregisters when under.
type GaugeWithThreshold struct {
	// reg is the registry to register the gauge to. If nil the global registry
	// is used.
	// NOTE(review): Set is a no-op when reg is nil, so there appears to be no
	// actual fallback to a global registry — confirm against callers.
	reg *Registry
	// gauge is the underlying prometheus gauge; only registered with reg
	// while the last written value exceeds threshold.
	gauge prometheus.Gauge
	// threshold is the value above which the gauge becomes visible.
	threshold float64
	// active tracks whether gauge is currently registered with reg.
	active bool
}
// Set stores value in the gauge and registers/unregisters the underlying
// metric so that it is only exposed while the value exceeds the configured
// threshold. It is a no-op when no registry is attached.
func (gwt *GaugeWithThreshold) Set(value float64) {
	if gwt.reg == nil {
		return
	}
	above := value > gwt.threshold
	switch {
	case gwt.active && !above:
		// Dropped below the threshold: hide the metric again. On a failed
		// unregister the gauge stays marked active.
		gwt.active = !gwt.reg.Unregister(gwt.gauge)
		if gwt.active {
			gwt.reg.params.Logger.Warn("Failed to unregister metric", logfields.MetricConfig, gwt.gauge.Desc())
		}
	case !gwt.active && above:
		// Crossed the threshold: expose the metric.
		if err := gwt.reg.Register(gwt.gauge); err != nil {
			gwt.reg.params.Logger.Warn("Failed to register metric",
				logfields.Error, err,
				logfields.MetricConfig, gwt.gauge.Desc(),
			)
		} else {
			gwt.active = true
		}
	}
	// Always record the value, even while the gauge is unregistered.
	gwt.gauge.Set(value)
}
// NewGaugeWithThreshold creates a new GaugeWithThreshold bound to this
// registry. The gauge is built in the package namespace from the given name,
// subsystem, help text and constant labels, and is only registered with the
// registry while its value exceeds threshold (see Set).
func (reg *Registry) NewGaugeWithThreshold(name, subsystem, desc string, labels map[string]string, threshold float64) *GaugeWithThreshold {
	return &GaugeWithThreshold{
		reg: reg,
		gauge: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: Namespace,
			Subsystem: subsystem,
			Name: name,
			Help: desc,
			ConstLabels: labels,
		}),
		threshold: threshold,
		// Not registered until the first Set above threshold.
		active: false,
	}
}
// NewBPFMapPressureGauge creates a new GaugeWithThreshold for the
// cilium_bpf_map_pressure metric with the map name as constant label.
func (reg *Registry) NewBPFMapPressureGauge(mapname string, threshold float64) *GaugeWithThreshold {
	constLabels := map[string]string{
		LabelMapName: mapname,
	}
	return reg.NewGaugeWithThreshold(
		"map_pressure",
		SubsystemBPF,
		"Fill percentage of map, tagged by map name",
		constLabels,
		threshold,
	)
}
// Reinitialize replaces the resolved registry's inner prometheus registry
// with a fresh pedantic one and re-registers all metrics. Blocks until the
// registry promise resolves; does nothing if resolution fails.
func Reinitialize() {
	if reg, err := registry.Await(context.Background()); err == nil {
		reg.inner = prometheus.NewPedanticRegistry()
		reg.registerMetrics()
	}
}
// Register registers a collector with the global registry, deferring the
// registration if the registry is not yet initialized.
func Register(c prometheus.Collector) error {
	var regErr error
	withRegistry(func(r *Registry) {
		regErr = r.Register(c)
	})
	return regErr
}
// RegisterList registers a list of collectors with the global registry.
// If registration of one collector fails, no collector is registered and
// the error is returned. As with Register, a nil error is returned when
// the registry is not yet available and the registration is deferred.
func RegisterList(list []prometheus.Collector) error {
	// Capture the error via the closure, mirroring Register above. The
	// original code dropped reg.RegisterList's error and always returned
	// nil, contradicting its own documentation.
	var err error
	withRegistry(func(reg *Registry) {
		err = reg.RegisterList(list)
	})
	return err
}
// Unregister unregisters a collector from the global registry. It
// returns false when the registry does not become available within one
// second or the collector was not registered.
func Unregister(c prometheus.Collector) bool {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	reg, err := registry.Await(ctx)
	if err != nil {
		return false
	}
	return reg.Unregister(c)
}
// withRegistry waits up to 1 second for the registry promise to resolve, if it does not then
// we might be calling this function before hive has been started, so to avoid a deadlock,
// wait in a routine so actions are deferred until the registry is initialized.
func withRegistry(fn func(reg *Registry)) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if reg, err := registry.Await(ctx); err == nil {
		// Registry is ready: run the action synchronously.
		fn(reg)
		return
	}
	// Not ready yet: defer the action until the registry resolves.
	go func() {
		if reg, err := registry.Await(context.Background()); err == nil {
			fn(reg)
		}
	}()
}
// GetCounterValue returns the value currently stored in the counter, or
// 0 if the counter cannot be read.
func GetCounterValue(m prometheus.Counter) float64 {
	var out dto.Metric
	if err := m.Write(&out); err != nil {
		return 0
	}
	if c := out.Counter; c != nil && c.Value != nil {
		return *c.Value
	}
	return 0
}
// GetGaugeValue returns the value currently stored in the gauge, or 0 if
// the gauge cannot be read. This function is useful in tests.
func GetGaugeValue(m prometheus.Gauge) float64 {
	var out dto.Metric
	if err := m.Write(&out); err != nil {
		return 0
	}
	if g := out.Gauge; g != nil && g.Value != nil {
		return *g.Value
	}
	return 0
}
// Error2Outcome converts an error to LabelOutcome
func Error2Outcome(err error) string {
if err != nil {
return LabelValueOutcomeFail
}
return LabelValueOutcomeSuccess
}
// LabelOutcome2Code maps a label outcome onto an HTTP-style status code:
// 200 for success, 500 for anything else.
func LabelOutcome2Code(outcome string) int {
	switch outcome {
	case LabelValueOutcomeSuccess:
		return 200
	default:
		return 500
	}
}
// BoolToFloat64 converts a bool to a float64: 1 for true, 0 for false.
func BoolToFloat64(v bool) float64 {
	if !v {
		return 0
	}
	return 1
}
// In general, most bpf maps are allocated to occupy a 16-bit key size.
// To reduce the number of metrics that need to be emitted for map capacity,
// we assume a default map size of 2^16 entries for all maps, which can be
// assumed unless specified otherwise.
//
// DefaultMapCapacity is that assumed entry count; UpdateMapCapacity only
// emits a capacity metric for maps that deviate from it.
const DefaultMapCapacity = 65536
// UpdateMapCapacity records the capacity of the given map group. Zero
// and the default capacity are skipped to keep metric cardinality low.
func UpdateMapCapacity(groupName string, capacity uint32) {
	switch capacity {
	case 0, DefaultMapCapacity:
		return
	}
	BPFMapCapacity.WithLabelValues(groupName).Set(float64(capacity))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"net/http"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/metrics/metric"
"github.com/cilium/cilium/pkg/spanstat"
)
// APIEventTSHelper is intended to be a global middleware to track metrics
// around API calls.
// It records the timestamp of an API call in the provided gauge and the
// call duration in the provided histogram (see ServeHTTP).
type APIEventTSHelper struct {
	// Next is the handler invoked after the metrics bookkeeping.
	Next http.Handler
	// TSGauge records the timestamp of the most recent API call, labelled
	// by event source, short path and method.
	TSGauge metric.Vec[metric.Gauge]
	// Histogram observes the request duration, labelled by short path,
	// method and response status code.
	Histogram metric.Vec[metric.Observer]
}
// ResponderWrapper wraps an http.ResponseWriter to capture the status
// code written to it.
type ResponderWrapper struct {
	http.ResponseWriter
	// code is the last status code passed to WriteHeader.
	code int
}
// WriteHeader records the response status code before delegating to the
// wrapped http.ResponseWriter.
func (w *ResponderWrapper) WriteHeader(code int) {
	w.code = code
	w.ResponseWriter.WriteHeader(code)
}
// getShortPath returns the API path trimmed after the 3rd slash.
// examples:
//
//	"/v1/config" -> "/v1/config"
//	"/v1/endpoint/cilium-local:0" -> "/v1/endpoint"
//	"/v1/endpoint/container-id:597.." -> "/v1/endpoint"
func getShortPath(s string) string {
	slashes := 0
	for i := 0; i < len(s); i++ {
		if s[i] != '/' {
			continue
		}
		slashes++
		// Cut just before the third slash; paths with fewer slashes are
		// returned unchanged.
		if slashes == 3 {
			return s[:i]
		}
	}
	return s
}
// ServeHTTP implements the http.Handler interface. It stamps the API
// timestamp gauge, chains to the next handler, and then observes the
// request duration labelled by short path, method and status code.
func (m *APIEventTSHelper) ServeHTTP(r http.ResponseWriter, req *http.Request) {
	var path string
	validReq := req != nil && req.URL != nil && req.URL.Path != ""
	if validReq {
		path = getShortPath(req.URL.Path)
		m.TSGauge.WithLabelValues(LabelEventSourceAPI, path, req.Method).SetToCurrentTime()
	}
	span := spanstat.Start()
	wrapped := &ResponderWrapper{ResponseWriter: r}
	m.Next.ServeHTTP(wrapped, req)
	if validReq {
		seconds := span.End(true).Total().Seconds()
		m.Histogram.WithLabelValues(path, req.Method, strconv.Itoa(wrapped.code)).Observe(seconds)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"fmt"
"io"
"math"
"runtime"
"slices"
"strings"
"github.com/mitchellh/go-wordwrap"
"github.com/cilium/cilium/pkg/time"
)
// PlotSamples plots the given samples as a line graph using the unicode braille characters.
// 'w' receives the rendered plot. If 'rate' is set the cumulative samples are converted
// to a per-second rate using 'samplingInterval'. 'name' and 'labels' form the title,
// 'timeSpan' labels the X axis and 'sb' marks which samples actually exist. The samples
// are expected newest-first on input (they are reversed in place below).
func PlotSamples(w io.Writer, rate bool, name, labels string, timeSpan, samplingInterval time.Duration, samples []float32, sb SampleBitmap) {
	// Do not let panics propagate from here. Log the sample input that caused the panic.
	defer func() {
		if err := recover(); err != nil {
			// NOTE(review): Caller(2) is best-effort attribution and may not
			// point at the actual panic site.
			_, file, line, _ := runtime.Caller(2)
			fmt.Fprintf(w, "panic: samples=%v, err=%s, source=%s:%d\n", samples, err, file, line)
		}
	}()
	title := name
	// Reverse the samples (samples is a fixed size array, thus was passed by value).
	// We want them ordered from oldest to newest the same as our X-axis.
	slices.Reverse(samples[:])
	if rate {
		// Compute the rate per second by iterating from oldest to newest and
		// subtracting the previous sample and dividing by our sampling
		// interval.
		prev := samples[0]
		for i := 1; i < len(samples); i++ {
			s := samples[i]
			samples[i] = (s - prev) / float32(samplingInterval.Seconds())
			prev = s
		}
		// The oldest sample has no predecessor, so its rate is unknown.
		samples[0] = 0
		title += " (rate per second)"
	}
	// sampleExists reports whether the sample at oldest-first index 'index'
	// was collected; the bitmap is indexed newest-first, hence the flip.
	sampleExists := func(index int) bool {
		if index < 0 || index >= len(samples) {
			return false
		}
		return sb.exists(len(samples) - 1 - int(index))
	}
	// Set up coordinates. We have two systems here, one for character
	// coordinates (width, height, originX, originY, plotHeight, plotWidth)
	// and one for the "dot" coordinates (plotHeightDots, plotWidthDots) using
	// the braille symbols and thus 4x the height and 2x the width.
	const width, height = 80, 10
	originX, originY := 11, 7
	plotHeight := height - 3
	plotHeightDots := plotHeight * 4
	plotWidth := width - originX - 1
	plotWidthDots := plotWidth * 2
	indentPlotOriginX := strings.Repeat(" ", originX)
	// Write the name of the metric at the center.
	fmt.Fprintf(w, "%s%s%s\n",
		indentPlotOriginX,
		strings.Repeat(" ", plotWidth/2-len(title)/2),
		title)
	// Write out the labels, also centered, but leave some margins.
	if labels != "" {
		for line := range strings.SplitSeq(wordwrap.WrapString(labels, uint(plotWidth-4)), "\n") {
			fmt.Fprintf(w, "%s%s[ %s ]\n",
				indentPlotOriginX,
				strings.Repeat(" ", plotWidth/2-(len(line)+4)/2),
				line)
		}
	}
	// Set up a canvas into which to draw in.
	canvas := make([]rune, width*height)
	for x := range width {
		for y := range height {
			if x >= originX && y <= originY {
				// initialize the plot area to the braille base. this way we can
				// just OR in the dots we want to show.
				canvas[y*width+x] = '\u2800'
			} else {
				canvas[y*width+x] = ' '
			}
		}
	}
	// setDot sets a braille dot within the dot coordinate system
	// (0,0)...(plotWidthDots,plotHeightDots).
	setDot := func(x, y int) {
		var braillePixels = [][]rune{
			{0x1, 0x2, 0x4, 0x40},    // left dots (even 'x')
			{0x08, 0x10, 0x20, 0x80}, // right
		}
		pos := rune((plotHeightDots - y - 1) % 4)
		canvas[(originY-y/4)*width+originX+x/2] |= braillePixels[x%2][pos]
	}
	// writeText overwrites canvas characters starting at character
	// coordinates (y, x) with the formatted string.
	writeText := func(y, x int, format string, args ...any) {
		copy(canvas[y*width+x:], []rune(fmt.Sprintf(format, args...)))
	}
	// Calculate the graph minimum and maximum values
	minY, maxY := float32(math.Inf(+1)), float32(math.Inf(-1))
	for _, y := range samples {
		minY = min(minY, y)
		maxY = max(maxY, y)
	}
	midY := (maxY + minY) / 2
	// Figure out how to show the Y units
	suffix := ""
	if strings.Contains(name, "seconds") {
		suffix = "s"
	}
	unit, multp := chooseUnit(float64(maxY))
	fmtY := func(v float32) string {
		return fmt.Sprintf("%.1f%s%s", v*float32(multp), unit, suffix)
	}
	// Render the labels and the box.
	writeText(0, originX-1, "╭"+strings.Repeat("─", width-originX-1)+"╮")
	writeText(1, 1, "%8s ┤", fmtY(maxY))
	writeText(1, width-1, "│")
	writeText(2, originX-1, "│")
	writeText(2, width-1, "│")
	writeText(3, originX-1, "│")
	writeText(3, width-1, "│")
	writeText(4, 1, "%8s ┤", fmtY(midY))
	writeText(4, width-1, "│")
	writeText(5, originX-1, "│")
	writeText(5, width-1, "│")
	writeText(6, originX-1, "│")
	writeText(6, width-1, "│")
	writeText(7, 1, "%8s ┤", fmtY(minY))
	writeText(7, width-1, "│")
	writeText(8, originX-1, "╰"+strings.Repeat("─", width-originX-1)+"╯")
	writeText(8, originX+3, "┬")
	writeText(9, originX, "-%.0fmin", timeSpan.Minutes())
	// NOTE(review): the next call duplicates the writeText(8, originX+3, "┬")
	// two lines above; harmless but redundant.
	writeText(8, originX+3, "┬")
	writeText(8, originX+3+((width-10)/2)-3, "┬")
	writeText(9, originX+((width-10)/2)-3, "-%.0fmin", timeSpan.Minutes()/2)
	writeText(8, width-3, "┬")
	writeText(9, width-4, "now")
	// Normalize negative values for plotting
	if minY < 0.0 {
		for i := range samples {
			samples[i] += -minY
		}
		maxY += -minY
		minY = 0.0
	}
	// Avoid division by zero in mapToY when every sample is zero.
	if maxY == 0.0 {
		maxY = 0.000001
	}
	// getSample returns the interpolated sample for the given x position
	// in the dot coordinates.
	getSample := func(x int) (float32, bool) {
		// find which sample is closest to x (rounding down)
		pos := float64(x) / float64(plotWidthDots)
		index := int(float64(len(samples)-1) * pos)
		if !sampleExists(int(index)) {
			return 0.0, false
		} else if !sampleExists(index + 1) {
			// the next sample is either out of range or not present,
			// just return this sample without any interpolation.
			return samples[index], true
		}
		// interpolate between two samples for estimate value of 'x'
		prevPos := float64(index) / float64(len(samples)-1)
		nextPos := float64(index+1) / float64(len(samples)-1)
		rel := float32((pos - prevPos) / (nextPos - prevPos))
		return samples[index] + (samples[index+1]-samples[index])*rel, true
	}
	// mapToY maps the value to the Y position
	// NOTE(review): divides by maxY rather than (maxY - minY); minY is 0 after
	// the normalization above whenever it was negative, so this only slightly
	// compresses plots of strictly-positive data.
	mapToY := func(v float32) int {
		return int(((v - minY) / maxY) * (float32(plotHeightDots) - 0.001))
	}
	// Plot the samples (up to second to last column)
	for x := range plotWidthDots - 1 {
		if v, exists := getSample(x); exists {
			setDot(x, mapToY(v))
		}
	}
	// Plot the last sample without interpolation so that we always show
	// the latest sample even if it's the only one.
	if sampleExists(len(samples) - 1) {
		setDot(
			plotWidthDots-1,
			mapToY(samples[len(samples)-1]),
		)
	}
	// Finally write out our canvas.
	for i := range height {
		fmt.Fprintln(w, string(canvas[i*width:i*width+width]))
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"errors"
"fmt"
"log/slog"
"net/http"
"regexp"
"strings"
"github.com/cilium/hive"
"github.com/cilium/hive/cell"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
dto "github.com/prometheus/client_model/go"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
metricpkg "github.com/cilium/cilium/pkg/metrics/metric"
"github.com/cilium/cilium/pkg/option"
)
// defaultRegistryConfig is the default registry configuration: serving
// of prometheus metrics is off until an address is configured.
var defaultRegistryConfig = RegistryConfig{
	PrometheusServeAddr: "",
}
// RegistryConfig holds the user-facing configuration of the metrics
// registry, populated from the flags declared in Flags below.
type RegistryConfig struct {
	// PrometheusServeAddr IP:Port on which to serve prometheus metrics (pass ":Port" to bind on all interfaces, "" is off)
	PrometheusServeAddr string
	// Metrics is a list of metrics to be enabled or disabled, format is `+`/`-` + `{metric name}`
	Metrics []string
}
// Flags registers the registry's command-line flags, defaulting to the
// receiver's current values.
func (rc RegistryConfig) Flags(flags *pflag.FlagSet) {
	flags.StringSlice("metrics", rc.Metrics, "Metrics that should be enabled or disabled from the default metric list. (+metric_foo to enable metric_foo, -metric_bar to disable metric_bar)")
	flags.String("prometheus-serve-addr", rc.PrometheusServeAddr, "IP:Port on which to serve prometheus metrics (pass \":Port\" to bind on all interfaces, \"\" is off)")
}
// RegistryParams are the parameters needed to construct a Registry
type RegistryParams struct {
	cell.In
	// Logger is used for registration warnings and serve-address logging.
	Logger *slog.Logger
	// Shutdowner shuts the process down if the metrics HTTP server fails.
	Shutdowner hive.Shutdowner
	// Lifecycle receives the HTTP server start/stop hooks.
	Lifecycle cell.Lifecycle
	// AutoMetrics are the hive-provided metrics, toggled via --metrics.
	AutoMetrics []metricpkg.WithMetadata `group:"hive-metrics"`
	Config       RegistryConfig
	DaemonConfig *option.DaemonConfig
}
// Registry is a cell around a prometheus registry. This registry starts an HTTP server as part of its lifecycle
// on which all enabled metrics will be available. A reference to this registry can also be used to dynamically
// register or unregister `prometheus.Collector`s.
type Registry struct {
	// inner registry of metrics.
	// Served under the default /metrics endpoint. Each collector is wrapped with
	// [metric.EnabledCollector] to only collect enabled metrics.
	inner *prometheus.Registry
	// collectors holds all registered collectors. Used to periodically sample the
	// metrics.
	collectors collectorSet
	// params carries the constructor dependencies (logger, lifecycle,
	// config, ...).
	params RegistryParams
}
// Gather collects and returns all metric families from the inner
// registry. It is used by the operator metrics command.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
	return r.inner.Gather()
}
// AddServerRuntimeHooks appends lifecycle hooks that serve the /metrics
// endpoint over HTTP. With an empty PrometheusServeAddr this is a no-op.
func (reg *Registry) AddServerRuntimeHooks() {
	addr := reg.params.Config.PrometheusServeAddr
	if addr == "" {
		return
	}
	// The Handler function provides a default handler to expose metrics
	// via an HTTP server. "/metrics" is the usual endpoint for that.
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	srv := http.Server{
		Addr:    addr,
		Handler: mux,
	}
	reg.params.Lifecycle.Append(cell.Hook{
		OnStart: func(hc cell.HookContext) error {
			// Serve in the background; an unexpected server error shuts
			// the whole process down.
			go func() {
				reg.params.Logger.Info("Serving prometheus metrics", logfields.Address, addr)
				if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
					reg.params.Shutdowner.Shutdown(hive.ShutdownWithError(err))
				}
			}()
			return nil
		},
		OnStop: func(hc cell.HookContext) error {
			return srv.Shutdown(hc)
		},
	})
}
// NewRegistry constructs a registry that has neither been initialized
// with hive/legacy metrics nor registered its runtime hooks yet.
func NewRegistry(params RegistryParams) *Registry {
	return &Registry{
		inner:  prometheus.NewPedanticRegistry(),
		params: params,
	}
}
// NewAgentRegistry constructs the agent's registry: it registers the
// default and configured metrics, resolves the global registry promise
// and installs the HTTP serving lifecycle hooks.
func NewAgentRegistry(params RegistryParams) *Registry {
	r := &Registry{params: params}
	r.registerMetrics()
	// Resolve the global registry variable for as long as we still have global functions
	registryResolver.Resolve(r)
	r.AddServerRuntimeHooks()
	return r
}
// Register registers a collector with the inner registry (wrapped so
// that only enabled metrics are collected) and tracks it for sampling.
func (r *Registry) Register(c prometheus.Collector) error {
	r.collectors.add(c)
	wrapped := metricpkg.EnabledCollector{C: c}
	return r.inner.Register(wrapped)
}
// Unregister removes the collector from the sampling set and the inner
// registry, reporting whether the inner registry knew about it.
func (r *Registry) Unregister(c prometheus.Collector) bool {
	r.collectors.remove(c)
	ok := r.inner.Unregister(c)
	return ok
}
// goCustomCollectorsRX tracks enabled go runtime metrics: only the
// scheduler latency histogram is collected on top of the defaults (see
// registerMetrics).
var goCustomCollectorsRX = regexp.MustCompile(`^/sched/latencies:seconds`)
// registerMetrics creates a new internal registry and registers metrics
// to it: the mandatory process and Go runtime collectors, plus the
// hive-provided metrics enabled or disabled via the --metrics flag.
func (r *Registry) registerMetrics() {
	r.inner = prometheus.NewPedanticRegistry()
	// Default metrics which can't be disabled.
	r.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{Namespace: Namespace}))
	r.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(
			collectors.GoRuntimeMetricsRule{Matcher: goCustomCollectorsRX},
		)))
	// Index the auto metrics by config name for flag lookups below.
	metrics := make(map[string]metricpkg.WithMetadata)
	for i, autoMetric := range r.params.AutoMetrics {
		metrics[autoMetric.Opts().GetConfigName()] = r.params.AutoMetrics[i]
	}
	// This is a bodge for a very specific feature, inherited from the old `Daemon.additionalMetrics`.
	// We should really find a more generic way to handle such cases.
	metricFlags := r.params.Config.Metrics
	if r.params.DaemonConfig.DNSProxyConcurrencyLimit > 0 {
		metricFlags = append(metricFlags, "+"+Namespace+"_"+SubsystemFQDN+"_semaphore_rejected_total")
	}
	for _, metricFlag := range metricFlags {
		metricFlag = strings.TrimSpace(metricFlag)
		// Guard the metricFlag[0] / metricFlag[1:] accesses below against
		// empty entries, which previously panicked.
		if metricFlag == "" {
			continue
		}
		// This is a temporary hack which allows us to get rid of the centralized metric config without refactoring the
		// dynamic map pressure registration/unregistion mechanism.
		// Long term the map pressure metric becomes a smarter component so this is no longer needed.
		// Compare the full flag including its '-' sign: the previous code
		// compared metricFlag[1:] (sign stripped) against a string starting
		// with "-", which could never match, so the disable was ignored.
		if metricFlag == "-"+Namespace+"_"+SubsystemBPF+"_map_pressure" {
			BPFMapPressure = false
			continue
		}
		metric := metrics[metricFlag[1:]]
		if metric == nil {
			continue
		}
		switch metricFlag[0] {
		case '+':
			metric.SetEnabled(true)
		case '-':
			metric.SetEnabled(false)
		default:
			r.params.Logger.Warn(
				fmt.Sprintf(
					"--metrics flag contains value which does not start with + or -, '%s', ignoring",
					metricFlag),
			)
		}
	}
	// Register every auto metric; disabled ones are filtered at collection
	// time by the EnabledCollector wrapper.
	for _, m := range metrics {
		if c, ok := m.(prometheus.Collector); ok {
			r.MustRegister(c)
		}
	}
}
// MustRegister adds the collectors to the registry, exposing them to
// prometheus scrapes, and tracks them for sampling.
// It will panic on error.
func (r *Registry) MustRegister(cs ...prometheus.Collector) {
	for _, collector := range cs {
		r.collectors.add(collector)
		r.inner.MustRegister(metricpkg.EnabledCollector{C: collector})
	}
}
// RegisterList registers a list of collectors. If registration of one
// collector fails, every collector registered so far is unregistered
// again and the error is returned, so no collector remains registered.
func (r *Registry) RegisterList(list []prometheus.Collector) error {
	var registered []prometheus.Collector
	for _, c := range list {
		err := r.Register(c)
		if err == nil {
			registered = append(registered, c)
			continue
		}
		// Roll back everything registered so far.
		for _, prev := range registered {
			r.Unregister(prev)
		}
		return err
	}
	return nil
}
// collectorSet holds the prometheus collectors so that we can sample them
// periodically. The collectors are not wrapped with [EnabledCollector] so
// that they're sampled regardless if they're enabled or not.
type collectorSet struct {
	// mu guards collectors.
	mu lock.Mutex
	// collectors is lazily allocated by add().
	collectors map[prometheus.Collector]struct{}
}
// collect streams the metrics of every tracked collector over the
// returned channel. The channel is closed once all collectors have been
// drained; the set's lock is held for the duration of the collection.
func (cs *collectorSet) collect() <-chan prometheus.Metric {
	out := make(chan prometheus.Metric, 100)
	go func() {
		cs.mu.Lock()
		defer cs.mu.Unlock()
		defer close(out)
		for collector := range cs.collectors {
			collector.Collect(out)
		}
	}()
	return out
}
// add inserts the collector into the set, lazily allocating the map on
// first use.
func (cs *collectorSet) add(c prometheus.Collector) {
	cs.mu.Lock()
	defer cs.mu.Unlock()
	if cs.collectors == nil {
		cs.collectors = make(map[prometheus.Collector]struct{})
	}
	cs.collectors[c] = struct{}{}
}
// remove drops the collector from the set; a no-op if it is absent.
func (cs *collectorSet) remove(c prometheus.Collector) {
	cs.mu.Lock()
	defer cs.mu.Unlock()
	delete(cs.collectors, c)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"context"
"fmt"
"log/slog"
"slices"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/cilium/hive/cell"
"github.com/cilium/hive/job"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/time"
)
// defaultSamplerConfig samples metrics every defaultSamplingInterval
// (5 minutes, retaining numSamples = 2 hours of history).
var defaultSamplerConfig = samplerConfig{
	MetricsSamplingInterval: defaultSamplingInterval,
}
// samplerConfig configures the metrics sampler.
type samplerConfig struct {
	// MetricsSamplingInterval is the time between two metric samples.
	MetricsSamplingInterval time.Duration
}
// timeSpan returns the total duration covered by the retained samples
// (sampling interval times the number of samples kept).
func (cfg samplerConfig) timeSpan() time.Duration {
	return numSamples * cfg.MetricsSamplingInterval
}
// Flags registers the sampler's command-line flags.
func (cfg samplerConfig) Flags(flags *pflag.FlagSet) {
	// Default to the receiver's value (not the package constant) so the
	// registered default tracks defaultSamplerConfig, consistent with
	// RegistryConfig.Flags.
	flags.Duration("metrics-sampling-interval", cfg.MetricsSamplingInterval, "Set the internal metrics sampling interval")
}
// samplingExcludedPrefixes are the prefixes of metrics that we don't care about sampling as they're
// either uninteresting or static over the runtime.
// Matched with strings.HasPrefix in excludedFromSampling.
var samplingExcludedPrefixes = []string{
	"cilium_event_ts",
	"cilium_feature_",
}
func excludedFromSampling(metricName string) bool {
return slices.ContainsFunc(samplingExcludedPrefixes, func(prefix string) bool {
return strings.HasPrefix(metricName, prefix)
})
}
// sampler periodically samples all metrics (enabled or not).
// The sampled metrics can be inspected with the 'metrics' command.
// 'metrics -s' lists all metrics with samples from the past 2 hours,
// and 'metrics/plot (regex)' plots the matching metric. See files in
// 'testdata/' for examples.
type sampler struct {
	// reg provides the collectors to sample.
	reg *Registry
	log *slog.Logger
	cfg samplerConfig
	// mu guards metrics and maxWarningLogged.
	mu lock.Mutex
	// metrics holds the retained samples, keyed per metric.
	metrics map[metricKey]debugSamples
	// maxWarningLogged ensures the max-metrics message is logged once.
	maxWarningLogged bool
}
// newSampler constructs a sampler and schedules its periodic collection
// and cleanup jobs on the given job group.
func newSampler(log *slog.Logger, reg *Registry, jg job.Group, cfg samplerConfig) *sampler {
	s := &sampler{
		log:     log,
		cfg:     cfg,
		reg:     reg,
		metrics: make(map[metricKey]debugSamples),
	}
	jg.Add(
		job.OneShot("collect", s.collectLoop),
		job.Timer("cleanup", s.cleanup, cfg.timeSpan()/2),
	)
	return s
}
const (
	// Number of samples per metric we want to keep.
	numSamples = 24
	// The interval at which we collect samples.
	// 24 * 5min = 2 hours.
	defaultSamplingInterval = 5 * time.Minute
	// Indexes into a newest-first sample slice at roughly 1/4, 1/2 and
	// the end of the retained time span.
	quarterIndex = numSamples/4 - 1
	halfIndex    = numSamples/2 - 1
	lastIndex    = numSamples - 1
	// Cap the number of metrics we keep around to put an upper limit on memory usage.
	// As there's way fewer histograms than gauges or counters, we can roughly estimate
	// the memory usage as:
	//   max 2000 (20% histo): 400 * sizeof(histogram) + 1600 * sizeof(gaugeOrCounter)
	//                      ~= 400 * 508 + 1600 * 164
	//                      ~= 466kB
	//   worst (100% histo): 2000 * 520 ~= 1MB
	// sizeof(baseSamples) = 24+2*16 = 56
	// sizeof(sampleRing) = 24*4+4 = 100
	// sizeof(histogramSamples): sizeof(baseSamples) + 24+16*8 /* prev */ + 3*sizeof(sampleRing) = 508
	// sizeof(gaugeOrCounterSamples): sizeof(baseSamples) + sizeof(sampleRing) + 8 = 164
	// See also TestSamplerMaxMemoryUsage.
	maxSampledMetrics = 2000
)
// metricKey identifies a single metric. We are relying on the fact that
// Desc() always returns by pointer the same Desc.
type metricKey struct {
	// desc identifies the metric family (pointer identity).
	desc *prometheus.Desc
	// labelsHash is an xxhash over the label name/value pairs; see
	// newMetricKey.
	labelsHash uint64
}
// fqName extracts the fully-qualified metric name from the descriptor.
// Unfortunately we need to rely on the implementation details of Desc.String()
// here to extract the name. If it ever changes our tests will catch it.
// This method is only invoked when the 'metrics' or 'metrics/plot' commands
// are used, so efficiency is not a huge concern.
func (k *metricKey) fqName() string {
	const fqNamePrefix = `fqName: "`
	_, rest, found := strings.Cut(k.desc.String(), fqNamePrefix)
	if !found {
		return "???"
	}
	name, _, found := strings.Cut(rest, `"`)
	if !found {
		return "???"
	}
	return name
}
// SampleBitmap tracks which of the 'numSamples' actually exists; bit 0
// is the most recently marked sample.
// For histograms we only mark it sampled when the counts have changed.
type SampleBitmap uint64

// mark shifts the bitmap left by one and records whether the newest
// sample exists.
func (sb *SampleBitmap) mark(b bool) {
	*sb <<= 1
	if !b {
		return
	}
	*sb |= 1
}

// exists reports whether the sample at the given index (0 = most
// recently marked) is present.
func (sb SampleBitmap) exists(index int) bool {
	return sb&(1<<index) != 0
}
// debugSamples is the common interface of the per-metric sample holders
// (gauge/counter rings and histogram quantile rings).
type debugSamples interface {
	getName() string
	getLabels() string
	// getJSON returns the samples for machine-readable output.
	getJSON() JSONSamples
	// get returns pretty-printed samples at now, 1/4, 1/2 and the full
	// retained time span (parameter names reflect the default 5-minute
	// interval: 5/30/60/120 minutes ago).
	get() (m5, m30, m60, m120 string)
	// getUpdatedAt returns the time of the last collection touching this
	// metric; used by cleanup to expire stale entries.
	getUpdatedAt() time.Time
}
// baseSamples carries the identity shared by all sample holders.
type baseSamples struct {
	// updatedAt is the time of the most recent collection that touched
	// this metric.
	updatedAt time.Time
	// name is the fully-qualified metric name.
	name string
	// labels is the concatenated "name=value" label string.
	labels string
}

// getName returns the fully-qualified metric name.
func (bs baseSamples) getName() string {
	return bs.name
}

// getLabels returns the concatenated label string.
func (bs baseSamples) getLabels() string {
	return bs.labels
}
// gaugeOrCounterSamples holds the sample ring for a single gauge,
// counter or summary metric.
type gaugeOrCounterSamples struct {
	baseSamples
	// samples is the ring of the most recent values.
	// (The original comment about 'pos' here described sampleRing.pos.)
	samples sampleRing
	// bits records which of the samples actually exist.
	bits SampleBitmap
}
// sampleRing is a fixed-size ring buffer of float32 samples.
type sampleRing struct {
	samples [numSamples]float32
	// pos points to the index where the next sample goes; the latest
	// sample is at pos-1 (modulo numSamples).
	pos int
}

// push appends a sample, overwriting the oldest entry once the ring is
// full.
func (r *sampleRing) push(sample float32) {
	r.samples[r.pos] = sample
	r.pos = (r.pos + 1) % numSamples
}

// grab returns the samples ordered from newest to oldest.
func (r *sampleRing) grab() []float32 {
	var out [numSamples]float32
	pos := r.pos
	for i := range numSamples {
		pos--
		if pos < 0 {
			pos = numSamples - 1
		}
		out[i] = r.samples[pos]
	}
	return out[:]
}
// getUpdatedAt returns the time this metric was last sampled.
func (g *gaugeOrCounterSamples) getUpdatedAt() time.Time {
	return g.updatedAt
}
// getJSON renders the gauge/counter samples (newest first) for the JSON
// output of the 'metrics' command.
func (g *gaugeOrCounterSamples) getJSON() JSONSamples {
	grabbed := g.samples.grab()
	return JSONSamples{
		Name:   g.name,
		Labels: g.labels,
		GaugeOrCounter: &JSONGaugeOrCounter{
			Samples: grabbed[:],
		},
		Latest: prettyValue(float64(grabbed[0])),
	}
}
// get returns pretty-printed samples at the now, quarter, half and full
// time-span positions.
func (g *gaugeOrCounterSamples) get() (zero, quarter, half, last string) {
	s := g.samples.grab()
	zero = prettyValue(float64(s[0]))
	quarter = prettyValue(float64(s[quarterIndex]))
	half = prettyValue(float64(s[halfIndex]))
	last = prettyValue(float64(s[lastIndex]))
	return
}
// histogramSamples holds p50/p90/p99 quantile rings for a single
// histogram metric.
type histogramSamples struct {
	baseSamples
	// prev holds the previous bucket counts so each sample reflects only
	// the last period's observations (see the sampler's collect).
	prev          []histogramBucket
	p50, p90, p99 sampleRing
	// bits marks the samples where the histogram counts changed.
	bits SampleBitmap
	// isSeconds appends an "s" unit suffix when rendering.
	isSeconds bool
}
// get returns "p50 / p90 / p99" strings at the now, quarter, half and
// full time-span positions.
func (h *histogramSamples) get() (zero, quarter, half, last string) {
	suffix := ""
	if h.isSeconds {
		suffix = "s"
	}
	p50, p90, p99 := h.p50.grab(), h.p90.grab(), h.p99.grab()
	pretty := func(i int) string {
		return fmt.Sprintf("%s%s / %s%s / %s%s",
			prettyValue(float64(p50[i])),
			suffix, prettyValue(float64(p90[i])),
			suffix, prettyValue(float64(p99[i])), suffix)
	}
	return pretty(0), pretty(quarterIndex), pretty(halfIndex), pretty(lastIndex)
}
// getUpdatedAt returns the time this metric was last sampled.
func (h *histogramSamples) getUpdatedAt() time.Time {
	return h.updatedAt
}
// getJSON renders the histogram quantile samples for the JSON output of
// the 'metrics' command.
func (h *histogramSamples) getJSON() JSONSamples {
	suffix := ""
	if h.isSeconds {
		suffix = "s"
	}
	p50, p90, p99 := h.p50.grab(), h.p90.grab(), h.p99.grab()
	latest := fmt.Sprintf("%s%s / %s%s / %s%s",
		prettyValue(float64(p50[0])),
		suffix, prettyValue(float64(p90[0])),
		suffix, prettyValue(float64(p99[0])), suffix)
	return JSONSamples{
		Name:   h.name,
		Labels: h.labels,
		Histogram: &JSONHistogram{
			P50: p50[:],
			P90: p90[:],
			P99: p99[:],
		},
		Latest: latest,
	}
}
// cleanup runs every timeSpan()/2 (one hour with the defaults, see
// newSampler) and removes samples that have not been updated within that
// window (e.g. the metric has been unregistered).
func (dc *sampler) cleanup(ctx context.Context) error {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	for k, s := range dc.metrics {
		// Expire after half the retained time span. The original code
		// compared against a single MetricsSamplingInterval (5 minutes),
		// so one delayed collection tick could evict live metrics,
		// contradicting the documented "more than an hour" behavior.
		if time.Since(s.getUpdatedAt()) > dc.cfg.timeSpan()/2 {
			delete(dc.metrics, k)
		}
	}
	return nil
}
// collectLoop drives the sampler: it takes one sample immediately and
// then one per sampling interval until the context is cancelled.
func (dc *sampler) collectLoop(ctx context.Context, health cell.Health) error {
	tick := time.NewTicker(dc.cfg.MetricsSamplingInterval)
	defer tick.Stop()
	for {
		dc.collect(health)
		select {
		case <-tick.C:
		case <-ctx.Done():
			return nil
		}
	}
}
// collect samples every metric exposed by the registry's collectors once
// and folds the values into the per-metric sample rings, reporting
// progress via the health reporter.
func (dc *sampler) collect(health cell.Health) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	health.OK("Collecting metrics")
	t0 := time.Now()
	// Since this is meant to have very low overhead we want to avoid heap allocations
	// and other expensive operations as much as possible. Thus we're using Collect()
	// to collect metric one at a time (vs Gather() that does a lot in parallel) and
	// also avoiding building up temporary data structures.
	// One downside of this approach is that we need to parse Desc.String to extract
	// the fqName and the labels, but we do this only when encountering a new metric
	// and tests catch if it ever breaks.
	metricChan := dc.reg.collectors.collect()
	// addNewMetric tracks the sample holder unless the hard cap on the
	// number of sampled metrics has been reached.
	addNewMetric := func(key metricKey, s debugSamples) bool {
		if len(dc.metrics) >= maxSampledMetrics {
			if !dc.maxWarningLogged {
				dc.log.Debug("maximum number of sampled metrics reached")
				dc.maxWarningLogged = true
			}
			return false
		}
		dc.metrics[key] = s
		return true
	}
	numSampled := 0
	// The *Desc's we're sampling. Used to quickly decide whether or not
	// to sample a metric without calling 'Write'.
	shouldSampleDesc := map[*prometheus.Desc]bool{}
	for metric := range metricChan {
		desc := metric.Desc()
		included, known := shouldSampleDesc[desc]
		if known && !included {
			continue
		}
		var msg dto.Metric
		if err := metric.Write(&msg); err != nil {
			continue
		}
		key := newMetricKey(desc, msg.Label)
		name := key.fqName()
		if !known {
			included = !excludedFromSampling(name)
			shouldSampleDesc[desc] = included
			if !included {
				continue
			}
		}
		if msg.Histogram != nil {
			var histogram *histogramSamples
			if samples, ok := dc.metrics[key]; !ok {
				histogram = &histogramSamples{
					baseSamples: baseSamples{name: name, labels: concatLabels(msg.Label)},
					isSeconds:   strings.Contains(name, "seconds"),
				}
				if !addNewMetric(key, histogram) {
					continue
				}
			} else {
				histogram = samples.(*histogramSamples)
			}
			histogram.updatedAt = t0
			buckets := convertHistogram(msg.GetHistogram())
			updated := histogramSampleCount(buckets) != histogramSampleCount(histogram.prev)
			if updated {
				b := buckets
				if histogram.prev != nil {
					// Previous sample exists, deduct the counts from it to get the quantiles
					// of the last period.
					b = slices.Clone(buckets)
					subtractHistogram(b, histogram.prev)
				}
				histogram.p50.push(float32(getHistogramQuantile(b, 0.50)))
				histogram.p90.push(float32(getHistogramQuantile(b, 0.90)))
				histogram.p99.push(float32(getHistogramQuantile(b, 0.99)))
				histogram.bits.mark(true)
			} else {
				// No new observations: push placeholder zeros and mark the
				// sample as absent.
				histogram.p50.push(0.0)
				histogram.p90.push(0.0)
				histogram.p99.push(0.0)
				histogram.bits.mark(false)
			}
			histogram.prev = buckets
		} else {
			var s *gaugeOrCounterSamples
			if samples, ok := dc.metrics[key]; !ok {
				s = &gaugeOrCounterSamples{
					baseSamples: baseSamples{name: key.fqName(), labels: concatLabels(msg.Label)},
				}
				if !addNewMetric(key, s) {
					continue
				}
			} else {
				s = samples.(*gaugeOrCounterSamples)
			}
			s.updatedAt = t0
			var value float64
			switch {
			case msg.Counter != nil:
				value = msg.Counter.GetValue()
			case msg.Gauge != nil:
				value = msg.Gauge.GetValue()
			case msg.Summary != nil:
				// Approximate a summary by its mean observation.
				value = msg.Summary.GetSampleSum() / float64(msg.Summary.GetSampleCount())
			default:
				value = -1.0
			}
			s.samples.push(float32(value))
			s.bits.mark(true)
		}
		numSampled++
	}
	// Report the number of metrics actually sampled this round. The
	// original code mistakenly reported the constant 'numSamples' (24)
	// instead of the computed 'numSampled'.
	health.OK(fmt.Sprintf("Sampled %d metrics in %s, next collection at %s", numSampled, time.Since(t0), t0.Add(dc.cfg.MetricsSamplingInterval)))
}
// sep separates label names and values in the hash input, mirroring the
// Prometheus model separator byte.
var sep = []byte{model.SeparatorByte}

// newMetricKey constructs a key to uniquely identify a specific metric
// (Desc pointer plus a hash of its label pairs). Designed to avoid heap
// allocations.
func newMetricKey(desc *prometheus.Desc, labels []*dto.LabelPair) metricKey {
	var digest xxhash.Digest
	digest.Reset()
	for _, lp := range labels {
		digest.WriteString(lp.GetName())
		digest.Write(sep)
		digest.WriteString(lp.GetValue())
	}
	return metricKey{desc: desc, labelsHash: digest.Sum64()}
}
// concatLabels renders the label pairs as a space-separated
// "name=value" string.
func concatLabels(labels []*dto.LabelPair) string {
	var b strings.Builder
	for i, lp := range labels {
		if i > 0 {
			b.WriteByte(' ')
		}
		b.WriteString(lp.GetName())
		b.WriteByte('=')
		b.WriteString(lp.GetValue())
	}
	return b.String()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"log/slog"
"github.com/prometheus/client_golang/prometheus"
clientPkg "github.com/cilium/cilium/pkg/client"
healthClientPkg "github.com/cilium/cilium/pkg/health/client"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// statusCollector queries the Cilium daemon health and cilium-health
// connectivity APIs on every Collect and exposes the results as gauges.
type statusCollector struct {
	logger *slog.Logger
	// daemonHealthGetter provides the daemon healthz response.
	daemonHealthGetter daemonHealthGetter
	// connectivityStatusGetter provides the cilium-health connectivity
	// report.
	connectivityStatusGetter connectivityStatusGetter
	controllersFailingDesc *prometheus.Desc
	ipAddressesDesc *prometheus.Desc
	unreachableNodesDesc *prometheus.Desc
	unreachableHealthEndpointsDesc *prometheus.Desc
}
// newStatusCollector creates a status collector backed by freshly
// constructed Cilium and cilium-health API clients. Failure to create
// either client is fatal.
func newStatusCollector(logger *slog.Logger) *statusCollector {
	ciliumClient, err := clientPkg.NewClient("")
	if err != nil {
		logging.Fatal(logger, "Error while creating Cilium API client", logfields.Error, err)
	}
	healthClient, err := healthClientPkg.NewClient("")
	if err != nil {
		logging.Fatal(logger, "Error while creating cilium-health API client", logfields.Error, err)
	}
	return newStatusCollectorWithClients(logger, ciliumClient.Daemon, healthClient.Connectivity)
}
// newStatusCollectorWithClients provides a constructor with injected
// clients, which is also useful for testing with fakes. The metric
// descriptors are created once here and emitted on every Collect.
func newStatusCollectorWithClients(logger *slog.Logger, d daemonHealthGetter, c connectivityStatusGetter) *statusCollector {
	return &statusCollector{
		logger:                   logger,
		daemonHealthGetter:       d,
		connectivityStatusGetter: c,
		controllersFailingDesc: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "", "controllers_failing"),
			"Number of failing controllers",
			nil, nil,
		),
		// ip_addresses is the only metric here with a variable label
		// ("family": ipv4/ipv6).
		ipAddressesDesc: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "", "ip_addresses"),
			"Number of allocated IP addresses",
			[]string{"family"}, nil,
		),
		unreachableNodesDesc: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "", "unreachable_nodes"),
			"Number of nodes that cannot be reached",
			nil, nil,
		),
		unreachableHealthEndpointsDesc: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "", "unreachable_health_endpoints"),
			"Number of health endpoints that cannot be reached",
			nil, nil,
		),
	}
}
// Describe implements prometheus.Collector by emitting every metric
// descriptor this collector can produce.
func (s *statusCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, desc := range []*prometheus.Desc{
		s.controllersFailingDesc,
		s.ipAddressesDesc,
		s.unreachableNodesDesc,
		s.unreachableHealthEndpointsDesc,
	} {
		ch <- desc
	}
}
// Collect queries the daemon health and cilium-health status APIs on each
// scrape and emits gauges derived from the responses. On any API error it
// logs the error and emits no further metrics for this scrape.
func (s *statusCollector) Collect(ch chan<- prometheus.Metric) {
	statusResponse, err := s.daemonHealthGetter.GetHealthz(nil)
	if err != nil {
		s.logger.Error("Error while getting Cilium status", logfields.Error, err)
		return
	}

	if statusResponse.Payload == nil {
		return
	}

	// Controllers failing: a controller counts as failing when it reports at
	// least one consecutive failure.
	controllersFailing := 0

	for _, ctrl := range statusResponse.Payload.Controllers {
		if ctrl.Status == nil {
			continue
		}
		if ctrl.Status.ConsecutiveFailureCount > 0 {
			controllersFailing++
		}
	}

	ch <- prometheus.MustNewConstMetric(
		s.controllersFailingDesc,
		prometheus.GaugeValue,
		float64(controllersFailing),
	)

	if statusResponse.Payload.Ipam != nil {
		// Address count, one gauge per address family.
		ch <- prometheus.MustNewConstMetric(
			s.ipAddressesDesc,
			prometheus.GaugeValue,
			float64(len(statusResponse.Payload.Ipam.IPV4)),
			"ipv4",
		)

		ch <- prometheus.MustNewConstMetric(
			s.ipAddressesDesc,
			prometheus.GaugeValue,
			float64(len(statusResponse.Payload.Ipam.IPV6)),
			"ipv6",
		)
	}

	healthStatusResponse, err := s.connectivityStatusGetter.GetStatus(nil)
	if err != nil {
		s.logger.Error("Error while getting cilium-health status", logfields.Error, err)
		return
	}

	if healthStatusResponse.Payload == nil {
		return
	}

	// Nodes and endpoints from the cilium-health response: a node (or
	// endpoint) counts as unreachable as soon as any one of its addresses is
	// unreachable, hence the break after the first hit.
	var (
		unreachableNodes     int
		unreachableEndpoints int
	)

	for _, nodeStatus := range healthStatusResponse.Payload.Nodes {
		for _, addr := range healthClientPkg.GetAllHostAddresses(nodeStatus) {
			if healthClientPkg.GetPathConnectivityStatusType(addr) == healthClientPkg.ConnStatusUnreachable {
				unreachableNodes++
				break
			}
		}

		for _, addr := range healthClientPkg.GetAllEndpointAddresses(nodeStatus) {
			if healthClientPkg.GetPathConnectivityStatusType(addr) == healthClientPkg.ConnStatusUnreachable {
				unreachableEndpoints++
				break
			}
		}
	}

	ch <- prometheus.MustNewConstMetric(
		s.unreachableNodesDesc,
		prometheus.GaugeValue,
		float64(unreachableNodes),
	)

	ch <- prometheus.MustNewConstMetric(
		s.unreachableHealthEndpointsDesc,
		prometheus.GaugeValue,
		float64(unreachableEndpoints),
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package agent
import (
"bytes"
"context"
"encoding/gob"
"errors"
"fmt"
"log/slog"
"os"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/perf"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/api/v1/models"
oldBPF "github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/maps/eventsmap"
"github.com/cilium/cilium/pkg/monitor/agent/consumer"
"github.com/cilium/cilium/pkg/monitor/agent/listener"
"github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/monitor/payload"
"github.com/cilium/cilium/pkg/time"
)
// isCtxDone is a utility function that returns true when the context's Done()
// channel is closed. It is intended to simplify goroutines that need to check
// this multiple times in their loop.
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
// Agent is the monitor agent interface: it multiplexes datapath (perf ring
// buffer) and agent-generated events to registered listeners and consumers.
type Agent interface {
	// AttachToEventsMap opens the events perf ring buffer; nPages is the
	// number of ring buffer pages per CPU.
	AttachToEventsMap(nPages int) error
	// SendEvent distributes a user-space event of the given monitor API type.
	SendEvent(typ int, event any) error
	// RegisterNewListener adds an external (gob-encoded payload) client.
	RegisterNewListener(newListener listener.MonitorListener)
	// RemoveListener removes and closes a previously registered listener.
	RemoveListener(ml listener.MonitorListener)
	// RegisterNewConsumer adds an internal (decoded message) client.
	RegisterNewConsumer(newConsumer consumer.MonitorConsumer)
	// RemoveConsumer removes a previously registered consumer.
	RemoveConsumer(mc consumer.MonitorConsumer)
	// State returns the current monitor status, or nil if unavailable.
	State() *models.MonitorStatus
}
// Agent structure for centralizing the responsibilities of the main events
// reader.
// There is some racey-ness around perfReaderCancel since it replaces on every
// perf reader start. In the event that a MonitorListener from a previous
// generation calls its cleanup after the start of the new perf reader, we
// might call the new, and incorrect, cancel function. We guard for this by
// checking the number of listeners during the cleanup call. The perf reader
// must have at least one MonitorListener (since it started) so no cancel is called.
// If it doesn't, the cancel is the correct behavior (the older generation
// cancel must have been called for us to get this far anyway).
type agent struct {
	logger *slog.Logger
	lock.Mutex
	models.MonitorStatus

	// ctx is the lifetime context of the whole agent; cancelling it stops
	// the perf reader goroutine.
	ctx              context.Context
	// perfReaderCancel cancels the currently running perf reader goroutine.
	// It is never nil (a no-op is installed at construction time).
	perfReaderCancel context.CancelFunc

	// listeners are external cilium monitor clients which receive raw
	// gob-encoded payloads
	listeners map[listener.MonitorListener]struct{}
	// consumers are internal clients which receive decoded messages
	consumers map[consumer.MonitorConsumer]struct{}

	// events is the pinned BPF events map; nil until AttachToEventsMap.
	events        *ebpf.Map
	// monitorEvents is the perf reader; non-nil only while handleEvents runs.
	monitorEvents *perf.Reader
}
// newAgent starts a new monitor agent instance which distributes monitor events
// to registered listeners. Once the datapath is set up, AttachToEventsMap needs
// to be called to receive events from the perf ring buffer. Otherwise, only
// user space events received via SendEvent are distributed registered listeners.
// Internally, the agent spawns a singleton goroutine reading events from
// the BPF perf ring buffer and provides an interface to pass in non-BPF events.
// The instance can be stopped by cancelling ctx, which will stop the perf reader
// goroutine and close all registered listeners.
// Note that the perf buffer reader is started only when listeners are
// connected.
func newAgent(ctx context.Context, logger *slog.Logger) *agent {
	a := &agent{
		ctx:       ctx,
		logger:    logger,
		listeners: map[listener.MonitorListener]struct{}{},
		consumers: map[consumer.MonitorConsumer]struct{}{},
	}
	// Install a no-op cancel so call sites never have to nil-check it; it is
	// replaced with the real cancel when the perf reader starts.
	a.perfReaderCancel = func() {}
	return a
}
// AttachToEventsMap opens the events perf ring buffer and makes it ready for
// consumption, such that any subscribed consumers may receive events
// from it. This function is to be called once the events map has been set up.
// It is an error to call it more than once. If subscribers are already
// present, the perf reader is started immediately.
func (a *agent) AttachToEventsMap(nPages int) error {
	a.Lock()
	defer a.Unlock()

	if a.events != nil {
		return errors.New("events map already attached")
	}

	// assert that we can actually connect the monitor
	path := oldBPF.MapPath(a.logger, eventsmap.MapName)
	eventsMap, err := ebpf.LoadPinnedMap(path, nil)
	if err != nil {
		return err
	}

	a.events = eventsMap
	// Record the ring buffer geometry for State(); the events map has one
	// entry per CPU.
	a.MonitorStatus = models.MonitorStatus{
		Cpus:     int64(eventsMap.MaxEntries()),
		Npages:   int64(nPages),
		Pagesize: int64(os.Getpagesize()),
	}

	// start the perf reader if we already have subscribers
	if a.hasSubscribersLocked() {
		a.startPerfReaderLocked()
	}

	return nil
}
// SendEvent distributes an event to all monitor listeners and consumers.
// typ is the monitor API message type (see pkg/monitor/api); event is the
// message body. Returns an error if the agent is nil or the payload cannot
// be encoded for listeners.
func (a *agent) SendEvent(typ int, event any) error {
	if a == nil {
		return fmt.Errorf("monitor agent is not set up")
	}

	// Two types of clients are currently supported: consumers and listeners.
	// The former ones expect decoded messages, so the notification does not
	// require any additional marshalling operation before sending an event.
	// Instead, the latter expect gob-encoded payloads, and the whole marshalling
	// process may be quite expensive.
	// While we want to avoid marshalling events if there are no active
	// listeners, there's no need to check for active consumers ahead of time.

	a.notifyAgentEvent(typ, event)

	// do not marshal notifications if there are no active listeners
	if !a.hasListeners() {
		return nil
	}

	// marshal notifications into JSON format for legacy listeners
	if typ == api.MessageTypeAgent {
		msg, ok := event.(api.AgentNotifyMessage)
		if !ok {
			return errors.New("unexpected event type for MessageTypeAgent")
		}
		var err error
		event, err = msg.ToJSON()
		if err != nil {
			return fmt.Errorf("unable to JSON encode agent notification: %w", err)
		}
	}

	// The wire format is one leading type byte followed by the gob-encoded
	// event.
	var buf bytes.Buffer
	if err := buf.WriteByte(byte(typ)); err != nil {
		return fmt.Errorf("unable to initialize buffer: %w", err)
	}
	if err := gob.NewEncoder(&buf).Encode(event); err != nil {
		return fmt.Errorf("unable to gob encode: %w", err)
	}

	p := payload.Payload{Data: buf.Bytes(), CPU: 0, Lost: 0, Type: payload.EventSample}
	a.sendToListeners(&p)

	return nil
}
// hasSubscribersLocked reports whether any listener or consumer is currently
// subscribed to the agent.
// Note: it is critical to hold the lock for this operation.
func (a *agent) hasSubscribersLocked() bool {
	return len(a.listeners) > 0 || len(a.consumers) > 0
}
// hasListeners reports whether any listener is subscribed to the agent right
// now. It acquires the agent lock itself.
func (a *agent) hasListeners() bool {
	a.Lock()
	n := len(a.listeners)
	a.Unlock()
	return n != 0
}
// startPerfReaderLocked starts the perf reader. This should only be
// called if there are no other readers already running.
// The goroutine is spawned with a context derived from m.Context() and the
// cancelFunc is assigned to perfReaderCancel. Note that cancelling m.Context()
// (e.g. on program shutdown) will also cancel the derived context.
// Note: it is critical to hold the lock for this operation.
func (a *agent) startPerfReaderLocked() {
	if a.events == nil {
		return // not attached to events map yet
	}

	// Cancel any previous-generation reader before replacing the cancel
	// function, so we don't leak its goroutine.
	a.perfReaderCancel() // don't leak any old readers, just in case.
	perfEventReaderCtx, cancel := context.WithCancel(a.ctx)
	a.perfReaderCancel = cancel
	go a.handleEvents(perfEventReaderCtx)
}
// RegisterNewListener adds the new MonitorListener to the global list.
// It also spawns a singleton goroutine to read and distribute the events.
// Listeners speaking an unsupported protocol version are closed and not
// registered.
func (a *agent) RegisterNewListener(newListener listener.MonitorListener) {
	if a == nil {
		return
	}

	a.Lock()
	defer a.Unlock()

	if isCtxDone(a.ctx) {
		a.logger.Debug("RegisterNewListener called on stopped monitor")
		newListener.Close()
		return
	}

	// If this is the first listener, start the perf reader
	if !a.hasSubscribersLocked() {
		a.startPerfReaderLocked()
	}

	version := newListener.Version()
	switch version {
	case listener.Version1_2:
		a.listeners[newListener] = struct{}{}
	default:
		newListener.Close()
		a.logger.Error("Closing listener from unsupported monitor client version", logfields.Version, version)
		// Bug fix: return here so a rejected listener is not also reported
		// as "New listener connected" below.
		return
	}

	a.logger.Debug(
		"New listener connected",
		logfields.Count, len(a.listeners),
		logfields.Version, version,
	)
}
// RemoveListener deletes the MonitorListener from the list, closes its queue,
// and stops perfReader if this is the last subscriber
func (a *agent) RemoveListener(ml listener.MonitorListener) {
	if a == nil {
		return
	}

	a.Lock()
	defer a.Unlock()

	// Remove the listener and close it.
	delete(a.listeners, ml)
	a.logger.Debug(
		"Removed listener",
		logfields.Count, len(a.listeners),
		logfields.Version, ml.Version(),
	)
	ml.Close()

	// If this was the final listener, shutdown the perf reader and unmap our
	// ring buffer readers. This tells the kernel to not emit this data.
	// Note: it is critical to hold the lock and check the number of listeners.
	// This guards against an older generation listener calling the
	// current generation perfReaderCancel
	if !a.hasSubscribersLocked() {
		a.perfReaderCancel()
	}
}
// RegisterNewConsumer adds the new MonitorConsumer to the global list.
// It also spawns a singleton goroutine to read and distribute the events.
// Registration is a no-op on a stopped monitor.
func (a *agent) RegisterNewConsumer(newConsumer consumer.MonitorConsumer) {
	if a == nil {
		return
	}

	// NOTE(review): unlike RegisterNewListener, the ctx check happens before
	// taking the lock here; a consumer registered concurrently with shutdown
	// may still be added to the map.
	if isCtxDone(a.ctx) {
		a.logger.Debug("RegisterNewConsumer called on stopped monitor")
		return
	}

	a.Lock()
	defer a.Unlock()

	// If this is the first subscriber, start the perf reader.
	if !a.hasSubscribersLocked() {
		a.startPerfReaderLocked()
	}

	a.consumers[newConsumer] = struct{}{}
}
// RemoveConsumer deletes the MonitorConsumer from the list and stops the
// perf reader if this removed the last subscriber.
func (a *agent) RemoveConsumer(mc consumer.MonitorConsumer) {
	if a == nil {
		return
	}

	a.Lock()
	defer a.Unlock()

	delete(a.consumers, mc)
	if a.hasSubscribersLocked() {
		return
	}
	// Last subscriber gone: stop reading from the perf ring buffer.
	a.perfReaderCancel()
}
// handleEvents reads events from the perf buffer and processes them. It
// will exit when stopCtx is done. Note, however, that it will block in the
// Poll call but assumes enough events are generated that these blocks are
// short.
func (a *agent) handleEvents(stopCtx context.Context) {
	tNow := time.Now()
	a.logger.Info("Beginning to read perf buffer", logfields.StartTime, tNow)
	defer a.logger.Info("Stopped reading perf buffer", logfields.StartTime, tNow)

	bufferSize := int(a.Pagesize * a.Npages)
	monitorEvents, err := perf.NewReader(a.events, bufferSize)
	if err != nil {
		logging.Fatal(a.logger, "Cannot initialise BPF perf ring buffer sockets",
			logfields.Error, err,
			logfields.StartTime, tNow,
		)
	}
	// Ensure the reader is closed and unpublished (under the agent lock)
	// when this goroutine exits, so State() stops reporting it.
	defer func() {
		monitorEvents.Close()
		a.Lock()
		a.monitorEvents = nil
		a.Unlock()
	}()

	a.Lock()
	a.monitorEvents = monitorEvents
	a.Unlock()

	for !isCtxDone(stopCtx) {
		record, err := monitorEvents.Read()
		switch {
		case isCtxDone(stopCtx):
			// Shutdown raced with the blocking Read; discard the record.
			return
		case err != nil:
			if perf.IsUnknownEvent(err) {
				// Unknown event types are counted in the status but
				// otherwise ignored.
				a.Lock()
				a.MonitorStatus.Unknown++
				a.Unlock()
			} else {
				a.logger.Warn("Error received while reading from perf buffer",
					logfields.Error, err,
					logfields.StartTime, tNow,
				)
				// EBADFD means the ring buffer fd is gone; retrying is
				// pointless, so exit the reader.
				if errors.Is(err, unix.EBADFD) {
					return
				}
			}
			continue
		}
		a.processPerfRecord(record)
	}
}
// processPerfRecord forwards one datapath perf record to all registered
// subscribers, accounting for lost samples in the monitor status.
func (a *agent) processPerfRecord(record perf.Record) {
	a.Lock()
	defer a.Unlock()

	if record.LostSamples == 0 {
		// Regular sample: deliver the raw bytes to consumers and listeners.
		a.notifyPerfEventLocked(record.RawSample, record.CPU)
		a.sendToListenersLocked(&payload.Payload{
			Data: record.RawSample,
			CPU:  record.CPU,
			Type: payload.EventSample,
		})
		return
	}

	// The kernel dropped samples on this CPU; record and propagate the loss.
	a.MonitorStatus.Lost += int64(record.LostSamples)
	a.notifyPerfEventLostLocked(record.LostSamples, record.CPU)
	a.sendToListenersLocked(&payload.Payload{
		CPU:  record.CPU,
		Lost: record.LostSamples,
		Type: payload.RecordLost,
	})
}
// State returns a snapshot of the current monitor status, or nil when the
// agent is not set up or the perf reader is not running.
func (a *agent) State() *models.MonitorStatus {
	if a == nil {
		return nil
	}

	a.Lock()
	defer a.Unlock()

	if a.monitorEvents == nil {
		return nil
	}

	// Hand back a shallow copy so callers cannot mutate our state.
	snapshot := a.MonitorStatus
	return &snapshot
}
// notifyAgentEvent delivers an agent event to every registered consumer,
// taking the agent lock for the duration of the fan-out.
func (a *agent) notifyAgentEvent(typ int, message any) {
	a.Lock()
	defer a.Unlock()
	for c := range a.consumers {
		c.NotifyAgentEvent(typ, message)
	}
}
// notifyPerfEventLocked delivers a raw perf event to every registered
// consumer. The caller must hold the monitor lock.
func (a *agent) notifyPerfEventLocked(data []byte, cpu int) {
	for c := range a.consumers {
		c.NotifyPerfEvent(data, cpu)
	}
}
// notifyPerfEventLostLocked informs every registered consumer that
// numLostEvents events were dropped on the given CPU. The caller must hold
// the monitor lock.
func (a *agent) notifyPerfEventLostLocked(numLostEvents uint64, cpu int) {
	for c := range a.consumers {
		c.NotifyPerfEventLost(numLostEvents, cpu)
	}
}
// sendToListeners enqueues the payload to all listeners, taking the agent
// lock around the fan-out.
func (a *agent) sendToListeners(pl *payload.Payload) {
	a.Lock()
	a.sendToListenersLocked(pl)
	a.Unlock()
}
// sendToListenersLocked enqueues the payload to all listeners. The caller
// must hold the monitor lock.
func (a *agent) sendToListenersLocked(pl *payload.Payload) {
	for l := range a.listeners {
		l.Enqueue(pl)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package agent
import (
"context"
"fmt"
"log/slog"
"github.com/cilium/ebpf"
"github.com/cilium/hive/cell"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/maps/eventsmap"
)
// Cell provides the monitor agent, which monitors the cilium events perf event
// buffer and forwards events to consumers/listeners. It also handles
// multicasting of other agent events.
var Cell = cell.Module(
	"monitor-agent",
	"Consumes the cilium events map and distributes those and other agent events",

	// newMonitorAgent constructs the Agent and wires its lifecycle hooks.
	cell.Provide(newMonitorAgent),
	cell.Config(defaultConfig),
)
// AgentConfig holds the user-configurable settings of the monitor agent.
type AgentConfig struct {
	// EnableMonitor enables the monitor unix domain socket server
	EnableMonitor bool

	// MonitorQueueSize is the size of the monitor event queue
	// (0 selects a CPU-count-derived default at startup).
	MonitorQueueSize int
}
// defaultConfig is the default monitor agent configuration; the queue size
// is left at zero so it is derived from the CPU count at startup.
var defaultConfig = AgentConfig{
	EnableMonitor: true,
}
// Flags registers the monitor agent's command-line flags on the given flag
// set. Flag defaults are taken from the receiver so they always match the
// config instance passed to cell.Config.
func (def AgentConfig) Flags(flags *pflag.FlagSet) {
	flags.Bool("enable-monitor", def.EnableMonitor, "Enable the monitor unix domain socket server")
	// Use the receiver's value rather than a hard-coded 0 so this stays
	// consistent with enable-monitor above (defaultConfig leaves it at 0,
	// so current behavior is unchanged).
	flags.Int("monitor-queue-size", def.MonitorQueueSize, "Size of the event queue when reading monitor events")
}
// agentParams lists the dependencies injected into newMonitorAgent by hive.
type agentParams struct {
	cell.In

	Lifecycle cell.Lifecycle
	Log       *slog.Logger
	Config    AgentConfig
	// EventsMap is optional: without it the agent serves agent events only.
	EventsMap eventsmap.Map `optional:"true"`
}
// newMonitorAgent constructs the monitor Agent and registers lifecycle hooks
// that attach it to the events map and, if enabled, start the monitor API
// server. The agent's context is cancelled on stop.
func newMonitorAgent(params agentParams) Agent {
	ctx, cancel := context.WithCancel(context.Background())
	agent := newAgent(ctx, params.Log)

	params.Lifecycle.Append(cell.Hook{
		OnStart: func(cell.HookContext) error {
			if params.EventsMap == nil {
				// If there's no event map, function only for agent events.
				params.Log.Info("No eventsmap: monitor works only for agent events.")
				return nil
			}

			err := agent.AttachToEventsMap(defaults.MonitorBufferPages)
			if err != nil {
				params.Log.Error("encountered error when attaching the monitor agent to eventsmap", logfields.Error, err)
				return fmt.Errorf("encountered error when attaching the monitor agent: %w", err)
			}

			if params.Config.EnableMonitor {
				queueSize := params.Config.MonitorQueueSize
				if queueSize == 0 {
					// Derive a bounded default from the machine's CPU count.
					possibleCPUs, err := ebpf.PossibleCPU()
					if err != nil {
						params.Log.Error("failed to get number of possible CPUs", logfields.Error, err)
						return fmt.Errorf("failed to get number of possible CPUs: %w", err)
					}
					queueSize = min(possibleCPUs*defaults.MonitorQueueSizePerCPU, defaults.MonitorQueueSizePerCPUMaximum)
				}

				err = ServeMonitorAPI(ctx, params.Log, agent, queueSize)
				if err != nil {
					params.Log.Error("encountered error serving monitor agent API", logfields.Error, err)
					return fmt.Errorf("encountered error serving monitor agent API: %w", err)
				}
			}
			// Clarity fix: every error path above already returned, so err is
			// necessarily nil here; return nil explicitly instead of err.
			return nil
		},
		OnStop: func(cell.HookContext) error {
			cancel()
			return nil
		},
	})

	return agent
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package listener
import (
"errors"
"net"
"os"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/monitor/payload"
)
// Version is the version of a node-monitor listener client. There are
// two API versions:
// - 1.0 which encodes the gob type information with each payload sent, and
// adds a meta object before it.
// - 1.2 which maintains a gob session per listener, thus only encoding the
// type information on the first payload sent. It does NOT prepend the a meta
// object.
type Version string

const (
	// VersionUnsupported is here for use in error returns etc.
	VersionUnsupported = Version("unsupported")

	// Version1_2 is the API 1.2 version of the protocol (see above).
	Version1_2 = Version("1.2")
)
// MonitorListener is a generic consumer of monitor events. Implementers are
// expected to handle errors as needed, including exiting.
type MonitorListener interface {
	// Enqueue adds this payload to the send queue. Any errors should be logged
	// and handled appropriately.
	Enqueue(pl *payload.Payload)

	// Version returns the API version of this listener
	Version() Version

	// Close closes the listener.
	Close()
}
// IsDisconnected reports whether err represents a peer disconnect: a
// net.OpError wrapping an os.SyscallError whose underlying errno is EPIPE.
func IsDisconnected(err error) bool {
	if err == nil {
		return false
	}

	opErr := &net.OpError{}
	syscallErr := &os.SyscallError{}
	// Unwrap the network layer, then the syscall layer.
	if !errors.As(err, &opErr) || !errors.As(opErr.Err, &syscallErr) {
		return false
	}

	var errno unix.Errno
	return errors.As(syscallErr.Err, &errno) && errors.Is(errno, unix.EPIPE)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package agent
import (
"encoding/gob"
"log/slog"
"net"
"sync"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/monitor/agent/listener"
"github.com/cilium/cilium/pkg/monitor/payload"
)
// listenerv1_2 implements the cilium-node-monitor API protocol compatible with
// cilium 1.2
// cleanupFn is called on exit
type listenerv1_2 struct {
	logger *slog.Logger
	// conn is the accepted unix socket connection to the client.
	conn net.Conn
	// queue buffers payloads between Enqueue and the drainQueue goroutine.
	queue chan *payload.Payload
	// cleanupFn is invoked when drainQueue exits (typically Agent.RemoveListener).
	cleanupFn func(listener.MonitorListener)

	// Used to prevent queue from getting closed multiple times.
	once sync.Once
}
// newListenerv1_2 wraps the accepted connection in a v1.2 listener and starts
// the goroutine that drains its payload queue onto the socket.
func newListenerv1_2(logger *slog.Logger, c net.Conn, queueSize int, cleanupFn func(listener.MonitorListener)) *listenerv1_2 {
	l := &listenerv1_2{
		logger:    logger,
		conn:      c,
		queue:     make(chan *payload.Payload, queueSize),
		cleanupFn: cleanupFn,
	}

	go l.drainQueue()

	return l
}
// Enqueue hands a payload to the drain goroutine without blocking; when the
// per-listener queue is full the payload is dropped and a debug message
// logged.
func (ml *listenerv1_2) Enqueue(pl *payload.Payload) {
	select {
	case ml.queue <- pl:
		// queued; the drainQueue goroutine will write it out
	default:
		ml.logger.Debug("Per listener queue is full, dropping message")
	}
}
// drainQueue encodes and sends monitor payloads to the listener. It is
// intended to be a goroutine; it runs until the queue closes or a write
// fails, then invokes cleanupFn.
func (ml *listenerv1_2) drainQueue() {
	defer func() {
		ml.cleanupFn(ml)
	}()

	// v1.2 keeps one gob session per listener, so a single encoder lives for
	// the whole connection.
	enc := gob.NewEncoder(ml.conn)
	for pl := range ml.queue {
		err := pl.EncodeBinary(enc)
		if err == nil {
			continue
		}
		if listener.IsDisconnected(err) {
			// Disconnects are expected; drop the listener quietly.
			ml.logger.Debug("Listener disconnected")
		} else {
			ml.logger.Warn("Removing listener due to write failure", logfields.Error, err)
		}
		return
	}
}
// Version returns the monitor API protocol version spoken by this listener,
// which is always 1.2 for this implementation.
func (ml *listenerv1_2) Version() listener.Version {
	return listener.Version1_2
}
// Close closes the underlying socket and payload queue. The sync.Once guard
// makes it safe to call multiple times (closing the channel twice would
// panic).
func (ml *listenerv1_2) Close() {
	ml.once.Do(func() {
		ml.conn.Close()
		close(ml.queue)
	})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package agent
import (
"context"
"fmt"
"log/slog"
"net"
"os"
"github.com/cilium/cilium/pkg/api"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// buildServer opens a unix domain listener socket at path, replacing any
// stale socket file left behind by a previous run.
func buildServer(logger *slog.Logger, path string) (*net.UnixListener, error) {
	addr, err := net.ResolveUnixAddr("unix", path)
	if err != nil {
		return nil, fmt.Errorf("cannot resolve unix address %s: %w", path, err)
	}

	// Best-effort removal of a stale socket file from an earlier run.
	os.Remove(path)

	server, err := net.ListenUnix("unix", addr)
	if err != nil {
		return nil, fmt.Errorf("cannot listen on unix socket %s: %w", path, err)
	}

	// Only attempt to adjust socket permissions when running as root.
	if os.Getuid() == 0 {
		if permErr := api.SetDefaultPermissions(logger.Debug, path); permErr != nil {
			server.Close()
			return nil, fmt.Errorf("cannot set default permissions on socket %s: %w", path, permErr)
		}
	}

	return server, nil
}
// server serves the Cilium monitor API on the unix domain socket
type server struct {
	logger *slog.Logger
	// listener accepts incoming monitor client connections.
	listener net.Listener
	// monitor is the agent each accepted connection is registered with.
	monitor Agent
}
// ServeMonitorAPI serves the Cilium 1.2 monitor API on a unix domain socket.
// This method starts the server in the background. The server is stopped when
// ctx is cancelled. Each incoming connection registers a new listener on
// monitor.
func ServeMonitorAPI(ctx context.Context, logger *slog.Logger, monitor Agent, queueSize int) error {
	sockListener, err := buildServer(logger, defaults.MonitorSockPath1_2)
	if err != nil {
		return err
	}

	srv := &server{
		logger:   logger,
		listener: sockListener,
		monitor:  monitor,
	}

	logger.Info(fmt.Sprintf("Serving cilium node monitor v1.2 API at unix://%s", defaults.MonitorSockPath1_2))

	go srv.connectionHandler1_2(ctx, queueSize)

	return nil
}
// connectionHandler1_2 handles all the incoming connections and sets up the
// listener objects. It will block until ctx is cancelled.
func (s *server) connectionHandler1_2(ctx context.Context, queueSize int) {
	// Closing the listener unblocks the Accept below when ctx is cancelled.
	go func() {
		<-ctx.Done()
		s.listener.Close()
	}()

	for !isCtxDone(ctx) {
		conn, err := s.listener.Accept()
		switch {
		case isCtxDone(ctx):
			// Shutdown raced with Accept; close any connection it returned.
			if conn != nil {
				conn.Close()
			}
			return
		case err != nil:
			s.logger.Warn("Error accepting connection", logfields.Error, err)
			continue
		}

		// The agent's RemoveListener doubles as the listener's cleanup hook.
		newListener := newListenerv1_2(s.logger, conn, queueSize, s.monitor.RemoveListener)
		s.monitor.RegisterNewListener(newListener)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"bytes"
"encoding/gob"
"fmt"
)
// Methods in this file are only used on Linux paired with the MonitorEvent interface.
// Dump prints the message according to the verbosity level specified:
// JSON verbosity emits the notification as JSON, every other level emits a
// one-line human-readable summary.
func (n *AgentNotify) Dump(args *DumpArgs) {
	if args.Verbosity == JSON {
		fmt.Fprintln(args.Buf, n.getJSON())
	} else {
		fmt.Fprintf(args.Buf, ">> %s: %s\n", resolveAgentType(n.Type), n.Text)
	}
}
// Decode decodes the message in 'data' into the struct. The first byte of
// data is the monitor message type and is skipped; the remainder is the
// gob-encoded AgentNotify. An empty input is rejected instead of panicking
// on the data[1:] slice expression.
func (a *AgentNotify) Decode(data []byte) error {
	if len(data) == 0 {
		return fmt.Errorf("empty agent notification payload")
	}
	buf := bytes.NewBuffer(data[1:])
	dec := gob.NewDecoder(buf)
	return dec.Decode(a)
}
// GetSrc retrieves the source endpoint for the message. Agent notifications
// carry no endpoint, so this always returns 0.
func (n *AgentNotify) GetSrc() (src uint16) {
	return 0
}
// GetDst retrieves the destination endpoint for the message. Agent
// notifications carry no endpoint, so this always returns 0.
func (n *AgentNotify) GetDst() (dst uint16) {
	return 0
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"fmt"
)
// DropMin numbers less than this are non-drop reason codes
var DropMin uint8 = 130

// DropInvalid is the Invalid packet reason.
var DropInvalid uint8 = 2
// These values are shared with bpf/lib/common.h and api/v1/flow/flow.proto.
// Codes below DropMin (130) are informational/forwarding reason codes;
// codes from 130 upward are drop reasons.
var errors = map[uint8]string{
	0:   "Success",
	2:   "Invalid packet",
	3:   "Interface",
	4:   "Interface Decrypted",
	5:   "LB, sock cgroup: No backend slot entry found",
	6:   "LB, sock cgroup: No backend entry found",
	7:   "LB, sock cgroup: Reverse entry update failed",
	8:   "LB, sock cgroup: Reverse entry stale",
	9:   "Fragmented packet",
	10:  "Fragmented packet entry update failed",
	11:  "Missed tail call to custom program",
	12:  "Interface Decrypting",
	13:  "Interface Encrypting",
	14:  "LB: sock cgroup: Reverse entry delete succeeded",
	130: "Invalid source mac",      // Unused
	131: "Invalid destination mac", // Unused
	132: "Invalid source ip",
	133: "Policy denied",
	134: "Invalid packet",
	135: "CT: Truncated or invalid header",
	136: "Fragmentation needed",
	137: "CT: Unknown L4 protocol",
	138: "CT: Can't create entry from packet", // Unused
	139: "Unsupported L3 protocol",
	140: "Missed tail call",
	141: "Error writing to packet",
	142: "Unknown L4 protocol",
	143: "Unknown ICMPv4 code",
	144: "Unknown ICMPv4 type",
	145: "Unknown ICMPv6 code",
	146: "Unknown ICMPv6 type",
	147: "Error retrieving tunnel key",
	148: "Error retrieving tunnel options", // Unused
	149: "Invalid Geneve option",           // Unused
	150: "Unknown L3 target address",
	151: "Stale or unroutable IP",
	152: "No matching local container found", // Unused
	153: "Error while correcting L3 checksum",
	154: "Error while correcting L4 checksum",
	155: "CT: Map insertion failed",
	156: "Invalid IPv6 extension header",
	157: "IP fragmentation not supported",
	158: "Service backend not found",
	160: "No tunnel/encapsulation endpoint (datapath BUG!)",
	161: "NAT 46/64 not enabled",
	162: "Reached EDT rate-limiting drop horizon",
	163: "Unknown connection tracking state",
	164: "Local host is unreachable",
	165: "No configuration available to perform policy decision", // Unused
	166: "Unsupported L2 protocol",
	167: "No mapping for NAT masquerade",
	168: "Unsupported protocol for NAT masquerade",
	169: "FIB lookup failed",
	170: "Encapsulation traffic is prohibited",
	171: "Invalid identity",
	172: "Unknown sender",
	173: "NAT not needed",
	174: "Is a ClusterIP",
	175: "First logical datagram fragment not found",
	176: "Forbidden ICMPv6 message",
	177: "Denied by LB src range check",
	178: "Socket lookup failed",
	179: "Socket assign failed",
	180: "Proxy redirection not supported for protocol",
	181: "Policy denied by denylist",
	182: "VLAN traffic disallowed by VLAN filter",
	183: "Incorrect VNI from VTEP",
	184: "Failed to update or lookup TC buffer",
	185: "No SID was found for the IP address",
	186: "SRv6 state was removed during tail call",
	187: "L3 translation from IPv4 to IPv6 failed (NAT46)",
	188: "L3 translation from IPv6 to IPv4 failed (NAT64)",
	189: "Authentication required",
	190: "No conntrack map found",
	191: "No nat map found",
	192: "Invalid ClusterID",
	193: "Unsupported packet protocol for DSR encapsulation",
	194: "No egress gateway found",
	195: "Traffic is unencrypted",
	196: "TTL exceeded",
	197: "No node ID found",
	198: "Rate limited",
	199: "IGMP handled",
	200: "IGMP subscribed",
	201: "Multicast handled",
	202: "Host datapath not ready",
	203: "Endpoint policy program not available",
	204: "No Egress IP configured",
	205: "Punt to proxy",
}
// extendedReason renders the extended drop-error code as a decimal string.
// A zero code carries no extra information and yields the empty string so
// DropReasonExt can omit it.
func extendedReason(extError int8) string {
	// Idiom fix: compare against the untyped constant 0 directly; the
	// explicit int8(0) conversion was redundant.
	if extError == 0 {
		return ""
	}
	return fmt.Sprintf("%d", extError)
}
// DropReasonExt returns a human readable string for the drop reason,
// appending the extended error code when it is non-zero. Unknown reason
// codes are rendered numerically.
func DropReasonExt(reason uint8, extError int8) string {
	msg, known := errors[reason]
	if !known {
		return fmt.Sprintf("%d, %d", reason, extError)
	}
	if ext := extendedReason(extError); ext != "" {
		return msg + ", " + ext
	}
	return msg
}
// DropReason prints the drop reason in a human readable string; it is
// DropReasonExt with no extended error code.
func DropReason(reason uint8) string {
	return DropReasonExt(reason, int8(0))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import "fmt"
// Keep in sync with __id_for_file in bpf/lib/source_info.h.
// IDs 1-99 are reserved for programs in bpf/, IDs from 101 for headers in
// bpf/lib/.
var files = map[uint8]string{
	// @@ source files list begin

	// source files from bpf/
	1: "bpf_host.c",
	2: "bpf_lxc.c",
	3: "bpf_overlay.c",
	4: "bpf_xdp.c",
	5: "bpf_sock.c",
	6: "bpf_network.c",
	7: "bpf_wireguard.c",

	// header files from bpf/lib/
	101: "arp.h",
	102: "drop.h",
	103: "srv6.h",
	104: "icmp6.h",
	105: "nodeport.h",
	106: "lb.h",
	107: "mcast.h",
	108: "ipv4.h",
	109: "conntrack.h",
	110: "local_delivery.h",
	111: "trace.h",
	112: "encap.h",
	113: "encrypt.h",
	114: "host_firewall.h",
	115: "nodeport_egress.h",
	116: "ipv6.h",
	117: "classifiers.h",

	// @@ source files list end
}
// BPFFileName returns the file name for the given BPF file id, or
// "unknown(<id>)" when the id is not in the table.
func BPFFileName(id uint8) string {
	name, ok := files[id]
	if !ok {
		return fmt.Sprintf("unknown(%d)", id)
	}
	return name
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"bufio"
"bytes"
"encoding/binary"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
)
// Verbosity levels for formatting output.
type Verbosity uint8

const (
	// INFO is the level of verbosity in which summaries of Drop and Capture
	// messages are printed out when the monitor is invoked
	INFO Verbosity = iota + 1
	// DEBUG is the level of verbosity in which more information about packets
	// is printed than in INFO mode. Debug, Drop, and Capture messages are printed.
	DEBUG
	// VERBOSE is the level of verbosity in which the most information possible
	// about packets is printed out. Currently is not utilized.
	VERBOSE
	// JSON is the level of verbosity in which event information is printed out in json format
	JSON
)
// DisplayFormat is used to determine how to display the endpoint
type DisplayFormat bool

const (
	// DisplayLabel is used to display the endpoint as a label
	DisplayLabel DisplayFormat = false
	// DisplayNumeric is used to display the endpoint as a number
	DisplayNumeric DisplayFormat = true
)
// DumpArgs is used to pass arguments to the Dump method
type DumpArgs struct {
	// Data is the raw message body to dissect/print.
	Data []byte
	// CpuPrefix is prepended to output lines to identify the source CPU.
	CpuPrefix string
	// Format selects label vs numeric endpoint display.
	Format DisplayFormat
	// LinkMonitor resolves link indices to names.
	LinkMonitor getters.LinkGetter
	// Dissect enables packet dissection of the payload.
	Dissect bool
	// Verbosity selects the output detail level (INFO/DEBUG/VERBOSE/JSON).
	Verbosity Verbosity
	// Buf receives the formatted output.
	Buf *bufio.Writer
}
// MonitorEvent is the interface that all monitor events must implement to be dumped
type MonitorEvent interface {
	// Decode decodes the message in 'data' into the struct.
	Decode(data []byte) error

	// GetSrc retrieves the source endpoint for the message
	GetSrc() (src uint16)

	// GetDst retrieves the destination endpoint for the message.
	GetDst() (dst uint16)

	// Dump prints the message according to the verbosity level specified
	Dump(args *DumpArgs)
}
// DefaultDecoder is a default implementation of the Decode method
type DefaultDecoder struct{}

// Decode decodes the message in 'data' into the struct using native-endian
// binary.Read over the receiver's fields.
func (d *DefaultDecoder) Decode(data []byte) error {
	return binary.Read(bytes.NewReader(data), byteorder.Native, d)
}
// DefaultSrcDstGetter is a default implementation of the GetSrc and GetDst
// methods for events that carry no endpoint information.
type DefaultSrcDstGetter struct{}

// GetSrc retrieves the source endpoint for the message; always 0 here.
func (d *DefaultSrcDstGetter) GetSrc() (src uint16) {
	return 0
}

// GetDst retrieves the destination endpoint for the message; always 0 here.
func (d *DefaultSrcDstGetter) GetDst() (dst uint16) {
	return 0
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"encoding/json"
"fmt"
"net"
"slices"
"sort"
"strconv"
"strings"
"time"
"github.com/cilium/cilium/pkg/monitor/notifications"
)
// Must be synchronized with <bpf/lib/common.h>
const (
	// 0-128 are reserved for BPF datapath events
	MessageTypeUnspec = iota

	// MessageTypeDrop is a BPF datapath notification carrying a DropNotify
	// which corresponds to drop_notify defined in bpf/lib/drop.h
	MessageTypeDrop

	// MessageTypeDebug is a BPF datapath notification carrying a DebugMsg
	// which corresponds to debug_msg defined in bpf/lib/dbg.h
	MessageTypeDebug

	// MessageTypeCapture is a BPF datapath notification carrying a DebugCapture
	// which corresponds to debug_capture_msg defined in bpf/lib/dbg.h
	MessageTypeCapture

	// MessageTypeTrace is a BPF datapath notification carrying a TraceNotify
	// which corresponds to trace_notify defined in bpf/lib/trace.h
	MessageTypeTrace

	// MessageTypePolicyVerdict is a BPF datapath notification carrying a PolicyVerdictNotify
	// which corresponds to policy_verdict_notify defined in bpf/lib/policy_log.h
	MessageTypePolicyVerdict

	// MessageTypeTraceSock is a BPF datapath notification carrying a TraceNotifySock
	// which corresponds to trace_sock_notify defined in bpf/lib/trace_sock.h
	// (value 6 is intentionally skipped here).
	MessageTypeTraceSock = 7

	// 129-255 are reserved for agent level events

	// MessageTypeAccessLog contains a pkg/proxy/accesslog.LogRecord
	MessageTypeAccessLog = 129

	// MessageTypeAgent is an agent notification carrying a AgentNotify
	MessageTypeAgent = 130
)
const (
MessageTypeNameDrop = "drop"
MessageTypeNameDebug = "debug"
MessageTypeNameCapture = "capture"
MessageTypeNameTrace = "trace"
MessageTypeNameL7 = "l7"
MessageTypeNameAgent = "agent"
MessageTypeNamePolicyVerdict = "policy-verdict"
MessageTypeNameTraceSock = "trace-sock"
)
// MessageTypeFilter is a list of message type values to filter on.
// Its String/Set/Type methods (below) make it usable as a command-line
// flag value (pflag-style) — confirm at the flag-registration call site.
type MessageTypeFilter []int

var (
	// MessageTypeNames is a map of all type names
	MessageTypeNames = map[string]int{
		MessageTypeNameDrop:          MessageTypeDrop,
		MessageTypeNameDebug:         MessageTypeDebug,
		MessageTypeNameCapture:       MessageTypeCapture,
		MessageTypeNameTrace:         MessageTypeTrace,
		MessageTypeNameL7:            MessageTypeAccessLog,
		MessageTypeNameAgent:         MessageTypeAgent,
		MessageTypeNamePolicyVerdict: MessageTypePolicyVerdict,
		MessageTypeNameTraceSock:     MessageTypeTraceSock,
	}
)
// AllMessageTypeNames returns all registered message type names, ordered by
// the numeric message type each name maps to.
func AllMessageTypeNames() []string {
	all := make([]string, 0, len(MessageTypeNames))
	for n := range MessageTypeNames {
		all = append(all, n)
	}
	slices.SortStableFunc(all, func(a, b string) int {
		return MessageTypeNames[a] - MessageTypeNames[b]
	})
	return all
}
// MessageTypeName returns the name for a message type or the numeric value if
// the name can't be found
func MessageTypeName(typ int) string {
	name := strconv.Itoa(typ)
	for n, v := range MessageTypeNames {
		if v == typ {
			name = n
			break
		}
	}
	return name
}
// String returns the filter as a comma-separated list of type names.
func (m *MessageTypeFilter) String() string {
	pieces := make([]string, 0, len(*m))
	for _, typ := range *m {
		pieces = append(pieces, MessageTypeName(typ))
	}
	return strings.Join(pieces, ",")
}

// Set parses a message type name and appends the corresponding numeric type
// to the filter. It returns an error for unknown names.
func (m *MessageTypeFilter) Set(value string) error {
	// The second result of a map lookup is a presence bool, not an error;
	// the original misleadingly named it 'err'.
	i, ok := MessageTypeNames[value]
	if !ok {
		return fmt.Errorf("Unknown type (%s). Please use one of the following ones %v",
			value, MessageTypeNames)
	}
	*m = append(*m, i)
	return nil
}

// Type returns the flag type description.
func (m *MessageTypeFilter) Type() string {
	return "[]string"
}

// Contains reports whether the filter includes the given message type.
func (m *MessageTypeFilter) Contains(typ int) bool {
	return slices.Contains(*m, typ)
}
// Must be synchronized with <bpf/lib/trace.h>
const (
	TraceToLxc = iota
	TraceToProxy
	TraceToHost
	TraceToStack
	TraceToOverlay
	TraceFromLxc
	TraceFromProxy
	TraceFromHost
	TraceFromStack
	TraceFromOverlay
	TraceFromNetwork
	TraceToNetwork
	TraceFromCrypto
	TraceToCrypto
)

// TraceObservationPoints is a map of all supported trace observation points
var TraceObservationPoints = map[uint8]string{
	TraceToLxc:       "to-endpoint",
	TraceToProxy:     "to-proxy",
	TraceToHost:      "to-host",
	TraceToStack:     "to-stack",
	TraceToOverlay:   "to-overlay",
	TraceToNetwork:   "to-network",
	TraceToCrypto:    "to-crypto",
	TraceFromLxc:     "from-endpoint",
	TraceFromProxy:   "from-proxy",
	TraceFromHost:    "from-host",
	TraceFromStack:   "from-stack",
	TraceFromOverlay: "from-overlay",
	TraceFromNetwork: "from-network",
	TraceFromCrypto:  "from-crypto",
}
// TraceObservationPoint returns the name of a trace observation point,
// or the decimal value for unknown points.
func TraceObservationPoint(obsPoint uint8) string {
	str, ok := TraceObservationPoints[obsPoint]
	if !ok {
		return strconv.Itoa(int(obsPoint))
	}
	return str
}
// AgentNotify is a notification from the agent. The notification is stored
// in its JSON-encoded representation
type AgentNotify struct {
	Type AgentNotification
	// Text holds the JSON-encoded notification payload.
	Text string
}

// AgentNotifyMessage is a notification from the agent. It is similar to
// AgentNotify, but the notification is an unencoded struct. See the *Message
// constructors in this package for possible values.
type AgentNotifyMessage struct {
	Type AgentNotification
	// Notification is the not-yet-encoded payload; ToJSON marshals it.
	Notification any
}
// ToJSON encodes a AgentNotifyMessage to its JSON-based AgentNotify representation
func (m *AgentNotifyMessage) ToJSON() (AgentNotify, error) {
	repr, err := json.Marshal(m.Notification)
	if err != nil {
		return AgentNotify{}, err
	}
	notify := AgentNotify{
		Type: m.Type,
		Text: string(repr),
	}
	return notify, nil
}
// AgentNotification specifies the type of agent notification
type AgentNotification uint32

const (
	AgentNotifyUnspec AgentNotification = iota
	AgentNotifyGeneric
	AgentNotifyStart
	AgentNotifyEndpointRegenerateSuccess
	AgentNotifyEndpointRegenerateFail
	AgentNotifyPolicyUpdated
	AgentNotifyPolicyDeleted
	AgentNotifyEndpointCreated
	AgentNotifyEndpointDeleted
	AgentNotifyIPCacheUpserted
	AgentNotifyIPCacheDeleted
)

// AgentNotifications is a map of all supported agent notification types.
// The values are the human-readable subtype descriptions used by
// resolveAgentType.
var AgentNotifications = map[AgentNotification]string{
	AgentNotifyUnspec:                    "unspecified",
	AgentNotifyGeneric:                   "Message",
	AgentNotifyStart:                     "Cilium agent started",
	AgentNotifyEndpointRegenerateSuccess: "Endpoint regenerated",
	AgentNotifyEndpointCreated:           "Endpoint created",
	AgentNotifyEndpointDeleted:           "Endpoint deleted",
	AgentNotifyEndpointRegenerateFail:    "Failed endpoint regeneration",
	AgentNotifyIPCacheDeleted:            "IPCache entry deleted",
	AgentNotifyIPCacheUpserted:           "IPCache entry upserted",
	AgentNotifyPolicyUpdated:             "Policy updated",
	AgentNotifyPolicyDeleted:             "Policy deleted",
}
// resolveAgentType maps an agent notification type to its human-readable
// description, falling back to the numeric value for unknown types.
func resolveAgentType(t AgentNotification) string {
	if name, ok := AgentNotifications[t]; ok {
		return name
	}
	return fmt.Sprintf("%d", t)
}

// getJSON renders the notification as a single-line JSON object string.
func (n *AgentNotify) getJSON() string {
	return fmt.Sprintf(`{"type":"agent","subtype":"%s","message":%s}`, resolveAgentType(n.Type), n.Text)
}
// PolicyUpdateNotification structures update notification
type PolicyUpdateNotification struct {
	// Labels of the rules affected by the update.
	Labels []string `json:"labels,omitempty"`
	// Revision is the policy repository revision after the update.
	Revision uint64 `json:"revision,omitempty"`
	// RuleCount is the number of rules added (or deleted, see
	// PolicyDeleteMessage, which reuses this type).
	RuleCount int `json:"rule_count"`
}
// PolicyUpdateMessage constructs an agent notification message for policy updates
func PolicyUpdateMessage(numRules int, labels []string, revision uint64) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type: AgentNotifyPolicyUpdated,
		Notification: PolicyUpdateNotification{
			Labels:    labels,
			Revision:  revision,
			RuleCount: numRules,
		},
	}
}

// PolicyDeleteMessage constructs an agent notification message for policy deletion
func PolicyDeleteMessage(deleted int, labels []string, revision uint64) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type: AgentNotifyPolicyDeleted,
		Notification: PolicyUpdateNotification{
			Labels:    labels,
			Revision:  revision,
			RuleCount: deleted,
		},
	}
}
// EndpointRegenNotification structures regeneration notification
type EndpointRegenNotification struct {
	// ID is the endpoint identifier.
	ID uint64 `json:"id,omitempty"`
	// Labels are the endpoint's labels.
	Labels []string `json:"labels,omitempty"`
	// Error is set when the regeneration failed.
	Error string `json:"error,omitempty"`
}
// EndpointRegenMessage constructs an agent notification message for endpoint regeneration
func EndpointRegenMessage(e notifications.RegenNotificationInfo, err error) AgentNotifyMessage {
	typ := AgentNotifyEndpointRegenerateSuccess
	notification := EndpointRegenNotification{
		ID:     e.GetID(),
		Labels: e.GetOpLabels(),
	}
	// A non-nil error turns this into a failure notification.
	if err != nil {
		typ = AgentNotifyEndpointRegenerateFail
		notification.Error = err.Error()
	}
	return AgentNotifyMessage{
		Type:         typ,
		Notification: notification,
	}
}
// EndpointNotification structures the endpoint create or delete notification
type EndpointNotification struct {
	EndpointRegenNotification
	// PodName is the Kubernetes pod name backing the endpoint, if any.
	PodName string `json:"pod-name,omitempty"`
	// Namespace is the Kubernetes namespace of the pod, if any.
	Namespace string `json:"namespace,omitempty"`
}
// endpointNotification builds the notification payload shared by the
// endpoint create and delete message constructors.
func endpointNotification(e notifications.RegenNotificationInfo) EndpointNotification {
	return EndpointNotification{
		EndpointRegenNotification: EndpointRegenNotification{
			ID:     e.GetID(),
			Labels: e.GetOpLabels(),
		},
		PodName:   e.GetK8sPodName(),
		Namespace: e.GetK8sNamespace(),
	}
}

// EndpointCreateMessage constructs an agent notification message for endpoint creation
func EndpointCreateMessage(e notifications.RegenNotificationInfo) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type:         AgentNotifyEndpointCreated,
		Notification: endpointNotification(e),
	}
}

// EndpointDeleteMessage constructs an agent notification message for endpoint deletion
func EndpointDeleteMessage(e notifications.RegenNotificationInfo) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type:         AgentNotifyEndpointDeleted,
		Notification: endpointNotification(e),
	}
}
// IPCacheNotification structures ipcache change notifications
type IPCacheNotification struct {
	// CIDR is the prefix the notification refers to.
	CIDR string `json:"cidr"`
	// Identity is the (new) numeric security identity for the prefix.
	Identity uint32 `json:"id"`
	// OldIdentity is the previous identity, if any.
	OldIdentity *uint32 `json:"old-id,omitempty"`
	// HostIP is the (new) node IP backing the prefix, if any.
	HostIP net.IP `json:"host-ip,omitempty"`
	// OldHostIP is the previous node IP, if any.
	OldHostIP net.IP `json:"old-host-ip,omitempty"`
	// EncryptKey is the encryption key index associated with the entry.
	EncryptKey uint8 `json:"encrypt-key"`
	// Namespace/PodName identify the workload, when known.
	Namespace string `json:"namespace,omitempty"`
	PodName   string `json:"pod-name,omitempty"`
}
// IPCacheUpsertedMessage constructs an agent notification message for ipcache upsertions
func IPCacheUpsertedMessage(cidr string, id uint32, oldID *uint32, hostIP net.IP, oldHostIP net.IP,
encryptKey uint8, namespace, podName string) AgentNotifyMessage {
notification := IPCacheNotification{
CIDR: cidr,
Identity: id,
OldIdentity: oldID,
HostIP: hostIP,
OldHostIP: oldHostIP,
EncryptKey: encryptKey,
Namespace: namespace,
PodName: podName,
}
return AgentNotifyMessage{
Type: AgentNotifyIPCacheUpserted,
Notification: notification,
}
}
// IPCacheDeletedMessage constructs an agent notification message for ipcache deletions
func IPCacheDeletedMessage(cidr string, id uint32, oldID *uint32, hostIP net.IP, oldHostIP net.IP,
encryptKey uint8, namespace, podName string) AgentNotifyMessage {
notification := IPCacheNotification{
CIDR: cidr,
Identity: id,
OldIdentity: oldID,
HostIP: hostIP,
OldHostIP: oldHostIP,
EncryptKey: encryptKey,
Namespace: namespace,
PodName: podName,
}
return AgentNotifyMessage{
Type: AgentNotifyIPCacheDeleted,
Notification: notification,
}
}
// TimeNotification structures agent start notification
type TimeNotification struct {
	// Time is the start time formatted as RFC3339Nano (see StartMessage).
	Time string `json:"time"`
}
// StartMessage constructs an agent notification message when the agent starts
func StartMessage(t time.Time) AgentNotifyMessage {
	return AgentNotifyMessage{
		Type: AgentNotifyStart,
		Notification: TimeNotification{
			Time: t.Format(time.RFC3339Nano),
		},
	}
}
const (
	// PolicyIngress is the value of Flags&PolicyNotifyFlagDirection for ingress traffic
	PolicyIngress = 1
	// PolicyEgress is the value of Flags&PolicyNotifyFlagDirection for egress traffic
	PolicyEgress = 2
	// PolicyMatchNone is the value of MatchType indicating no policy match
	PolicyMatchNone = 0
	// PolicyMatchL3Only is the value of MatchType indicating a L3-only match
	PolicyMatchL3Only = 1
	// PolicyMatchL3L4 is the value of MatchType indicating a L3+L4 match
	PolicyMatchL3L4 = 2
	// PolicyMatchL4Only is the value of MatchType indicating a L4-only match
	PolicyMatchL4Only = 3
	// PolicyMatchAll is the value of MatchType indicating an allow-all match
	PolicyMatchAll = 4
	// PolicyMatchL3Proto is the value of MatchType indicating a L3 and protocol match
	PolicyMatchL3Proto = 5
	// PolicyMatchProtoOnly is the value of MatchType indicating only a protocol match
	PolicyMatchProtoOnly = 6
)

// PolicyMatchType identifies which kind of policy match applied to a verdict.
type PolicyMatchType int

// String returns a human-readable name for the match type, or "unknown" for
// values outside the PolicyMatch* range.
func (m PolicyMatchType) String() string {
	switch m {
	case PolicyMatchNone:
		return "none"
	case PolicyMatchL3Only:
		return "L3-Only"
	case PolicyMatchL3L4:
		return "L3-L4"
	case PolicyMatchL4Only:
		return "L4-Only"
	case PolicyMatchAll:
		return "all"
	case PolicyMatchL3Proto:
		return "L3-Proto"
	case PolicyMatchProtoOnly:
		return "Proto-Only"
	default:
		return "unknown"
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package monitor
import (
"bufio"
"encoding/json"
"fmt"
"net"
// NOTE: syscall is deprecated, but it is replaced by golang.org/x/sys
// which reuses syscall.Errno similarly to how we do below.
"syscall"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/monitor/api"
)
// must be in sync with <bpf/lib/dbg.h>
// Capture point identifiers carried in DebugCapture.SubType.
const (
	DbgCaptureUnspec = iota
	DbgCaptureReserved1
	DbgCaptureReserved2
	DbgCaptureReserved3
	DbgCaptureDelivery
	DbgCaptureFromLb
	DbgCaptureAfterV46
	DbgCaptureAfterV64
	DbgCaptureProxyPre
	DbgCaptureProxyPost
	DbgCaptureSnatPre
	DbgCaptureSnatPost
)

// must be in sync with <bpf/lib/dbg.h>
// Debug message identifiers carried in DebugMsg.SubType; see
// DebugMsg.Message for how each one's Arg1..Arg3 are interpreted.
const (
	DbgUnspec = iota
	DbgGeneric
	DbgLocalDelivery
	DbgEncap
	DbgLxcFound
	DbgPolicyDenied
	DbgCtLookup
	DbgCtLookupRev
	DbgCtMatch
	DbgCtCreated
	DbgCtCreated2
	DbgIcmp6Handle
	DbgIcmp6Request
	DbgIcmp6Ns
	DbgIcmp6TimeExceeded
	DbgCtVerdict
	DbgDecap
	DbgPortMap
	DbgErrorRet
	DbgToHost
	DbgToStack
	DbgPktHash
	DbgLb6LookupFrontend
	DbgLb6LookupFrontendFail
	DbgLb6LookupBackendSlot
	DbgLb6LookupBackendSlotSuccess
	DbgLb6LookupBackendSlotV2Fail
	DbgLb6LookupBackendFail
	DbgLb6ReverseNatLookup
	DbgLb6ReverseNat
	DbgLb4LookupFrontend
	DbgLb4LookupFrontendFail
	DbgLb4LookupBackendSlot
	DbgLb4LookupBackendSlotSuccess
	DbgLb4LookupBackendSlotV2Fail
	DbgLb4LookupBackendFail
	DbgLb4ReverseNatLookup
	DbgLb4ReverseNat
	DbgLb4LoopbackSnat
	DbgLb4LoopbackSnatRev
	DbgCtLookup4
	DbgRRBackendSlotSel
	DbgRevProxyLookup
	DbgRevProxyFound
	DbgRevProxyUpdate
	DbgL4Policy
	DbgNetdevInCluster
	DbgNetdevEncap4
	DbgCTLookup41
	DbgCTLookup42
	DbgCTCreated4
	DbgCTLookup61
	DbgCTLookup62
	DbgCTCreated6
	DbgSkipProxy
	DbgL4Create
	DbgIPIDMapFailed4
	DbgIPIDMapFailed6
	DbgIPIDMapSucceed4
	DbgIPIDMapSucceed6
	DbgLbStaleCT
	DbgInheritIdentity
	DbgSkLookup4
	DbgSkLookup6
	DbgSkAssign
	DbgL7LB
)
// must be in sync with <bpf/lib/conntrack.h>
const (
	CtNew uint32 = iota
	CtEstablished
	CtReply
	CtRelated
)

// ctStateText maps conntrack states to display names (see ctState).
var ctStateText = map[uint32]string{
	CtNew:         "New",
	CtEstablished: "Established",
	CtReply:       "Reply",
	CtRelated:     "Related",
}

// Conntrack direction values, used as keys of ctDirection.
const (
	ctEgress  = 0
	ctIngress = 1
)

// ctDirection maps conntrack direction values to display names.
var ctDirection = map[int]string{
	ctEgress:  "egress",
	ctIngress: "ingress",
}
// ctState returns the human-readable name of a conntrack state. Unknown
// values are rendered via the datapath drop-reason table.
func ctState(state uint32) string {
	if txt, ok := ctStateText[state]; ok {
		return txt
	}
	return api.DropReason(uint8(state))
}
// tupleFlags maps conntrack tuple flag bit values to display names.
// NOTE(review): key 0 can never be selected by ctFlags' bitwise-AND test;
// presumably TUPLE_F_IN == 0 in the datapath and "IN" is the implied
// default — confirm against <bpf/lib/conntrack.h>.
var tupleFlags = map[int16]string{
	0: "IN",
	1: "OUT",
	2: "RELATED",
}
// ctFlags renders the conntrack tuple flags as a comma-separated list of
// flag names.
//
// The flags are visited in fixed key order: ranging directly over the
// tupleFlags map (as the original did) yields a randomly ordered string,
// since Go map iteration order is randomized, making the monitor output
// nondeterministic for multi-flag values.
func ctFlags(flags int16) string {
	s := ""
	for _, k := range []int16{0, 1, 2} {
		// Note: k == 0 never passes this test, so tupleFlags[0] ("IN")
		// is never emitted — behavior preserved from the original.
		if k&flags == 0 {
			continue
		}
		if s != "" {
			s += ", "
		}
		s += tupleFlags[k]
	}
	return s
}
// ctInfo renders the conntrack tuple packed into two debug-message arguments.
func ctInfo(arg1 uint32, arg2 uint32) string {
	sport := arg1 >> 16
	dport := arg1 & 0xFFFF
	nexthdr := arg2 >> 8
	return fmt.Sprintf("sport=%d dport=%d nexthdr=%d flags=%s",
		sport, dport, nexthdr, ctFlags(int16(arg2&0xFF)))
}
// ctLookup4Info1 renders the IPv4 conntrack lookup tuple (part 1/2).
func ctLookup4Info1(n *DebugMsg) string {
	saddr, sport := ip4Str(n.Arg1), n.Arg3&0xFFFF
	daddr, dport := ip4Str(n.Arg2), n.Arg3>>16
	return fmt.Sprintf("src=%s:%d dst=%s:%d", saddr, sport, daddr, dport)
}

// ctLookup4Info2 renders the IPv4 conntrack lookup tuple (part 2/2).
func ctLookup4Info2(n *DebugMsg) string {
	nexthdr, flags := n.Arg1>>8, n.Arg1&0xFF
	return fmt.Sprintf("nexthdr=%d flags=%d dir=%d scope=%d",
		nexthdr, flags, n.Arg2, n.Arg3)
}

// ctCreate4Info renders the parameters of a newly created IPv4 conntrack entry.
func ctCreate4Info(n *DebugMsg) string {
	proxyPort := n.Arg1 >> 16
	revnat := byteorder.NetworkToHost16(uint16(n.Arg1 & 0xFFFF))
	return fmt.Sprintf("proxy-port=%d revnat=%d src-identity=%d lb=%s",
		proxyPort, revnat, n.Arg2, ip4Str(n.Arg3))
}

// ctLookup6Info1 renders the (truncated) IPv6 conntrack lookup tuple.
func ctLookup6Info1(n *DebugMsg) string {
	saddr, sport := ip6Str(n.Arg1), n.Arg3&0xFFFF
	daddr, dport := ip6Str(n.Arg2), n.Arg3>>16
	return fmt.Sprintf("src=[::%s]:%d dst=[::%s]:%d", saddr, sport, daddr, dport)
}

// ctCreate6Info renders the parameters of a newly created IPv6 conntrack entry.
func ctCreate6Info(n *DebugMsg) string {
	proxyPort := n.Arg1 >> 16
	revnat := byteorder.NetworkToHost16(uint16(n.Arg1 & 0xFFFF))
	return fmt.Sprintf("proxy-port=%d revnat=%d src-identity=%d",
		proxyPort, revnat, n.Arg2)
}
// skAssignInfo renders the result of a socket assignment: "Success" when
// Arg1 is zero, otherwise the errno string for Arg1.
func skAssignInfo(n *DebugMsg) string {
	if errno := n.Arg1; errno != 0 {
		return syscall.Errno(errno).Error()
	}
	return "Success"
}
// verdictInfo renders the reverse-NAT index carried in a verdict argument.
func verdictInfo(arg uint32) string {
	return fmt.Sprintf("revnat=%d", byteorder.NetworkToHost16(uint16(arg&0xFFFF)))
}

// proxyInfo renders the proxy tuple packed into two debug-message arguments.
func proxyInfo(arg1 uint32, arg2 uint32) string {
	sport := byteorder.NetworkToHost16(uint16(arg1 >> 16))
	dport := byteorder.NetworkToHost16(uint16(arg1 & 0xFFFF))
	return fmt.Sprintf("sport=%d dport=%d saddr=%s", sport, dport, ip4Str(arg2))
}

// l4CreateInfo renders the parameters of a newly created L4 conntrack entry.
func l4CreateInfo(n *DebugMsg) string {
	dport := byteorder.NetworkToHost16(uint16(n.Arg3 >> 16))
	proto := n.Arg3 & 0xFF
	return fmt.Sprintf("src=%d dst=%d dport=%d proto=%d", n.Arg1, n.Arg2, dport, proto)
}
// ip4Str formats a native-endian uint32 as a dotted-quad IPv4 address.
func ip4Str(arg1 uint32) string {
	addr := make(net.IP, net.IPv4len)
	byteorder.Native.PutUint32(addr, arg1)
	return addr.String()
}

// ip6Str formats 32 bits of an IPv6 address as two hex groups ("hhhh:hhhh").
func ip6Str(arg1 uint32) string {
	v := byteorder.NetworkToHost32(arg1)
	return fmt.Sprintf("%x:%x", v>>16, v&0xFFFF)
}
const (
	// DebugMsgLen is the length in bytes of the fixed-size debug message
	// decoded by DebugMsg.Decode. (The previous comment, mentioning packet
	// capture data, was copy-pasted from DebugCaptureLen.)
	DebugMsgLen = 20
)
// DebugMsg is the message format of the debug message found in the BPF ring buffer
type DebugMsg struct {
	api.DefaultSrcDstGetter
	Type    uint8  // event type byte (offset 0)
	SubType uint8  // one of the Dbg* constants (offset 1)
	Source  uint16 // source endpoint ID (offsets 2-3)
	Hash    uint32 // packet hash/mark (offsets 4-7)
	Arg1    uint32 // subtype-specific argument (see Message)
	Arg2    uint32 // subtype-specific argument (see Message)
	Arg3    uint32 // subtype-specific argument (see Message)
}
// Dump prints the message according to the verbosity level specified
func (n *DebugMsg) Dump(args *api.DumpArgs) {
	switch args.Verbosity {
	case api.INFO:
		// We don't print messages at INFO level
		return
	case api.JSON:
		fmt.Fprintln(args.Buf, n.getJSON(args.CpuPrefix, args.LinkMonitor))
	default:
		// All other levels: one human-readable line per message.
		fmt.Fprintf(args.Buf, "%s MARK %#x FROM %d DEBUG: %s\n", args.CpuPrefix, n.Hash, n.Source, n.Message(args.LinkMonitor))
	}
}

// GetSrc retrieves the source endpoint for the message.
func (n *DebugMsg) GetSrc() uint16 {
	return n.Source
}
// Decode decodes the message in 'data' into the struct.
// Layout (native byte order): type(1) subtype(1) source(2) hash(4)
// arg1(4) arg2(4) arg3(4) = DebugMsgLen bytes.
func (n *DebugMsg) Decode(data []byte) error {
	if l := len(data); l < DebugMsgLen {
		return fmt.Errorf("unexpected DebugMsg data length, expected %d but got %d", DebugMsgLen, l)
	}
	n.Type = data[0]
	n.SubType = data[1]
	n.Source = byteorder.Native.Uint16(data[2:4])
	n.Hash = byteorder.Native.Uint32(data[4:8])
	n.Arg1 = byteorder.Native.Uint32(data[8:12])
	n.Arg2 = byteorder.Native.Uint32(data[12:16])
	n.Arg3 = byteorder.Native.Uint32(data[16:20])
	return nil
}
// Message returns the debug message in a human-readable format by
// dispatching on SubType. Unknown subtypes fall back to a generic dump of
// the raw arguments. linkMonitor resolves ifindex values to interface names
// and may be nil, in which case names are left empty.
func (n *DebugMsg) Message(linkMonitor getters.LinkGetter) string {
	switch n.SubType {
	case DbgGeneric:
		return fmt.Sprintf("No message, arg1=%d (%#x) arg2=%d (%#x)", n.Arg1, n.Arg1, n.Arg2, n.Arg2)
	case DbgLocalDelivery:
		return fmt.Sprintf("Attempting local delivery for container id %d from seclabel %d", n.Arg1, n.Arg2)
	case DbgEncap:
		return fmt.Sprintf("Encapsulating to node %d (%#x) from seclabel %d", n.Arg1, n.Arg1, n.Arg2)
	case DbgLxcFound:
		var ifname string
		if linkMonitor != nil {
			ifname = linkMonitor.Name(n.Arg1)
		}
		return fmt.Sprintf("Local container found ifindex %s seclabel %d", ifname, byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgPolicyDenied:
		return fmt.Sprintf("Policy evaluation would deny packet from %d to %d", n.Arg1, n.Arg2)
	case DbgCtLookup:
		return fmt.Sprintf("CT lookup: %s", ctInfo(n.Arg1, n.Arg2))
	case DbgCtLookupRev:
		return fmt.Sprintf("CT reverse lookup: %s", ctInfo(n.Arg1, n.Arg2))
	case DbgCtLookup4:
		return fmt.Sprintf("CT lookup address: %s", ip4Str(n.Arg1))
	case DbgCtMatch:
		return fmt.Sprintf("CT entry found lifetime=%d, %s", n.Arg1,
			verdictInfo(n.Arg2))
	case DbgCtCreated:
		return fmt.Sprintf("CT created 1/2: %s %s",
			ctInfo(n.Arg1, n.Arg2), verdictInfo(n.Arg3))
	case DbgCtCreated2:
		return fmt.Sprintf("CT created 2/2: %s revnat=%d", ip4Str(n.Arg1), byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgCtVerdict:
		return fmt.Sprintf("CT verdict: %s, %s",
			ctState(n.Arg1), verdictInfo(n.Arg2))
	case DbgIcmp6Handle:
		return fmt.Sprintf("Handling ICMPv6 type=%d", n.Arg1)
	case DbgIcmp6Request:
		return fmt.Sprintf("ICMPv6 echo request for router offset=%d", n.Arg1)
	case DbgIcmp6Ns:
		return fmt.Sprintf("ICMPv6 neighbour soliciation for address %x:%x", n.Arg1, n.Arg2)
	case DbgIcmp6TimeExceeded:
		return "Sending ICMPv6 time exceeded"
	case DbgDecap:
		return fmt.Sprintf("Tunnel decap: id=%d flowlabel=%x", n.Arg1, n.Arg2)
	case DbgPortMap:
		return fmt.Sprintf("Mapping port from=%d to=%d", n.Arg1, n.Arg2)
	case DbgErrorRet:
		return fmt.Sprintf("BPF function %d returned error %d", n.Arg1, n.Arg2)
	case DbgToHost:
		return fmt.Sprintf("Going to host, policy-skip=%d", n.Arg1)
	case DbgToStack:
		return fmt.Sprintf("Going to the stack, policy-skip=%d", n.Arg1)
	case DbgPktHash:
		return fmt.Sprintf("Packet hash=%d (%#x), selected_service=%d", n.Arg1, n.Arg1, n.Arg2)
	case DbgRRBackendSlotSel:
		return fmt.Sprintf("RR backend slot selection hash=%d (%#x), selected_service=%d", n.Arg1, n.Arg1, n.Arg2)
	case DbgLb6LookupFrontend:
		return fmt.Sprintf("Frontend service lookup, addr.p4=%x key.dport=%d", n.Arg1, byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgLb6LookupFrontendFail:
		return fmt.Sprintf("Frontend service lookup failed, addr.p2=%x addr.p3=%x", n.Arg1, n.Arg2)
	case DbgLb6LookupBackendSlot, DbgLb4LookupBackendSlot:
		return fmt.Sprintf("Service backend slot lookup: slot=%d, dport=%d", n.Arg1, byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgLb6LookupBackendSlotV2Fail, DbgLb4LookupBackendSlotV2Fail:
		return fmt.Sprintf("Service backend slot lookup failed: slot=%d, dport=%d", n.Arg1, byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgLb6LookupBackendFail, DbgLb4LookupBackendFail:
		return fmt.Sprintf("Backend service lookup failed: backend_id=%d", n.Arg1)
	case DbgLb6LookupBackendSlotSuccess:
		return fmt.Sprintf("Service backend slot lookup result: target.p4=%x port=%d", n.Arg1, byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgLb6ReverseNatLookup, DbgLb4ReverseNatLookup:
		return fmt.Sprintf("Reverse NAT lookup, index=%d", byteorder.NetworkToHost16(uint16(n.Arg1)))
	case DbgLb6ReverseNat:
		return fmt.Sprintf("Performing reverse NAT, address.p4=%x port=%d", n.Arg1, byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgLb4LookupFrontend:
		return fmt.Sprintf("Frontend service lookup, addr=%s key.dport=%d", ip4Str(n.Arg1), byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgLb4LookupFrontendFail:
		return "Frontend service lookup failed"
	case DbgLb4LookupBackendSlotSuccess:
		return fmt.Sprintf("Service backend slot lookup result: target=%s port=%d", ip4Str(n.Arg1), byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgLb4ReverseNat:
		return fmt.Sprintf("Performing reverse NAT, address=%s port=%d", ip4Str(n.Arg1), byteorder.NetworkToHost16(uint16(n.Arg2)))
	case DbgLb4LoopbackSnat:
		return fmt.Sprintf("Loopback SNAT from=%s to=%s", ip4Str(n.Arg1), ip4Str(n.Arg2))
	case DbgLb4LoopbackSnatRev:
		return fmt.Sprintf("Loopback reverse SNAT from=%s to=%s", ip4Str(n.Arg1), ip4Str(n.Arg2))
	case DbgRevProxyLookup:
		return fmt.Sprintf("Reverse proxy lookup %s nexthdr=%d",
			proxyInfo(n.Arg1, n.Arg2), n.Arg3)
	case DbgRevProxyFound:
		return fmt.Sprintf("Reverse proxy entry found, orig-daddr=%s orig-dport=%d", ip4Str(n.Arg1), n.Arg2)
	case DbgRevProxyUpdate:
		return fmt.Sprintf("Reverse proxy updated %s nexthdr=%d",
			proxyInfo(n.Arg1, n.Arg2), n.Arg3)
	case DbgL4Policy:
		return fmt.Sprintf("Resolved L4 policy to: %d / %s",
			byteorder.NetworkToHost16(uint16(n.Arg1)), ctDirection[int(n.Arg2)])
	case DbgNetdevInCluster:
		return fmt.Sprintf("Destination is inside cluster prefix, source identity: %d", n.Arg1)
	case DbgNetdevEncap4:
		return fmt.Sprintf("Attempting encapsulation, lookup key: %s, identity: %d", ip4Str(n.Arg1), n.Arg2)
	case DbgCTLookup41:
		return fmt.Sprintf("Conntrack lookup 1/2: %s", ctLookup4Info1(n))
	case DbgCTLookup42:
		return fmt.Sprintf("Conntrack lookup 2/2: %s", ctLookup4Info2(n))
	case DbgCTCreated4:
		return fmt.Sprintf("Conntrack create: %s", ctCreate4Info(n))
	case DbgCTLookup61:
		return fmt.Sprintf("Conntrack lookup 1/2: %s", ctLookup6Info1(n))
	case DbgCTLookup62:
		return fmt.Sprintf("Conntrack lookup 2/2: %s", ctLookup4Info2(n))
	case DbgCTCreated6:
		return fmt.Sprintf("Conntrack create: %s", ctCreate6Info(n))
	case DbgSkipProxy:
		return fmt.Sprintf("Skipping proxy, tc_index is set=%x", n.Arg1)
	case DbgL4Create:
		return fmt.Sprintf("Matched L4 policy; creating conntrack %s", l4CreateInfo(n))
	case DbgIPIDMapFailed4:
		return fmt.Sprintf("Failed to map addr=%s to identity", ip4Str(n.Arg1))
	case DbgIPIDMapFailed6:
		return fmt.Sprintf("Failed to map addr.p4=[::%s] to identity", ip6Str(n.Arg1))
	case DbgIPIDMapSucceed4:
		return fmt.Sprintf("Successfully mapped addr=%s to identity=%d", ip4Str(n.Arg1), n.Arg2)
	case DbgIPIDMapSucceed6:
		return fmt.Sprintf("Successfully mapped addr.p4=[::%s] to identity=%d", ip6Str(n.Arg1), n.Arg2)
	case DbgLbStaleCT:
		return fmt.Sprintf("Stale CT entry found stale_ct.rev_nat_id=%d, svc.rev_nat_id=%d", n.Arg2, n.Arg1)
	case DbgInheritIdentity:
		return fmt.Sprintf("Inheriting identity=%d from stack", n.Arg1)
	case DbgSkLookup4:
		return fmt.Sprintf("Socket lookup: %s", ctLookup4Info1(n))
	case DbgSkLookup6:
		return fmt.Sprintf("Socket lookup: %s", ctLookup6Info1(n))
	case DbgSkAssign:
		return fmt.Sprintf("Socket assign: %s", skAssignInfo(n))
	case DbgL7LB:
		return fmt.Sprintf("L7 LB from %s to %s: proxy port %d", ip4Str(n.Arg1), ip4Str(n.Arg2), n.Arg3)
	default:
		return fmt.Sprintf("Unknown message type=%d arg1=%d arg2=%d", n.SubType, n.Arg1, n.Arg2)
	}
}
// getJSON renders the debug message as a single-line JSON object string.
func (n *DebugMsg) getJSON(cpuPrefix string, linkMonitor getters.LinkGetter) string {
	return fmt.Sprintf(`{"cpu":%q,"type":"debug","message":%q}`,
		cpuPrefix, n.Message(linkMonitor))
}
const (
	// DebugCaptureLen is the amount of packet data in a packet capture message
	DebugCaptureLen = 24
)

// DebugCapture is the metadata sent along with a captured packet frame
type DebugCapture struct {
	api.DefaultSrcDstGetter
	Type    uint8 // event type byte (offset 0)
	SubType uint8 // one of the DbgCapture* constants (offset 1)
	// Source, if populated, is the ID of the source endpoint.
	Source  uint16
	Hash    uint32 // packet hash/mark
	Len     uint32 // number of captured bytes following the header
	OrigLen uint32 // original packet length
	Arg1    uint32 // subtype-specific (often an ifindex or proxy port)
	Arg2    uint32 // subtype-specific
	// data
}
// Dump prints the message according to the verbosity level specified
func (n *DebugCapture) Dump(args *api.DumpArgs) {
	switch args.Verbosity {
	case api.INFO, api.DEBUG:
		n.DumpInfo(args.Buf, args.Data, args.LinkMonitor)
	case api.JSON:
		n.DumpJSON(args.Buf, args.Data, args.CpuPrefix, args.LinkMonitor)
	default:
		// All other levels: full verbose dump, preceded by a separator.
		fmt.Fprintln(args.Buf, msgSeparator)
		n.DumpVerbose(args.Buf, args.Dissect, args.Data, args.CpuPrefix)
	}
}

// GetSrc retrieves the source endpoint for the message.
func (n *DebugCapture) GetSrc() uint16 {
	return n.Source
}
// Decode decodes the message in 'data' into the struct.
// Layout (native byte order): type(1) subtype(1) source(2) hash(4) len(4)
// origLen(4) arg1(4) arg2(4) = DebugCaptureLen bytes; packet data follows.
func (n *DebugCapture) Decode(data []byte) error {
	if l := len(data); l < DebugCaptureLen {
		return fmt.Errorf("unexpected DebugCapture data length, expected %d but got %d", DebugCaptureLen, l)
	}
	n.Type = data[0]
	n.SubType = data[1]
	n.Source = byteorder.Native.Uint16(data[2:4])
	n.Hash = byteorder.Native.Uint32(data[4:8])
	n.Len = byteorder.Native.Uint32(data[8:12])
	n.OrigLen = byteorder.Native.Uint32(data[12:16])
	n.Arg1 = byteorder.Native.Uint32(data[16:20])
	n.Arg2 = byteorder.Native.Uint32(data[20:24])
	return nil
}
// DumpInfo prints a summary of the capture messages. Subtypes with no
// summary form (empty prefix) are skipped.
func (n *DebugCapture) DumpInfo(buf *bufio.Writer, data []byte, linkMonitor getters.LinkGetter) {
	prefix := n.infoPrefix(linkMonitor)
	if prefix == "" {
		return
	}
	fmt.Fprintf(buf, "%s: %s\n", prefix, GetConnectionSummary(data[DebugCaptureLen:], nil))
}
// infoPrefix returns a short prefix describing the capture point, resolving
// ifindex arguments to interface names where applicable. It returns "" for
// subtypes that have no summary form.
func (n *DebugCapture) infoPrefix(linkMonitor getters.LinkGetter) string {
	// Tolerate a nil link monitor for consistency with DebugMsg.Message,
	// which guards linkMonitor before calling Name; calling a method on a
	// nil interface would panic.
	ifname := func(ifindex uint32) string {
		if linkMonitor == nil {
			return ""
		}
		return linkMonitor.Name(ifindex)
	}
	switch n.SubType {
	case DbgCaptureDelivery:
		return fmt.Sprintf("-> %s", ifname(n.Arg1))
	case DbgCaptureFromLb:
		return fmt.Sprintf("<- load-balancer %s", ifname(n.Arg1))
	case DbgCaptureAfterV46:
		return fmt.Sprintf("== v4->v6 %d", n.Arg1)
	case DbgCaptureAfterV64:
		return fmt.Sprintf("== v6->v4 %d", n.Arg1)
	case DbgCaptureProxyPost:
		return fmt.Sprintf("-> proxy port %d", byteorder.NetworkToHost16(uint16(n.Arg1)))
	default:
		return ""
	}
}
// DumpVerbose prints the captured packet in human readable format
func (n *DebugCapture) DumpVerbose(buf *bufio.Writer, dissect bool, data []byte, prefix string) {
	fmt.Fprintf(buf, "%s MARK %#x FROM %d DEBUG: %d bytes, %s", prefix, n.Hash, n.Source, n.Len, n.subTypeString())
	// Only dissect when packet data actually follows the header.
	if n.Len > 0 && len(data) > DebugCaptureLen {
		Dissect(buf, dissect, data[DebugCaptureLen:], nil)
	}
}
// subTypeString returns a human-readable description of the capture point,
// including the subtype-specific argument (ifindex or proxy port).
func (n *DebugCapture) subTypeString() string {
	switch n.SubType {
	case DbgCaptureDelivery:
		return fmt.Sprintf("Delivery to ifindex %d", n.Arg1)
	case DbgCaptureFromLb:
		return fmt.Sprintf("Incoming packet to load balancer on ifindex %d", n.Arg1)
	case DbgCaptureAfterV46:
		return fmt.Sprintf("Packet after nat46 ifindex %d", n.Arg1)
	case DbgCaptureAfterV64:
		return fmt.Sprintf("Packet after nat64 ifindex %d", n.Arg1)
	case DbgCaptureProxyPre:
		return fmt.Sprintf("Packet to proxy port %d (Pre)", byteorder.NetworkToHost16(uint16(n.Arg1)))
	case DbgCaptureProxyPost:
		return fmt.Sprintf("Packet to proxy port %d (Post)", byteorder.NetworkToHost16(uint16(n.Arg1)))
	case DbgCaptureSnatPre:
		return fmt.Sprintf("Packet going into snat engine on ifindex %d", n.Arg1)
	case DbgCaptureSnatPost:
		return fmt.Sprintf("Packet coming from snat engine on ifindex %d", n.Arg1)
	default:
		return fmt.Sprintf("Unknown message type=%d arg1=%d", n.SubType, n.Arg1)
	}
}
// getJSON renders the capture notification (plus connection summary of the
// trailing packet data) as a JSON string.
func (n *DebugCapture) getJSON(data []byte, cpuPrefix string, linkMonitor getters.LinkGetter) (string, error) {
	v := DebugCaptureToVerbose(n, linkMonitor)
	v.CPUPrefix = cpuPrefix
	v.Summary = GetConnectionSummary(data[DebugCaptureLen:], nil)
	ret, err := json.Marshal(v)
	return string(ret), err
}

// DumpJSON prints notification in json format
func (n *DebugCapture) DumpJSON(buf *bufio.Writer, data []byte, cpuPrefix string, linkMonitor getters.LinkGetter) {
	resp, err := n.getJSON(data, cpuPrefix, linkMonitor)
	if err != nil {
		// Emit a structured error object rather than failing silently.
		fmt.Fprintf(buf, `{"type":"debug_capture_error","message":%q}`+"\n", err.Error())
		return
	}
	fmt.Fprintln(buf, resp)
}
// DebugCaptureVerbose represents a json notification printed by monitor
type DebugCaptureVerbose struct {
	CPUPrefix string `json:"cpu,omitempty"`
	Type      string `json:"type,omitempty"`
	Mark      string `json:"mark,omitempty"`
	Message   string `json:"message,omitempty"`
	Prefix    string `json:"prefix,omitempty"`
	// Source is the source endpoint ID.
	Source uint16 `json:"source"`
	// Bytes is the number of captured bytes.
	Bytes uint32 `json:"bytes"`
	// Summary is the connection summary of the captured packet.
	Summary string `json:"summary,omitempty"`
}
// DebugCaptureToVerbose creates a verbose notification from a DebugCapture.
// (The previous comment incorrectly said "base TraceNotify".)
func DebugCaptureToVerbose(n *DebugCapture, linkMonitor getters.LinkGetter) DebugCaptureVerbose {
	return DebugCaptureVerbose{
		Type:    "capture",
		Mark:    fmt.Sprintf("%#x", n.Hash),
		Source:  n.Source,
		Bytes:   n.Len,
		Message: n.subTypeString(),
		Prefix:  n.infoPrefix(linkMonitor),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package monitor
import (
"bufio"
"encoding/json"
"fmt"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/monitor/api"
)
// DropNotify event versions; carried in DropNotify.Version.
const (
	DropNotifyVersion0 = iota
	DropNotifyVersion1
	DropNotifyVersion2
)

const (
	// dropNotifyV1Len is the amount of packet data provided in a v0/v1 drop notification.
	dropNotifyV1Len = 36
	// dropNotifyV2Len is the amount of packet data provided in a v2 drop notification.
	dropNotifyV2Len = 40
)

const (
	// DropNotifyFlagIsIPv6 is set in DropNotify.Flags when it refers to an IPv6 flow.
	DropNotifyFlagIsIPv6 uint8 = 1 << iota
	// DropNotifyFlagIsL3Device is set in DropNotify.Flags when it refers to a L3 device.
	DropNotifyFlagIsL3Device
	// DropNotifyFlagIsVXLAN is set in DropNotify.Flags when it refers to an overlay VXLAN packet.
	DropNotifyFlagIsVXLAN
	// DropNotifyFlagIsGeneve is set in DropNotify.Flags when it refers to an overlay Geneve packet.
	DropNotifyFlagIsGeneve
)

var (
	// dropNotifyLengthFromVersion maps event versions to their header length.
	dropNotifyLengthFromVersion = map[uint16]uint{
		DropNotifyVersion0: dropNotifyV1Len, // retain backwards compatibility for testing.
		DropNotifyVersion1: dropNotifyV1Len,
		DropNotifyVersion2: dropNotifyV2Len,
	}
)
// DropNotify is the message format of a drop notification in the BPF ring buffer
type DropNotify struct {
	Type     uint8  // event type byte (offset 0)
	SubType  uint8  // drop reason (offset 1)
	Source   uint16 // source endpoint ID
	Hash     uint32 // packet hash/mark
	OrigLen  uint32 // original packet length
	CapLen   uint16 // number of captured bytes
	Version  uint16 // DropNotifyVersion* value
	SrcLabel identity.NumericIdentity
	DstLabel identity.NumericIdentity
	DstID    uint32 // destination endpoint ID
	Line     uint16 // datapath source line that issued the drop
	File     uint8  // datapath source file identifier
	ExtError int8   // extended error code
	Ifindex  uint32
	Flags    uint8 // DropNotifyFlag* bits; only decoded for version >= 2
	_        [3]uint8
	// data
}
// Dump prints the message according to the verbosity level specified.
// Receiver renamed from 'dn' to 'n' for consistency with the other
// DropNotify methods (staticcheck ST1016).
func (n *DropNotify) Dump(args *api.DumpArgs) {
	switch args.Verbosity {
	case api.INFO, api.DEBUG:
		n.DumpInfo(args.Buf, args.Data, args.Format)
	case api.JSON:
		n.DumpJSON(args.Buf, args.Data, args.CpuPrefix)
	default:
		fmt.Fprintln(args.Buf, msgSeparator)
		n.DumpVerbose(args.Buf, !args.Dissect, args.Data, args.CpuPrefix, args.Format)
	}
}
// GetSrc retrieves the source endpoint for the message.
func (n *DropNotify) GetSrc() uint16 {
	return n.Source
}

// GetDst retrieves the destination endpoint for the message.
func (n *DropNotify) GetDst() uint16 {
	// DstID is 32 bits on the wire; endpoint IDs fit in 16 bits.
	return uint16(n.DstID)
}
// dumpIdentity writes ", identity <src>-><dst>" to buf, rendering the
// identities numerically or via their String form depending on the
// requested display format.
func (n *DropNotify) dumpIdentity(buf *bufio.Writer, numeric api.DisplayFormat) {
	if !numeric {
		fmt.Fprintf(buf, ", identity %s->%s", n.SrcLabel, n.DstLabel)
		return
	}
	fmt.Fprintf(buf, ", identity %d->%d", n.SrcLabel, n.DstLabel)
}
// Decode decodes the message in 'data' into the struct.
//
// The byte offsets below must stay in sync with the datapath's drop
// notification layout. The wire version is read first (bytes 14-15) so the
// minimum-length requirement can be tightened for newer, longer formats.
func (n *DropNotify) Decode(data []byte) error {
	if l := len(data); l < dropNotifyV1Len {
		return fmt.Errorf("unexpected DropNotify data length, expected at least %d but got %d", dropNotifyV1Len, l)
	}

	version := byteorder.Native.Uint16(data[14:16])

	// Check against max version.
	if version > DropNotifyVersion2 {
		return fmt.Errorf("unrecognized drop event (version %d)", version)
	}

	// Decode logic for version >= v2.
	if version >= DropNotifyVersion2 {
		if l := len(data); l < dropNotifyV2Len {
			return fmt.Errorf("unexpected DropNotify data length (version %d), expected at least %d but got %d", version, dropNotifyV2Len, l)
		}
		n.Flags = data[36]
	} else {
		// v0/v1 messages carry no flags; clear the field so decoding into a
		// reused struct cannot leave a stale value from a previous message.
		n.Flags = 0
	}

	// Decode logic for version >= v0/v1.
	n.Type = data[0]
	n.SubType = data[1]
	n.Source = byteorder.Native.Uint16(data[2:4])
	n.Hash = byteorder.Native.Uint32(data[4:8])
	n.OrigLen = byteorder.Native.Uint32(data[8:12])
	n.CapLen = byteorder.Native.Uint16(data[12:14])
	n.Version = version
	n.SrcLabel = identity.NumericIdentity(byteorder.Native.Uint32(data[16:20]))
	n.DstLabel = identity.NumericIdentity(byteorder.Native.Uint32(data[20:24]))
	n.DstID = byteorder.Native.Uint32(data[24:28])
	n.Line = byteorder.Native.Uint16(data[28:30])
	n.File = data[30]
	n.ExtError = int8(data[31])
	n.Ifindex = byteorder.Native.Uint32(data[32:36])

	return nil
}
// IsL3Device returns true if the trace comes from an L3 device.
func (n *DropNotify) IsL3Device() bool {
	return n.Flags&DropNotifyFlagIsL3Device != 0
}

// IsIPv6 returns true if the trace refers to an IPv6 packet.
func (n *DropNotify) IsIPv6() bool {
	return n.Flags&DropNotifyFlagIsIPv6 != 0
}

// IsGeneve returns true if the trace refers to an overlay Geneve packet.
func (n *DropNotify) IsGeneve() bool {
	return n.Flags&DropNotifyFlagIsGeneve != 0
}

// IsVXLAN returns true if the trace refers to an overlay VXLAN packet.
func (n *DropNotify) IsVXLAN() bool {
	return n.Flags&DropNotifyFlagIsVXLAN != 0
}

// DataOffset returns the offset from the beginning of DropNotify where the
// notification data begins.
//
// Returns zero for invalid or unknown DropNotify messages.
func (n *DropNotify) DataOffset() uint {
	// Missing map keys yield the zero value, covering the "unknown" case.
	return dropNotifyLengthFromVersion[n.Version]
}
// DumpInfo prints a one-line summary of the drop message: reason, flow
// hash, destination endpoint, interface, datapath source location,
// identities, and a connection summary of the captured packet.
func (n *DropNotify) DumpInfo(buf *bufio.Writer, data []byte, numeric api.DisplayFormat) {
	reason := api.DropReasonExt(n.SubType, n.ExtError)
	file := api.BPFFileName(n.File)
	fmt.Fprintf(buf, "xx drop (%s) flow %#x to endpoint %d, ifindex %d, file %s:%d, ",
		reason, n.Hash, n.DstID, n.Ifindex, file, int(n.Line))
	n.dumpIdentity(buf, numeric)
	opts := &decodeOpts{
		IsL3Device: n.IsL3Device(),
		IsIPv6:     n.IsIPv6(),
		IsVXLAN:    n.IsVXLAN(),
		IsGeneve:   n.IsGeneve(),
	}
	fmt.Fprintf(buf, ": %s\n", GetConnectionSummary(data[n.DataOffset():], opts))
}
// DumpVerbose prints the drop notification in human readable form,
// optionally followed by a dissection of the captured packet data.
func (n *DropNotify) DumpVerbose(buf *bufio.Writer, dissect bool, data []byte, prefix string, numeric api.DisplayFormat) {
	fmt.Fprintf(buf, "%s MARK %#x FROM %d DROP: %d bytes, reason %s",
		prefix, n.Hash, n.Source, n.OrigLen, api.DropReasonExt(n.SubType, n.ExtError))

	if n.SrcLabel != 0 || n.DstLabel != 0 {
		n.dumpIdentity(buf, numeric)
	}

	if n.DstID == 0 {
		fmt.Fprintf(buf, "\n")
	} else {
		fmt.Fprintf(buf, ", to endpoint %d\n", n.DstID)
	}

	// Only dissect when packet data was actually captured past the header.
	offset := int(n.DataOffset())
	if n.CapLen == 0 || len(data) <= offset {
		return
	}
	Dissect(buf, dissect, data[offset:], &decodeOpts{
		IsL3Device: n.IsL3Device(),
		IsIPv6:     n.IsIPv6(),
		IsVXLAN:    n.IsVXLAN(),
		IsGeneve:   n.IsGeneve(),
	})
}
// getJSON marshals the notification, plus an optional dissection summary
// of the captured packet payload, into its JSON representation.
func (n *DropNotify) getJSON(data []byte, cpuPrefix string) (string, error) {
	v := DropNotifyToVerbose(n)
	v.CPUPrefix = cpuPrefix

	if offset := int(n.DataOffset()); n.CapLen > 0 && len(data) > offset {
		v.Summary = GetDissectSummary(data[offset:], &decodeOpts{
			IsL3Device: n.IsL3Device(),
			IsIPv6:     n.IsIPv6(),
			IsVXLAN:    n.IsVXLAN(),
			IsGeneve:   n.IsGeneve(),
		})
	}

	b, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

// DumpJSON prints notification in json format
func (n *DropNotify) DumpJSON(buf *bufio.Writer, data []byte, cpuPrefix string) {
	resp, err := n.getJSON(data, cpuPrefix)
	if err != nil {
		// Marshal failures are silently skipped, matching the other
		// notification types in this package.
		return
	}
	fmt.Fprintln(buf, resp)
}
// DropNotifyVerbose represents a json notification printed by monitor
type DropNotifyVerbose struct {
	CPUPrefix string `json:"cpu,omitempty"`
	Type      string `json:"type,omitempty"`
	Mark      string `json:"mark,omitempty"`
	Reason    string `json:"reason,omitempty"`

	Source   uint16                   `json:"source"`
	Bytes    uint32                   `json:"bytes"`
	SrcLabel identity.NumericIdentity `json:"srcLabel"`
	DstLabel identity.NumericIdentity `json:"dstLabel"`
	DstID    uint32                   `json:"dstID"`
	Line     uint16                   `json:"Line"`
	File     uint8                    `json:"File"`
	ExtError int8                     `json:"ExtError"`
	Ifindex  uint32                   `json:"Ifindex"`

	// Summary is filled by getJSON when captured packet data is present.
	Summary *DissectSummary `json:"summary,omitempty"`
}

// DropNotifyToVerbose creates verbose notification from DropNotify
func DropNotifyToVerbose(n *DropNotify) DropNotifyVerbose {
	return DropNotifyVerbose{
		Type:     "drop",
		Mark:     fmt.Sprintf("%#x", n.Hash),
		Reason:   api.DropReasonExt(n.SubType, n.ExtError),
		Source:   n.Source,
		Bytes:    n.OrigLen,
		SrcLabel: n.SrcLabel,
		DstLabel: n.DstLabel,
		DstID:    n.DstID,
		Line:     n.Line,
		File:     n.File,
		ExtError: n.ExtError,
		Ifindex:  n.Ifindex,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package monitor
import (
"bufio"
"fmt"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/policy"
)
const (
	// PolicyVerdictNotifyLen is the amount of packet data provided in a Policy notification
	PolicyVerdictNotifyLen = 32

	// The values below are for parsing PolicyVerdictNotify. They need to be consistent
	// with what are defined in data plane.
	//
	// Flags bit layout: bits 0-1 direction, bit 2 IPv6, bits 3-5 match type,
	// bit 6 audited.

	// PolicyVerdictNotifyFlagDirection is the bit mask in Flags that
	// corresponds to the direction of a traffic
	PolicyVerdictNotifyFlagDirection = 0x3

	// PolicyVerdictNotifyFlagIsIPv6 is the bit mask in Flags that
	// corresponds to whether the traffic is IPv6 or not
	PolicyVerdictNotifyFlagIsIPv6 = 0x4

	// PolicyVerdictNotifyFlagMatchType is the bit mask in Flags that
	// corresponds to the policy match type
	PolicyVerdictNotifyFlagMatchType = 0x38

	// PolicyVerdictNotifyFlagIsAudited is the bit mask in Flags that
	// corresponds to whether the traffic was allowed due to the audit mode
	PolicyVerdictNotifyFlagIsAudited = 0x40

	// PolicyVerdictNotifyFlagMatchTypeBitOffset is the bit offset in Flags that
	// corresponds to the policy match type
	PolicyVerdictNotifyFlagMatchTypeBitOffset = 3
)
// PolicyVerdictNotify is the message format of a policy verdict notification in the bpf ring buffer.
// Field order and widths mirror the datapath struct; Decode fills it from
// the raw byte layout.
type PolicyVerdictNotify struct {
	Type    uint8
	SubType uint8
	Source  uint16
	Hash    uint32
	OrigLen uint32
	CapLen  uint16
	Version uint16
	// RemoteLabel is the peer security identity: the source identity for
	// ingress verdicts, the destination identity for egress verdicts.
	RemoteLabel identity.NumericIdentity
	// Verdict is negative for deny, positive for proxy redirect, zero for allow
	// (see GetPolicyActionString).
	Verdict int32
	DstPort uint16
	Proto   uint8
	// Flags packs direction, address family, match type and audit bit
	// (see the PolicyVerdictNotifyFlag* masks).
	Flags    uint8
	AuthType uint8
	Pad1     uint8
	Pad2     uint16
	// data
}
// Dump prints the message according to the verbosity level specified
func (pn *PolicyVerdictNotify) Dump(args *api.DumpArgs) {
	// Policy verdicts have a single text representation; the verbosity
	// argument only selects the identity display format.
	pn.DumpInfo(args.Buf, args.Data, args.Format)
}

// GetSrc retrieves the source endpoint for the message.
func (n *PolicyVerdictNotify) GetSrc() uint16 {
	return n.Source
}

// GetDst retrieves the security identity for the message.
// `POLICY_INGRESS` -> `RemoteLabel` is the src security identity.
// `POLICY_EGRESS` -> `RemoteLabel` is the dst security identity.
func (n *PolicyVerdictNotify) GetDst() uint16 {
	// Identity is 32-bit; truncation to 16 bits matches the interface shared
	// with the other notification types.
	return uint16(n.RemoteLabel)
}
// Decode decodes the message in 'data' into the struct.
//
// The byte offsets below must stay in sync with the datapath's policy
// verdict notification layout; the message is a fixed 32-byte header
// followed by captured packet data.
func (n *PolicyVerdictNotify) Decode(data []byte) error {
	if l := len(data); l < PolicyVerdictNotifyLen {
		return fmt.Errorf("unexpected PolicyVerdictNotify data length, expected %d but got %d", PolicyVerdictNotifyLen, l)
	}

	n.Type = data[0]
	n.SubType = data[1]
	n.Source = byteorder.Native.Uint16(data[2:4])
	n.Hash = byteorder.Native.Uint32(data[4:8])
	n.OrigLen = byteorder.Native.Uint32(data[8:12])
	n.CapLen = byteorder.Native.Uint16(data[12:14])
	n.Version = byteorder.Native.Uint16(data[14:16])
	n.RemoteLabel = identity.NumericIdentity(byteorder.Native.Uint32(data[16:20]))
	n.Verdict = int32(byteorder.Native.Uint32(data[20:24]))
	n.DstPort = byteorder.Native.Uint16(data[24:26])
	n.Proto = data[26]
	n.Flags = data[27]
	n.AuthType = data[28]
	n.Pad1 = data[29]
	n.Pad2 = byteorder.Native.Uint16(data[30:32])

	return nil
}
// IsTrafficIngress returns true if this notify is for an ingress traffic
func (n *PolicyVerdictNotify) IsTrafficIngress() bool {
	return n.Flags&PolicyVerdictNotifyFlagDirection == api.PolicyIngress
}

// IsTrafficIPv6 returns true if this notify is for IPv6 traffic
func (n *PolicyVerdictNotify) IsTrafficIPv6() bool {
	return (n.Flags&PolicyVerdictNotifyFlagIsIPv6 > 0)
}

// GetPolicyMatchType returns how the traffic matched the policy
func (n *PolicyVerdictNotify) GetPolicyMatchType() api.PolicyMatchType {
	// Match type occupies bits 3-5 of Flags; mask then shift down.
	return api.PolicyMatchType((n.Flags & PolicyVerdictNotifyFlagMatchType) >>
		PolicyVerdictNotifyFlagMatchTypeBitOffset)
}

// IsTrafficAudited returns true if this notify is for traffic that
// was allowed due to the audit mode
func (n *PolicyVerdictNotify) IsTrafficAudited() bool {
	return (n.Flags&PolicyVerdictNotifyFlagIsAudited > 0)
}
// GetPolicyActionString returns the action string corresponding to the
// verdict: "audit" when audit mode allowed the traffic, otherwise "deny"
// for negative verdicts, "redirect" for positive ones (proxy port), and
// "allow" for zero.
func GetPolicyActionString(verdict int32, audit bool) string {
	switch {
	case audit:
		return "audit"
	case verdict < 0:
		return "deny"
	case verdict > 0:
		return "redirect"
	default:
		return "allow"
	}
}
// GetAuthType returns string for the authentication method applied (for success verdict)
// or required (for drops).
func (n *PolicyVerdictNotify) GetAuthType() policy.AuthType {
	return policy.AuthType(n.AuthType)
}
// DumpInfo prints a one-line summary of the policy verdict: flow hash,
// local endpoint, remote identity, protocol, direction, action, auth
// method, match type and a connection summary of the captured packet.
func (n *PolicyVerdictNotify) DumpInfo(buf *bufio.Writer, data []byte, numeric api.DisplayFormat) {
	direction := "egress"
	if n.IsTrafficIngress() {
		direction = "ingress"
	}
	fmt.Fprintf(buf, "Policy verdict log: flow %#x local EP ID %d", n.Hash, n.Source)
	if !numeric {
		fmt.Fprintf(buf, ", remote ID %s", n.RemoteLabel)
	} else {
		fmt.Fprintf(buf, ", remote ID %d", n.RemoteLabel)
	}
	action := GetPolicyActionString(n.Verdict, n.IsTrafficAudited())
	summary := GetConnectionSummary(data[PolicyVerdictNotifyLen:], nil)
	fmt.Fprintf(buf, ", proto %d, %s, action %s, auth: %s, match %s, %s\n",
		n.Proto, direction, action, n.GetAuthType(), n.GetPolicyMatchType(), summary)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package monitor
import (
"fmt"
"net"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/types"
)
// Service translation event point in socket trace event messages
const (
	XlatePointUnknown = iota
	XlatePointPreDirectionFwd
	XlatePointPostDirectionFwd
	XlatePointPreDirectionRev
	XlatePointPostDirectionRev
)

// L4 protocol for socket trace event messages
const (
	L4ProtocolUnknown = iota
	L4ProtocolTCP
	L4ProtocolUDP
)

// TraceSockNotifyFlagIPv6 is set in TraceSockNotify.Flags when DstIP holds
// an IPv6 address (otherwise only the first four bytes are meaningful).
const TraceSockNotifyFlagIPv6 uint8 = 0x1

const (
	// TraceSockNotifyLen is the fixed wire size of a socket trace message.
	TraceSockNotifyLen = 38
)
// TraceSockNotify is message format for socket trace notifications sent from datapath.
// Keep this in sync to the datapath structure (trace_sock_notify) defined in
// bpf/lib/trace_sock.h
type TraceSockNotify struct {
	api.DefaultSrcDstGetter

	Type       uint8
	XlatePoint uint8 // one of the XlatePoint* constants
	// DstIP holds an IPv4 address in its first 4 bytes unless the
	// TraceSockNotifyFlagIPv6 flag is set (see IP()).
	DstIP      types.IPv6
	DstPort    uint16
	SockCookie uint64
	CgroupId   uint64
	L4Proto    uint8 // one of the L4Protocol* constants
	Flags      uint8
}
// Dump prints the message according to the verbosity level specified.
func (t *TraceSockNotify) Dump(args *api.DumpArgs) {
	// Currently only printed with the debug option. Extend it to info and json.
	// GH issue: https://github.com/cilium/cilium/issues/21510
	if args.Verbosity != api.DEBUG {
		return
	}
	fmt.Fprintf(args.Buf, "%s [%s] cgroup_id: %d sock_cookie: %d, dst [%s]:%d %s \n",
		args.CpuPrefix, t.XlatePointStr(), t.CgroupId, t.SockCookie, t.IP(), t.DstPort, t.L4ProtoStr())
}
// Decode decodes the message in 'data' into the struct.
//
// The byte offsets below must stay in sync with the datapath struct
// trace_sock_notify in bpf/lib/trace_sock.h.
func (t *TraceSockNotify) Decode(data []byte) error {
	if l := len(data); l < TraceSockNotifyLen {
		return fmt.Errorf("unexpected TraceSockNotify data length, expected %d but got %d", TraceSockNotifyLen, l)
	}

	t.Type = data[0]
	t.XlatePoint = data[1]
	copy(t.DstIP[:], data[2:18])
	t.DstPort = byteorder.Native.Uint16(data[18:20])
	t.SockCookie = byteorder.Native.Uint64(data[20:28])
	t.CgroupId = byteorder.Native.Uint64(data[28:36])
	t.L4Proto = data[36]
	t.Flags = data[37]

	return nil
}
// XlatePointStr returns a human-readable name for the service translation
// point carried in the event, or "unknown" for unrecognized values.
func (t *TraceSockNotify) XlatePointStr() string {
	s := "unknown"
	switch t.XlatePoint {
	case XlatePointPreDirectionFwd:
		s = "pre-xlate-fwd"
	case XlatePointPostDirectionFwd:
		s = "post-xlate-fwd"
	case XlatePointPreDirectionRev:
		s = "pre-xlate-rev"
	case XlatePointPostDirectionRev:
		s = "post-xlate-rev"
	}
	return s
}
// IP returns the IPv4 or IPv6 address field, depending on the IPv6 flag.
func (t *TraceSockNotify) IP() net.IP {
	if t.Flags&TraceSockNotifyFlagIPv6 == 0 {
		// A v4 address occupies only the first four bytes of the field.
		return t.DstIP[:4]
	}
	return t.DstIP[:]
}

// L4ProtoStr returns the transport protocol of the event as a string,
// or "unknown" for unrecognized values.
func (t *TraceSockNotify) L4ProtoStr() string {
	proto := "unknown"
	switch t.L4Proto {
	case L4ProtocolTCP:
		proto = "tcp"
	case L4ProtocolUDP:
		proto = "udp"
	}
	return proto
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package monitor
import (
"bufio"
"encoding/json"
"fmt"
"net"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/types"
)
const (
	// traceNotifyV0Len is the amount of packet data provided in a trace notification v0.
	traceNotifyV0Len = 32
	// traceNotifyV1Len is the amount of packet data provided in a trace notification v1.
	traceNotifyV1Len = 48
)

// Bit flags carried in TraceNotify.Flags. The bit positions match the
// corresponding DropNotifyFlag* values.
const (
	// TraceNotifyFlagIsIPv6 is set in TraceNotify.Flags when the
	// notification refers to an IPv6 flow
	TraceNotifyFlagIsIPv6 uint8 = 1 << iota
	// TraceNotifyFlagIsL3Device is set in TraceNotify.Flags when the
	// notification refers to a L3 device.
	TraceNotifyFlagIsL3Device
	// TraceNotifyFlagIsVXLAN is set in TraceNotify.Flags when the
	// notification refers to an overlay VXLAN packet.
	TraceNotifyFlagIsVXLAN
	// TraceNotifyFlagIsGeneve is set in TraceNotify.Flags when the
	// notification refers to an overlay Geneve packet.
	TraceNotifyFlagIsGeneve
)

// Versions of the TraceNotify wire format. The version is carried in the
// message itself (bytes 14-15) and checked by Decode.
const (
	TraceNotifyVersion0 = iota
	TraceNotifyVersion1
)
// TraceNotify is the message format of a trace notification in the BPF ring buffer.
// Field order and widths mirror the datapath struct; Decode fills it from
// the raw byte layout.
type TraceNotify struct {
	Type     uint8
	ObsPoint uint8 // observation point, see api.Trace* constants
	Source   uint16
	Hash     uint32
	// OrigLen is the original packet length; CapLen is how much was captured.
	OrigLen  uint32
	CapLen   uint16
	Version  uint16
	SrcLabel identity.NumericIdentity
	DstLabel identity.NumericIdentity
	// DstID is the destination endpoint ID, or the proxy port for
	// TraceToProxy observation points.
	DstID uint16
	// Reason combines a TraceReason* value with the TraceReasonEncryptMask bit.
	Reason  uint8
	Flags   uint8
	Ifindex uint32
	// OrigIP is only populated for v1+ messages (pre-NAT source address).
	OrigIP types.IPv6
	// data
}
// Dump emits the trace notification to args.Buf in the representation
// selected by args.Verbosity: summary (INFO/DEBUG), JSON, or the full
// verbose form preceded by a message separator.
func (tn *TraceNotify) Dump(args *api.DumpArgs) {
	v := args.Verbosity
	if v == api.INFO || v == api.DEBUG {
		tn.DumpInfo(args.Buf, args.Data, args.Format, args.LinkMonitor)
		return
	}
	if v == api.JSON {
		tn.DumpJSON(args.Buf, args.Data, args.CpuPrefix, args.LinkMonitor)
		return
	}
	fmt.Fprintln(args.Buf, msgSeparator)
	tn.DumpVerbose(args.Buf, args.Dissect, args.Data, args.CpuPrefix, args.Format, args.LinkMonitor)
}
// GetSrc retrieves the source endpoint for the message.
func (tn *TraceNotify) GetSrc() uint16 {
	return tn.Source
}

// GetDst retrieves the destination endpoint or proxy destination port according to the message subtype.
func (tn *TraceNotify) GetDst() uint16 {
	return tn.DstID
}
// Decode decodes the message in 'data' into the struct.
//
// The byte offsets below must stay in sync with the datapath's trace
// notification layout. The wire version is read first (bytes 14-15) so the
// minimum-length requirement can be tightened for newer, longer formats.
func (tn *TraceNotify) Decode(data []byte) error {
	if l := len(data); l < traceNotifyV0Len {
		return fmt.Errorf("unexpected TraceNotify data length, expected at least %d but got %d", traceNotifyV0Len, l)
	}

	version := byteorder.Native.Uint16(data[14:16])

	// Check against max version.
	if version > TraceNotifyVersion1 {
		return fmt.Errorf("unrecognized trace event (version %d)", version)
	}

	// Decode logic for version >= v1.
	if version >= TraceNotifyVersion1 {
		if l := len(data); l < traceNotifyV1Len {
			return fmt.Errorf("unexpected TraceNotify data length (version %d), expected at least %d but got %d", version, traceNotifyV1Len, l)
		}
		copy(tn.OrigIP[:], data[32:48])
	} else {
		// v0 messages carry no original IP; clear the field so decoding into
		// a reused struct cannot leave a stale address from a previous message.
		tn.OrigIP = types.IPv6{}
	}

	// Decode logic for version >= v0.
	tn.Type = data[0]
	tn.ObsPoint = data[1]
	tn.Source = byteorder.Native.Uint16(data[2:4])
	tn.Hash = byteorder.Native.Uint32(data[4:8])
	tn.OrigLen = byteorder.Native.Uint32(data[8:12])
	tn.CapLen = byteorder.Native.Uint16(data[12:14])
	tn.Version = version
	tn.SrcLabel = identity.NumericIdentity(byteorder.Native.Uint32(data[16:20]))
	tn.DstLabel = identity.NumericIdentity(byteorder.Native.Uint32(data[20:24]))
	tn.DstID = byteorder.Native.Uint16(data[24:26])
	tn.Reason = data[26]
	tn.Flags = data[27]
	tn.Ifindex = byteorder.Native.Uint32(data[28:32])

	return nil
}
// IsEncrypted returns true when the notification has the encrypt flag set,
// false otherwise.
func (n *TraceNotify) IsEncrypted() bool {
	return (n.Reason & TraceReasonEncryptMask) != 0
}

// TraceReason returns the trace reason for this notification, see the
// TraceReason* constants.
func (n *TraceNotify) TraceReason() uint8 {
	// Strip the encrypt bit; the remaining bits are the reason value.
	return n.Reason & ^TraceReasonEncryptMask
}

// TraceReasonIsKnown returns false when the trace reason is unknown, true
// otherwise.
func (n *TraceNotify) TraceReasonIsKnown() bool {
	return n.TraceReason() != TraceReasonUnknown
}

// TraceReasonIsReply returns true when the trace reason is TraceReasonCtReply,
// false otherwise.
func (n *TraceNotify) TraceReasonIsReply() bool {
	return n.TraceReason() == TraceReasonCtReply
}

// TraceReasonIsEncap returns true when the trace reason is encapsulation
// related, false otherwise.
func (n *TraceNotify) TraceReasonIsEncap() bool {
	switch n.TraceReason() {
	case TraceReasonSRv6Encap, TraceReasonEncryptOverlay:
		return true
	}
	return false
}

// TraceReasonIsDecap returns true when the trace reason is decapsulation
// related, false otherwise.
func (n *TraceNotify) TraceReasonIsDecap() bool {
	switch n.TraceReason() {
	case TraceReasonSRv6Decap:
		return true
	}
	return false
}
// traceNotifyLength maps a wire version to the size of its fixed header,
// i.e. the offset at which captured packet data begins.
var (
	traceNotifyLength = map[uint16]uint{
		TraceNotifyVersion0: traceNotifyV0Len,
		TraceNotifyVersion1: traceNotifyV1Len,
	}
)

/* Reasons for forwarding a packet, keep in sync with api/v1/flow/flow.proto */
const (
	TraceReasonPolicy = iota
	TraceReasonCtEstablished
	TraceReasonCtReply
	TraceReasonCtRelated
	TraceReasonCtDeprecatedReopened
	TraceReasonUnknown
	TraceReasonSRv6Encap
	TraceReasonSRv6Decap
	TraceReasonEncryptOverlay
	// TraceReasonEncryptMask is the bit used to indicate encryption or not.
	TraceReasonEncryptMask = uint8(0x80)
)

/* keep in sync with api/v1/flow/flow.proto */
// traceReasons maps each TraceReason* value to its display string.
var traceReasons = map[uint8]string{
	TraceReasonPolicy:               "new",
	TraceReasonCtEstablished:        "established",
	TraceReasonCtReply:              "reply",
	TraceReasonCtRelated:            "related",
	TraceReasonCtDeprecatedReopened: "reopened",
	TraceReasonUnknown:              "unknown",
	TraceReasonSRv6Encap:            "srv6-encap",
	TraceReasonSRv6Decap:            "srv6-decap",
	TraceReasonEncryptOverlay:       "encrypt-overlay",
}
// dumpIdentity writes ", identity <src>-><dst>" to buf, rendering the
// identities numerically or via their String form depending on the
// requested display format.
func (n *TraceNotify) dumpIdentity(buf *bufio.Writer, numeric api.DisplayFormat) {
	if !numeric {
		fmt.Fprintf(buf, ", identity %s->%s", n.SrcLabel, n.DstLabel)
		return
	}
	fmt.Fprintf(buf, ", identity %d->%d", n.SrcLabel, n.DstLabel)
}

// encryptReasonString returns "encrypted " (trailing space included) when
// the encrypt bit is set, or the empty string otherwise.
func (n *TraceNotify) encryptReasonString() string {
	if !n.IsEncrypted() {
		return ""
	}
	return "encrypted "
}

// traceReasonString returns the display name of the trace reason.
func (n *TraceNotify) traceReasonString() string {
	str, ok := traceReasons[n.TraceReason()]
	if !ok {
		// NOTE: show the underlying datapath trace reason without excluding
		// the encrypt mask.
		return fmt.Sprintf("%d", n.Reason)
	}
	return str
}
// traceSummary returns a short description of the observation point,
// including the peer endpoint ID or proxy port where applicable.
func (n *TraceNotify) traceSummary() string {
	switch n.ObsPoint {
	// Dynamic cases first: these embed an endpoint ID or port.
	case api.TraceToLxc:
		return fmt.Sprintf("-> endpoint %d", n.DstID)
	case api.TraceFromLxc:
		return fmt.Sprintf("<- endpoint %d", n.Source)
	case api.TraceToProxy:
		// DstID carries the proxy port when known.
		if n.DstID != 0 {
			return fmt.Sprintf("-> proxy port %d", n.DstID)
		}
		return "-> proxy"
	// Static "to" directions.
	case api.TraceToHost:
		return "-> host from"
	case api.TraceToStack:
		return "-> stack"
	case api.TraceToOverlay:
		return "-> overlay"
	case api.TraceToNetwork:
		return "-> network"
	case api.TraceToCrypto:
		return "-> crypto"
	// Static "from" directions.
	case api.TraceFromProxy:
		return "<- proxy"
	case api.TraceFromHost:
		return "<- host"
	case api.TraceFromStack:
		return "<- stack"
	case api.TraceFromOverlay:
		return "<- overlay"
	case api.TraceFromNetwork:
		return "<- network"
	case api.TraceFromCrypto:
		return "<- crypto"
	default:
		return "unknown trace"
	}
}
// IsL3Device returns true if the trace comes from an L3 device.
func (n *TraceNotify) IsL3Device() bool {
	return n.Flags&TraceNotifyFlagIsL3Device != 0
}

// IsIPv6 returns true if the trace refers to an IPv6 packet.
func (n *TraceNotify) IsIPv6() bool {
	return n.Flags&TraceNotifyFlagIsIPv6 != 0
}

// IsVXLAN returns true if the trace refers to an overlay VXLAN packet.
func (n *TraceNotify) IsVXLAN() bool {
	return n.Flags&TraceNotifyFlagIsVXLAN != 0
}

// IsGeneve returns true if the trace refers to an overlay Geneve packet.
func (n *TraceNotify) IsGeneve() bool {
	return n.Flags&TraceNotifyFlagIsGeneve != 0
}

// OriginalIP returns the original source IP if reverse NAT was performed on
// the flow
func (n *TraceNotify) OriginalIP() net.IP {
	// A v4 address occupies only the first four bytes of the field.
	if n.IsIPv6() {
		return n.OrigIP[:]
	}
	return n.OrigIP[:4]
}

// DataOffset returns the offset from the beginning of TraceNotify where the
// trace notify data begins.
//
// Returns zero for invalid or unknown TraceNotify messages.
func (n *TraceNotify) DataOffset() uint {
	// Missing map keys yield the zero value, covering the "unknown" case.
	return traceNotifyLength[n.Version]
}
// DumpInfo prints a one-line summary of the trace message: observation
// point, flow hash, identities, trace reason, interface, original IP and a
// connection summary of the captured packet.
func (n *TraceNotify) DumpInfo(buf *bufio.Writer, data []byte, numeric api.DisplayFormat, linkMonitor getters.LinkGetter) {
	summary := n.traceSummary()
	if enc := n.encryptReasonString(); enc != "" {
		fmt.Fprintf(buf, "%s %s flow %#x ", summary, enc, n.Hash)
	} else {
		fmt.Fprintf(buf, "%s flow %#x ", summary, n.Hash)
	}
	n.dumpIdentity(buf, numeric)
	opts := &decodeOpts{
		IsL3Device: n.IsL3Device(),
		IsIPv6:     n.IsIPv6(),
		IsVXLAN:    n.IsVXLAN(),
		IsGeneve:   n.IsGeneve(),
	}
	fmt.Fprintf(buf, " state %s ifindex %s orig-ip %s: %s\n",
		n.traceReasonString(), linkMonitor.Name(n.Ifindex),
		n.OriginalIP().String(), GetConnectionSummary(data[n.DataOffset():], opts))
}
// DumpVerbose prints the trace notification in human readable form,
// optionally followed by a dissection of the captured packet data.
func (n *TraceNotify) DumpVerbose(buf *bufio.Writer, dissect bool, data []byte, prefix string, numeric api.DisplayFormat, linkMonitor getters.LinkGetter) {
	fmt.Fprintf(buf, "%s MARK %#x FROM %d %s: %d bytes (%d captured), state %s",
		prefix, n.Hash, n.Source, api.TraceObservationPoint(n.ObsPoint), n.OrigLen, n.CapLen, n.traceReasonString())

	if n.Ifindex != 0 {
		fmt.Fprintf(buf, ", interface %s", linkMonitor.Name(n.Ifindex))
	}

	if n.SrcLabel != 0 || n.DstLabel != 0 {
		fmt.Fprintf(buf, ", ")
		n.dumpIdentity(buf, numeric)
	}

	fmt.Fprintf(buf, ", orig-ip %s", n.OriginalIP().String())

	switch {
	case n.DstID == 0:
		fmt.Fprintf(buf, "\n")
	case n.ObsPoint == api.TraceToProxy:
		// For proxy traces DstID carries the proxy port, not an endpoint ID.
		fmt.Fprintf(buf, ", to %s %d\n", "proxy-port", n.DstID)
	default:
		fmt.Fprintf(buf, ", to %s %d\n", "endpoint", n.DstID)
	}

	if offset := int(n.DataOffset()); n.CapLen > 0 && len(data) > offset {
		Dissect(buf, dissect, data[offset:], &decodeOpts{
			IsL3Device: n.IsL3Device(),
			IsIPv6:     n.IsIPv6(),
			IsVXLAN:    n.IsVXLAN(),
			IsGeneve:   n.IsGeneve(),
		})
	}
}
// getJSON marshals the notification, plus an optional dissection summary
// of the captured packet payload, into its JSON representation.
func (n *TraceNotify) getJSON(data []byte, cpuPrefix string, linkMonitor getters.LinkGetter) (string, error) {
	v := TraceNotifyToVerbose(n, linkMonitor)
	v.CPUPrefix = cpuPrefix

	if offset := int(n.DataOffset()); n.CapLen > 0 && len(data) > offset {
		v.Summary = GetDissectSummary(data[offset:], &decodeOpts{
			IsL3Device: n.IsL3Device(),
			IsIPv6:     n.IsIPv6(),
			IsVXLAN:    n.IsVXLAN(),
			IsGeneve:   n.IsGeneve(),
		})
	}

	b, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

// DumpJSON prints notification in json format
func (n *TraceNotify) DumpJSON(buf *bufio.Writer, data []byte, cpuPrefix string, linkMonitor getters.LinkGetter) {
	resp, err := n.getJSON(data, cpuPrefix, linkMonitor)
	if err != nil {
		// Marshal failures are silently skipped, matching the other
		// notification types in this package.
		return
	}
	fmt.Fprintln(buf, resp)
}
// TraceNotifyVerbose represents a json notification printed by monitor
type TraceNotifyVerbose struct {
	CPUPrefix        string `json:"cpu,omitempty"`
	Type             string `json:"type,omitempty"`
	Mark             string `json:"mark,omitempty"`
	Ifindex          string `json:"ifindex,omitempty"`
	State            string `json:"state,omitempty"`
	ObservationPoint string `json:"observationPoint"`
	TraceSummary     string `json:"traceSummary"`

	Source   uint16                   `json:"source"`
	Bytes    uint32                   `json:"bytes"`
	SrcLabel identity.NumericIdentity `json:"srcLabel"`
	DstLabel identity.NumericIdentity `json:"dstLabel"`
	DstID    uint16                   `json:"dstID"`

	// Summary is filled by getJSON when captured packet data is present.
	Summary *DissectSummary `json:"summary,omitempty"`
}

// TraceNotifyToVerbose creates verbose notification from base TraceNotify
func TraceNotifyToVerbose(n *TraceNotify, linkMonitor getters.LinkGetter) TraceNotifyVerbose {
	// Resolve the interface index to its name for display.
	ifname := linkMonitor.Name(n.Ifindex)
	return TraceNotifyVerbose{
		Type:             "trace",
		Mark:             fmt.Sprintf("%#x", n.Hash),
		Ifindex:          ifname,
		State:            n.traceReasonString(),
		ObservationPoint: api.TraceObservationPoint(n.ObsPoint),
		TraceSummary:     n.traceSummary(),
		Source:           n.Source,
		Bytes:            n.OrigLen,
		SrcLabel:         n.SrcLabel,
		DstLabel:         n.DstLabel,
		DstID:            n.DstID,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package monitor
import (
"bufio"
"encoding/hex"
"fmt"
"net"
"strconv"
"github.com/gopacket/gopacket"
"github.com/gopacket/gopacket/layers"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
)
// parserCache holds pre-allocated layer structs that the gopacket decoding
// layer parsers decode into, so repeated dissections do not allocate.
// Access is guarded by dissectLock.
type parserCache struct {
	// Layers for the outer (or only) packet.
	eth     layers.Ethernet
	ip4     layers.IPv4
	ip6     layers.IPv6
	icmp4   layers.ICMPv4
	icmp6   layers.ICMPv6
	tcp     layers.TCP
	udp     layers.UDP
	sctp    layers.SCTP
	decoded []gopacket.LayerType
	// overlay holds a parallel set of layers for the inner packet of a
	// VXLAN/Geneve-encapsulated flow.
	overlay struct {
		vxlan   layers.VXLAN
		geneve  layers.Geneve
		eth     layers.Ethernet
		ip4     layers.IPv4
		ip6     layers.IPv6
		icmp4   layers.ICMPv4
		icmp6   layers.ICMPv6
		tcp     layers.TCP
		udp     layers.UDP
		sctp    layers.SCTP
		decoded []gopacket.LayerType
	}
}

// decodeOpts selects which decoder chain is used for a packet, based on the
// notification's flag bits (L3 vs L2 device, address family, overlay type).
type decodeOpts struct {
	IsL3Device bool
	IsIPv6     bool
	IsVXLAN    bool
	IsGeneve   bool
}
// Package-level dissection state. All of it is lazily initialized by
// initParser and guarded by dissectLock.
var (
	cache       *parserCache
	dissectLock lock.Mutex

	// parserL2Dev decodes packets starting at the Ethernet header.
	parserL2Dev *gopacket.DecodingLayerParser
	// parserL3Dev decodes packets from L3 devices (no Ethernet header).
	parserL3Dev struct {
		IPv4 *gopacket.DecodingLayerParser
		IPv6 *gopacket.DecodingLayerParser
	}
	// parserOverlay decodes the encapsulated payload of overlay packets.
	parserOverlay struct {
		VXLAN  *gopacket.DecodingLayerParser
		Geneve *gopacket.DecodingLayerParser
	}
)
// initParser lazily allocates the shared parser cache and the decoding
// layer parsers on first use. It must be called with dissectLock held.
func initParser() {
	if cache == nil {
		// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
		logging.DefaultSlogLogger.Info("Initializing dissection cache...")

		cache = &parserCache{}
		cache.decoded = []gopacket.LayerType{}
		cache.overlay.decoded = []gopacket.LayerType{}

		// Both decoder sets share the cache's layer structs so decoding
		// writes directly into them without per-packet allocation.
		decoders := []gopacket.DecodingLayer{
			&cache.eth,
			&cache.ip4, &cache.ip6,
			&cache.icmp4, &cache.icmp6,
			&cache.tcp, &cache.udp, &cache.sctp,
		}
		overlayDecoders := []gopacket.DecodingLayer{
			&cache.overlay.vxlan, &cache.overlay.geneve,
			&cache.overlay.eth,
			&cache.overlay.ip4, &cache.overlay.ip6,
			&cache.overlay.icmp4, &cache.overlay.icmp6,
			&cache.overlay.tcp, &cache.overlay.udp, &cache.overlay.sctp,
		}
		parserL2Dev = gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, decoders...)
		parserL3Dev.IPv4 = gopacket.NewDecodingLayerParser(layers.LayerTypeIPv4, decoders...)
		parserL3Dev.IPv6 = gopacket.NewDecodingLayerParser(layers.LayerTypeIPv6, decoders...)
		parserOverlay.VXLAN = gopacket.NewDecodingLayerParser(layers.LayerTypeVXLAN, overlayDecoders...)
		parserOverlay.Geneve = gopacket.NewDecodingLayerParser(layers.LayerTypeGeneve, overlayDecoders...)

		// Unsupported layers should end decoding gracefully, not error out.
		parserL2Dev.IgnoreUnsupported = true
		parserL3Dev.IPv4.IgnoreUnsupported = true
		parserL3Dev.IPv6.IgnoreUnsupported = true
		parserOverlay.VXLAN.IgnoreUnsupported = true
		parserOverlay.Geneve.IgnoreUnsupported = true
	}
}
// getTCPInfo renders the set TCP flags (SYN/ACK/RST/FIN) of the cached TCP
// layer as a comma-separated list. Must be called with dissectLock held and
// the cache populated.
func getTCPInfo(isOverlay bool) string {
	tcp := cache.tcp
	if isOverlay {
		tcp = cache.overlay.tcp
	}

	// Collect set flags in display order, then join with ", ".
	set := make([]string, 0, 4)
	if tcp.SYN {
		set = append(set, "SYN")
	}
	if tcp.ACK {
		set = append(set, "ACK")
	}
	if tcp.RST {
		set = append(set, "RST")
	}
	if tcp.FIN {
		set = append(set, "FIN")
	}

	info := ""
	for i, flag := range set {
		if i > 0 {
			info += ", "
		}
		info += flag
	}
	return info
}
// getEthInfo formats the cached (outer or overlay) Ethernet header as
// "srcMAC -> dstMAC ethertype". Must be called with dissectLock held and
// the cache populated.
func getEthInfo(isOverlay bool) string {
	eth := cache.eth
	if isOverlay {
		eth = cache.overlay.eth
	}
	return fmt.Sprintf("%s -> %s %s", eth.SrcMAC, eth.DstMAC, eth.EthernetType.String())
}
// ConnectionInfo contains tuple information and icmp code for a connection
type ConnectionInfo struct {
	SrcIP    net.IP
	DstIP    net.IP
	SrcPort  uint16
	DstPort  uint16
	Proto    string
	IcmpCode string
	// Tunnel, when non-nil, describes the outer encapsulation (VXLAN/Geneve)
	// tuple; the receiver then describes the inner packet.
	Tunnel *ConnectionInfo
}

// isOverlay reports whether this connection was carried inside an overlay
// tunnel (i.e. the outer tuple is available via Tunnel).
func (c *ConnectionInfo) isOverlay() bool {
	return c.Tunnel != nil
}
// getConnectionInfoFromCache assume dissectLock is obtained at the caller and data is already
// parsed to cache.decoded
//
// It builds a ConnectionInfo from the decoded layers, and reports whether an
// IP layer (hasIP) and an Ethernet layer (hasEth) were seen. If overlay
// layers were decoded as well, the outer tuple is moved into the returned
// value's Tunnel field and the return values describe the inner packet.
func getConnectionInfoFromCache() (c *ConnectionInfo, hasIP, hasEth bool) {
	c = &ConnectionInfo{}
	// Walk the outer packet's decoded layers; later layers refine the tuple.
	for _, typ := range cache.decoded {
		switch typ {
		case layers.LayerTypeEthernet:
			hasEth = true
		case layers.LayerTypeIPv4:
			hasIP = true
			c.SrcIP, c.DstIP = cache.ip4.SrcIP, cache.ip4.DstIP
		case layers.LayerTypeIPv6:
			hasIP = true
			c.SrcIP, c.DstIP = cache.ip6.SrcIP, cache.ip6.DstIP
		case layers.LayerTypeTCP:
			c.Proto = "tcp"
			c.SrcPort, c.DstPort = uint16(cache.tcp.SrcPort), uint16(cache.tcp.DstPort)
		case layers.LayerTypeUDP:
			c.Proto = "udp"
			c.SrcPort, c.DstPort = uint16(cache.udp.SrcPort), uint16(cache.udp.DstPort)
		case layers.LayerTypeSCTP:
			c.Proto = "sctp"
			c.SrcPort, c.DstPort = uint16(cache.sctp.SrcPort), uint16(cache.sctp.DstPort)
		case layers.LayerTypeICMPv4:
			c.Proto = "icmp"
			c.IcmpCode = cache.icmp4.TypeCode.String()
		case layers.LayerTypeICMPv6:
			c.Proto = "icmp"
			c.IcmpCode = cache.icmp6.TypeCode.String()
		}
	}

	// Return in case we have not decoded any overlay layer.
	if len(cache.overlay.decoded) == 0 {
		return
	}

	// Expect VXLAN/Geneve encap as first overlay layer, if not we bail out.
	switch cache.overlay.decoded[0] {
	case layers.LayerTypeVXLAN:
		c.Proto = "vxlan"
	case layers.LayerTypeGeneve:
		c.Proto = "geneve"
	default:
		return
	}

	// Reset return values. This ensures the resulting flow does not misrepresent
	// what is happening (e.g. same IP addresses for overlay and underlay).
	hasEth, hasIP = false, false
	// The outer tuple becomes the tunnel; c now describes the inner packet.
	c = &ConnectionInfo{Tunnel: c}

	// Parse the rest of the overlay layers as we would do for a non-encapsulated packet.
	// It is possible we're not parsing any layer here. This is because the overlay
	// decoders failed (e.g., not enough data). We would still return empty values
	// for the inner packet (ethernet, ip, l4, basically the re-init variables)
	// while returning the non-empty `tunnel` field.
	for _, typ := range cache.overlay.decoded[1:] {
		switch typ {
		case layers.LayerTypeEthernet:
			hasEth = true
		case layers.LayerTypeIPv4:
			hasIP = true
			c.SrcIP, c.DstIP = cache.overlay.ip4.SrcIP, cache.overlay.ip4.DstIP
		case layers.LayerTypeIPv6:
			hasIP = true
			c.SrcIP, c.DstIP = cache.overlay.ip6.SrcIP, cache.overlay.ip6.DstIP
		case layers.LayerTypeTCP:
			c.Proto = "tcp"
			c.SrcPort, c.DstPort = uint16(cache.overlay.tcp.SrcPort), uint16(cache.overlay.tcp.DstPort)
		case layers.LayerTypeUDP:
			c.Proto = "udp"
			c.SrcPort, c.DstPort = uint16(cache.overlay.udp.SrcPort), uint16(cache.overlay.udp.DstPort)
		case layers.LayerTypeSCTP:
			c.Proto = "sctp"
			c.SrcPort, c.DstPort = uint16(cache.overlay.sctp.SrcPort), uint16(cache.overlay.sctp.DstPort)
		case layers.LayerTypeICMPv4:
			c.Proto = "icmp"
			c.IcmpCode = cache.overlay.icmp4.TypeCode.String()
		case layers.LayerTypeICMPv6:
			c.Proto = "icmp"
			c.IcmpCode = cache.overlay.icmp6.TypeCode.String()
		}
	}
	return
}
// GetConnectionSummary decodes the data into layers and returns a connection
// summary in the format:
//
// - sIP:sPort -> dIP:dPort, e.g. 1.1.1.1:2000 -> 2.2.2.2:80
// - sIP -> dIP icmpCode, 1.1.1.1 -> 2.2.2.2 echo-request
// - <inner> [tunnel sIP:sPort -> dIP:dPort type], e.g. 1.1.1.1:2000 -> 2.2.2.2:80 [tunnel 5.5.5.5:8472 -> 6.6.6.6:32767 vxlan]
//
// Returns "[unknown]" for empty/unrecognizable packets and "[error]" when a
// decoder fails. The shared decoding cache is guarded by dissectLock, so the
// function is safe for concurrent use but serializes all callers.
func GetConnectionSummary(data []byte, opts *decodeOpts) string {
	dissectLock.Lock()
	defer dissectLock.Unlock()
	initParser()
	// Since v1.1.18, DecodeLayers returns a non-nil error for an empty packet, see
	// https://github.com/google/gopacket/issues/846
	// TODO: reconsider this check if the issue is fixed upstream
	if len(data) == 0 {
		// Truncate layers to avoid accidental re-use.
		cache.decoded = cache.decoded[:0]
		cache.overlay.decoded = cache.overlay.decoded[:0]
		return "[unknown]"
	}
	var err error
	// Identify correct decoder. For L3 packets (no ethernet), rely on opts.IsIPv6.
	switch {
	case opts == nil || !opts.IsL3Device:
		err = parserL2Dev.DecodeLayers(data, &cache.decoded)
	case opts.IsIPv6:
		err = parserL3Dev.IPv6.DecodeLayers(data, &cache.decoded)
	default:
		err = parserL3Dev.IPv4.DecodeLayers(data, &cache.decoded)
	}
	if err != nil {
		return "[error]"
	}
	// In case of overlay, identify correct decoder and proceed from the UDP payload.
	switch {
	case opts != nil && opts.IsVXLAN:
		err = parserOverlay.VXLAN.DecodeLayers(cache.udp.Payload, &cache.overlay.decoded)
	case opts != nil && opts.IsGeneve:
		err = parserOverlay.Geneve.DecodeLayers(cache.udp.Payload, &cache.overlay.decoded)
	default:
		// Truncate layers to avoid accidental re-use.
		cache.overlay.decoded = cache.overlay.decoded[:0]
	}
	if err != nil {
		return "[error]"
	}
	var str string
	c, hasIP, hasEth := getConnectionInfoFromCache()
	// Dump the outer packet. The cases are ordered from most to least
	// specific information available (L4 proto > IP > ethernet).
	switch {
	case c.Proto == "icmp":
		str += fmt.Sprintf("%s -> %s %s %s", c.SrcIP, c.DstIP, c.Proto, c.IcmpCode)
	case c.Proto != "":
		str += fmt.Sprintf("%s -> %s %s",
			net.JoinHostPort(c.SrcIP.String(), strconv.Itoa(int(c.SrcPort))),
			net.JoinHostPort(c.DstIP.String(), strconv.Itoa(int(c.DstPort))),
			c.Proto)
		if c.Proto == "tcp" {
			// Append TCP flag information for the (possibly inner) segment.
			str += " " + getTCPInfo(c.isOverlay())
		}
	case hasIP:
		str += fmt.Sprintf("%s -> %s", c.SrcIP, c.DstIP)
	case hasEth:
		str += getEthInfo(c.isOverlay())
	default:
		str += "[unknown]"
	}
	// In case of an overlay packet, dump also the tunnel.
	if c.isOverlay() {
		str += fmt.Sprintf(" [tunnel %s -> %s %s]",
			net.JoinHostPort(c.Tunnel.SrcIP.String(), strconv.Itoa(int(c.Tunnel.SrcPort))),
			net.JoinHostPort(c.Tunnel.DstIP.String(), strconv.Itoa(int(c.Tunnel.DstPort))),
			c.Tunnel.Proto)
	}
	return str
}
// Dissect parses and prints the provided data if dissect is set to true,
// otherwise the data is printed as HEX output.
//
// Output is written to buf, one line per decoded layer. The shared decoding
// cache is guarded by dissectLock, so callers are serialized.
func Dissect(buf *bufio.Writer, dissect bool, data []byte, opts *decodeOpts) {
	if !dissect {
		fmt.Fprint(buf, hex.Dump(data))
		return
	}
	dissectLock.Lock()
	defer dissectLock.Unlock()
	initParser()
	// See comment in [GetConnectionSummary].
	if len(data) == 0 {
		cache.decoded = cache.decoded[:0]
		cache.overlay.decoded = cache.overlay.decoded[:0]
		return
	}
	var err error
	var parser *gopacket.DecodingLayerParser
	// See comment in [GetConnectionSummary].
	switch {
	case opts == nil || !opts.IsL3Device:
		parser = parserL2Dev
	case opts.IsIPv6:
		parser = parserL3Dev.IPv6
	default:
		parser = parserL3Dev.IPv4
	}
	err = parser.DecodeLayers(data, &cache.decoded)
	// Print every successfully decoded layer before reporting truncation or
	// errors, so partial information is still shown for bad packets.
	for _, typ := range cache.decoded {
		switch typ {
		case layers.LayerTypeEthernet:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.eth))
		case layers.LayerTypeIPv4:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.ip4))
		case layers.LayerTypeIPv6:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.ip6))
		case layers.LayerTypeTCP:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.tcp))
		case layers.LayerTypeUDP:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.udp))
		case layers.LayerTypeSCTP:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.sctp))
		case layers.LayerTypeICMPv4:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.icmp4))
		case layers.LayerTypeICMPv6:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.icmp6))
		default:
			fmt.Fprintln(buf, "Unknown layer")
		}
	}
	if parser.Truncated {
		fmt.Fprintln(buf, " Packet has been truncated")
	}
	if err != nil {
		fmt.Fprintln(buf, " Failed to decode layer:", err)
	}
	// See comment in [GetConnectionSummary].
	switch {
	case opts != nil && opts.IsVXLAN:
		parser = parserOverlay.VXLAN
	case opts != nil && opts.IsGeneve:
		parser = parserOverlay.Geneve
	default:
		// Truncate layers to avoid accidental re-use.
		cache.overlay.decoded = cache.overlay.decoded[:0]
		return
	}
	// Decode and print the inner (encapsulated) packet, starting from the
	// UDP payload of the outer packet.
	err = parser.DecodeLayers(cache.udp.Payload, &cache.overlay.decoded)
	for _, typ := range cache.overlay.decoded {
		switch typ {
		case layers.LayerTypeVXLAN:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.vxlan))
		case layers.LayerTypeGeneve:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.geneve))
		case layers.LayerTypeEthernet:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.eth))
		case layers.LayerTypeIPv4:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.ip4))
		case layers.LayerTypeIPv6:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.ip6))
		case layers.LayerTypeTCP:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.tcp))
		case layers.LayerTypeUDP:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.udp))
		case layers.LayerTypeSCTP:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.sctp))
		case layers.LayerTypeICMPv4:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.icmp4))
		case layers.LayerTypeICMPv6:
			fmt.Fprintln(buf, gopacket.LayerString(&cache.overlay.icmp6))
		default:
			fmt.Fprintln(buf, "Unknown layer")
		}
	}
	if parser.Truncated {
		fmt.Fprintln(buf, " Packet has been truncated")
	}
	if err != nil {
		fmt.Fprintln(buf, " Failed to decode layer:", err)
	}
}
// Flow contains source and destination endpoints of a single layer, rendered
// as strings from the corresponding gopacket flow (MAC addresses for L2,
// IP addresses for L3, ports for L4).
type Flow struct {
	Src string `json:"src"`
	Dst string `json:"dst"`
}
// Tunnel holds VXLAN or GENEVE tunnel info for DissectSummary. The string
// fields contain the gopacket.LayerString rendering of the corresponding
// outer (underlay) header; exactly one of VXLAN/GENEVE is expected to be set.
type Tunnel struct {
	Ethernet string `json:"ethernet,omitempty"`
	IPv4     string `json:"ipv4,omitempty"`
	IPv6     string `json:"ipv6,omitempty"`
	UDP      string `json:"udp,omitempty"`
	VXLAN    string `json:"vxlan,omitempty"`
	GENEVE   string `json:"geneve,omitempty"`
	L2       *Flow  `json:"l2,omitempty"`
	L3       *Flow  `json:"l3,omitempty"`
	L4       *Flow  `json:"l4,omitempty"`
}
// DissectSummary bundles decoded layers into json-marshallable message.
// For encapsulated packets the top-level fields describe the inner packet
// and Tunnel describes the outer headers (see GetDissectSummary).
type DissectSummary struct {
	Ethernet string  `json:"ethernet,omitempty"`
	IPv4     string  `json:"ipv4,omitempty"`
	IPv6     string  `json:"ipv6,omitempty"`
	TCP      string  `json:"tcp,omitempty"`
	UDP      string  `json:"udp,omitempty"`
	SCTP     string  `json:"sctp,omitempty"`
	ICMPv4   string  `json:"icmpv4,omitempty"`
	ICMPv6   string  `json:"icmpv6,omitempty"`
	L2       *Flow   `json:"l2,omitempty"`
	L3       *Flow   `json:"l3,omitempty"`
	L4       *Flow   `json:"l4,omitempty"`
	Tunnel   *Tunnel `json:"tunnel,omitempty"`
}
// GetDissectSummary returns DissectSummary created from data, or nil when
// the packet is empty or the outer decode fails. The shared decoding cache
// is guarded by dissectLock, so callers are serialized.
func GetDissectSummary(data []byte, opts *decodeOpts) *DissectSummary {
	dissectLock.Lock()
	defer dissectLock.Unlock()
	initParser()
	// See comment in [GetConnectionSummary].
	if len(data) == 0 {
		cache.decoded = cache.decoded[:0]
		cache.overlay.decoded = cache.overlay.decoded[:0]
		return nil
	}
	var err error
	// See comment in [GetConnectionSummary].
	switch {
	case opts == nil || !opts.IsL3Device:
		err = parserL2Dev.DecodeLayers(data, &cache.decoded)
	case opts.IsIPv6:
		err = parserL3Dev.IPv6.DecodeLayers(data, &cache.decoded)
	default:
		err = parserL3Dev.IPv4.DecodeLayers(data, &cache.decoded)
	}
	if err != nil {
		return nil
	}
	ret := &DissectSummary{}
	// Render each decoded layer and record the src/dst flow per OSI level.
	for _, typ := range cache.decoded {
		switch typ {
		case layers.LayerTypeEthernet:
			ret.Ethernet = gopacket.LayerString(&cache.eth)
			src, dst := cache.eth.LinkFlow().Endpoints()
			ret.L2 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeIPv4:
			ret.IPv4 = gopacket.LayerString(&cache.ip4)
			src, dst := cache.ip4.NetworkFlow().Endpoints()
			ret.L3 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeIPv6:
			ret.IPv6 = gopacket.LayerString(&cache.ip6)
			src, dst := cache.ip6.NetworkFlow().Endpoints()
			ret.L3 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeTCP:
			ret.TCP = gopacket.LayerString(&cache.tcp)
			src, dst := cache.tcp.TransportFlow().Endpoints()
			ret.L4 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeUDP:
			ret.UDP = gopacket.LayerString(&cache.udp)
			src, dst := cache.udp.TransportFlow().Endpoints()
			ret.L4 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeSCTP:
			ret.SCTP = gopacket.LayerString(&cache.sctp)
			src, dst := cache.sctp.TransportFlow().Endpoints()
			ret.L4 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeICMPv4:
			ret.ICMPv4 = gopacket.LayerString(&cache.icmp4)
		case layers.LayerTypeICMPv6:
			ret.ICMPv6 = gopacket.LayerString(&cache.icmp6)
		}
	}
	// See comment in [GetConnectionSummary].
	switch {
	case opts != nil && opts.IsVXLAN:
		err = parserOverlay.VXLAN.DecodeLayers(cache.udp.Payload, &cache.overlay.decoded)
	case opts != nil && opts.IsGeneve:
		err = parserOverlay.Geneve.DecodeLayers(cache.udp.Payload, &cache.overlay.decoded)
	default:
		// Truncate layers to avoid accidental re-use.
		cache.overlay.decoded = cache.overlay.decoded[:0]
		return ret
	}
	// On overlay decode failure, return the outer packet summary we already have.
	if err != nil {
		return ret
	}
	if len(cache.overlay.decoded) == 0 {
		return ret
	}
	// See comment in [GetConnectionSummary].
	// In case of VXLAN/GENEVE, let's move decoded layers inside the Tunnel
	// field, and keep decoding after overlay headers.
	switch cache.overlay.decoded[0] {
	case layers.LayerTypeVXLAN, layers.LayerTypeGeneve:
		ret = &DissectSummary{Tunnel: &Tunnel{
			Ethernet: ret.Ethernet,
			IPv4:     ret.IPv4,
			IPv6:     ret.IPv6,
			UDP:      ret.UDP,
			L2:       ret.L2,
			L3:       ret.L3,
			L4:       ret.L4,
		}}
		if cache.overlay.decoded[0] == layers.LayerTypeVXLAN {
			ret.Tunnel.VXLAN = gopacket.LayerString(&cache.overlay.vxlan)
		} else {
			ret.Tunnel.GENEVE = gopacket.LayerString(&cache.overlay.geneve)
		}
	default:
		return ret
	}
	// Parse the inner (encapsulated) packet into the top-level fields, as we
	// would for a non-encapsulated packet.
	for _, typ := range cache.overlay.decoded[1:] {
		switch typ {
		case layers.LayerTypeEthernet:
			ret.Ethernet = gopacket.LayerString(&cache.overlay.eth)
			src, dst := cache.overlay.eth.LinkFlow().Endpoints()
			ret.L2 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeIPv4:
			ret.IPv4 = gopacket.LayerString(&cache.overlay.ip4)
			src, dst := cache.overlay.ip4.NetworkFlow().Endpoints()
			ret.L3 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeIPv6:
			ret.IPv6 = gopacket.LayerString(&cache.overlay.ip6)
			src, dst := cache.overlay.ip6.NetworkFlow().Endpoints()
			ret.L3 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeTCP:
			ret.TCP = gopacket.LayerString(&cache.overlay.tcp)
			src, dst := cache.overlay.tcp.TransportFlow().Endpoints()
			ret.L4 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeUDP:
			ret.UDP = gopacket.LayerString(&cache.overlay.udp)
			src, dst := cache.overlay.udp.TransportFlow().Endpoints()
			ret.L4 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeSCTP:
			ret.SCTP = gopacket.LayerString(&cache.overlay.sctp)
			src, dst := cache.overlay.sctp.TransportFlow().Endpoints()
			ret.L4 = &Flow{Src: src.String(), Dst: dst.String()}
		case layers.LayerTypeICMPv4:
			ret.ICMPv4 = gopacket.LayerString(&cache.overlay.icmp4)
		case layers.LayerTypeICMPv6:
			ret.ICMPv6 = gopacket.LayerString(&cache.overlay.icmp6)
		}
	}
	return ret
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package format
import (
"slices"
"strconv"
"strings"
"github.com/spf13/pflag"
)
// Uint16Flags is a slice of unsigned 16-bit ints with some convenience methods.
type Uint16Flags []uint16

// Compile-time assertion that Uint16Flags can be used as a pflag value.
var _ pflag.Value = &Uint16Flags{}

// String provides a human-readable string format of the received variable.
func (f *Uint16Flags) String() string {
	var b strings.Builder
	for idx, v := range *f {
		if idx > 0 {
			b.WriteString(", ")
		}
		b.WriteString(strconv.Itoa(int(v)))
	}
	return b.String()
}

// Set converts the specified value into an integer and appends it to the flags.
// Returns an error if the value cannot be converted to a 16-bit unsigned value.
func (f *Uint16Flags) Set(value string) error {
	parsed, err := strconv.ParseUint(value, 10, 16)
	if err != nil {
		return err
	}
	*f = append(*f, uint16(parsed))
	return nil
}

// Type returns a human-readable string representing the type of the receiver.
func (f *Uint16Flags) Type() string {
	return "[]uint16"
}

// Has reports whether value exists in the flags.
func (f *Uint16Flags) Has(value uint16) bool {
	return slices.Contains(*f, value)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package format
import (
"bufio"
"fmt"
"io"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/monitor"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/monitor/payload"
)
// MonitorFormatter filters and formats monitor messages from a buffer.
// All output is written to buf; filtering happens in match().
type MonitorFormatter struct {
	EventTypes monitorAPI.MessageTypeFilter // message types to show (empty = all)
	FromSource Uint16Flags                  // source endpoint IDs to show (empty = all)
	ToDst      Uint16Flags                  // destination endpoint IDs to show (empty = all)
	Related    Uint16Flags                  // endpoint IDs matched against either source or destination
	Hex        bool                         // when true, payloads are hex-dumped instead of dissected
	JSONOutput bool
	Verbosity  monitorAPI.Verbosity
	Numeric    bool // converted to monitorAPI.DisplayFormat when dumping events

	linkMonitor getters.LinkGetter // used to resolve link information while dumping
	buf         *bufio.Writer      // destination for all formatted output
}
// NewMonitorFormatter returns a new formatter with default configuration:
// empty filters, non-hex, non-JSON output, writing to w.
func NewMonitorFormatter(verbosity monitorAPI.Verbosity, linkMonitor getters.LinkGetter, w io.Writer) *MonitorFormatter {
	formatter := &MonitorFormatter{
		EventTypes:  monitorAPI.MessageTypeFilter{},
		FromSource:  Uint16Flags{},
		ToDst:       Uint16Flags{},
		Related:     Uint16Flags{},
		Hex:         false,
		JSONOutput:  false,
		Numeric:     bool(monitorAPI.DisplayLabel),
		Verbosity:   verbosity,
		linkMonitor: linkMonitor,
		buf:         bufio.NewWriter(w),
	}
	return formatter
}
// match checks if the event type, from endpoint and / or to endpoint match
// when they are supplied. The either part of from and to endpoint depends on
// related to, which can match on both. An empty filter matches everything.
func (m *MonitorFormatter) match(messageType int, src uint16, dst uint16) bool {
	switch {
	case len(m.EventTypes) > 0 && !m.EventTypes.Contains(messageType):
		return false
	case len(m.FromSource) > 0 && !m.FromSource.Has(src):
		return false
	case len(m.ToDst) > 0 && !m.ToDst.Has(dst):
		return false
	case len(m.Related) > 0 && !m.Related.Has(src) && !m.Related.Has(dst):
		return false
	}
	return true
}
// FormatSample prints an event from the provided raw data slice to stdout.
//
// For most monitor event types, 'data' corresponds to the 'data' field in
// bpf.PerfEventSample. Exceptions are MessageTypeAccessLog and
// MessageTypeAgent.
func (m *MonitorFormatter) FormatSample(data []byte, cpu int) {
	defer m.buf.Flush()
	prefix := fmt.Sprintf("CPU %02d:", cpu)
	// Guard against zero-length samples: indexing data[0] below would panic.
	// Report them via the existing "Unknown event" message instead.
	if len(data) == 0 {
		fmt.Fprintf(m.buf, "%s Unknown event: %+v\n", prefix, data)
		return
	}
	// The first byte of the sample identifies the message type.
	messageType := int(data[0])
	var msg monitorAPI.MonitorEvent
	switch messageType {
	case monitorAPI.MessageTypeDrop:
		msg = &monitor.DropNotify{}
	case monitorAPI.MessageTypeDebug:
		msg = &monitor.DebugMsg{}
	case monitorAPI.MessageTypeCapture:
		msg = &monitor.DebugCapture{}
	case monitorAPI.MessageTypeTrace:
		msg = &monitor.TraceNotify{}
	case monitorAPI.MessageTypeAccessLog:
		msg = &monitor.LogRecordNotify{}
	case monitorAPI.MessageTypeAgent:
		msg = &monitorAPI.AgentNotify{}
	case monitorAPI.MessageTypePolicyVerdict:
		msg = &monitor.PolicyVerdictNotify{}
	case monitorAPI.MessageTypeTraceSock:
		msg = &monitor.TraceSockNotify{}
	default:
		fmt.Fprintf(m.buf, "%s Unknown event: %+v\n", prefix, data)
		return
	}
	if err := msg.Decode(data); err != nil {
		fmt.Fprintf(m.buf, "cannot decode message type '%d': %v\n", messageType, err)
		return
	}
	// For TraceSockNotify we don't implement any matching logic.
	// See the original implementation: https://github.com/cilium/cilium/pull/21516#discussion_r984194699
	_, isTraceSock := msg.(*monitor.TraceSockNotify)
	if !isTraceSock && !m.match(messageType, msg.GetSrc(), msg.GetDst()) {
		return
	}
	msg.Dump(&monitorAPI.DumpArgs{
		Data:        data,
		CpuPrefix:   prefix,
		Format:      monitorAPI.DisplayFormat(m.Numeric),
		LinkMonitor: m.linkMonitor,
		Dissect:     !m.Hex,
		Verbosity:   m.Verbosity,
		Buf:         m.buf,
	})
}
// FormatLostEvent formats a lost event using the specified payload parameters.
func (m *MonitorFormatter) FormatLostEvent(lost uint64, cpu int) {
	defer m.buf.Flush()
	line := fmt.Sprintf("CPU %02d: Lost %d events\n", cpu, lost)
	m.buf.WriteString(line)
}
// FormatUnknownEvent formats an unknown event using the specified payload parameters.
func (m *MonitorFormatter) FormatUnknownEvent(lost uint64, cpu int, t int) {
	defer m.buf.Flush()
	line := fmt.Sprintf("Unknown payload type: %d, CPU %02d: Lost %d events\n", t, cpu, lost)
	m.buf.WriteString(line)
}
// FormatEvent formats an event from the specified payload.
//
// Returns true if the event was successfully recognized, false otherwise.
func (m *MonitorFormatter) FormatEvent(pl *payload.Payload) bool {
	if pl.Type == payload.EventSample {
		m.FormatSample(pl.Data, pl.CPU)
		return true
	}
	if pl.Type == payload.RecordLost {
		m.FormatLostEvent(pl.Lost, pl.CPU)
		return true
	}
	m.FormatUnknownEvent(pl.Lost, pl.CPU, pl.Type)
	return false
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package monitor
import (
"bufio"
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"strings"
"github.com/cilium/dns"
"github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/proxy/accesslog"
)
// LogRecordNotify is a proxy access log notification.
type LogRecordNotify struct {
	// The embedded record is gob-decoded from the monitor payload; see Decode.
	accesslog.LogRecord
}
// Dump prints the message according to the verbosity level specified.
func (l *LogRecordNotify) Dump(args *api.DumpArgs) {
	switch args.Verbosity {
	case api.JSON:
		l.DumpJSON(args.Buf)
	default:
		l.DumpInfo(args.Buf)
	}
}
// GetSrc retrieves the source endpoint for the message.
func (l *LogRecordNotify) GetSrc() uint16 {
	// Truncating conversion: access-log endpoint IDs are stored as uint64,
	// monitor filtering operates on uint16 IDs.
	return uint16(l.SourceEndpoint.ID)
}
// GetDst retrieves the destination endpoint for the message.
func (l *LogRecordNotify) GetDst() uint16 {
	// Truncating conversion, mirroring GetSrc.
	return uint16(l.DestinationEndpoint.ID)
}
// Decode decodes the message in 'data' into the struct. The first byte is
// the monitor message type and is skipped; the remainder is gob-encoded.
func (l *LogRecordNotify) Decode(data []byte) error {
	return gob.NewDecoder(bytes.NewBuffer(data[1:])).Decode(l)
}
// direction returns an arrow representing the observation point of the
// record: "<-" for ingress, "->" for egress, "??" otherwise.
func (l *LogRecordNotify) direction() string {
	if l.ObservationPoint == accesslog.Ingress {
		return "<-"
	}
	if l.ObservationPoint == accesslog.Egress {
		return "->"
	}
	return "??"
}
// l7Proto derives the L7 protocol name from whichever protocol-specific
// record is populated, falling back to "unknown-l7".
func (l *LogRecordNotify) l7Proto() string {
	switch {
	case l.HTTP != nil:
		return "http"
	case l.Kafka != nil:
		return "kafka"
	case l.DNS != nil:
		return "dns"
	case l.L7 != nil:
		return l.L7.Proto
	default:
		return "unknown-l7"
	}
}
// DumpInfo dumps an access log notification in human-readable form.
func (l *LogRecordNotify) DumpInfo(buf *bufio.Writer) {
	// Header line: direction arrow, flow type, L7 protocol and the two
	// endpoints. Requests are printed "from src to dst", responses the
	// other way around. A record with another Type prints no header.
	switch l.Type {
	case accesslog.TypeRequest:
		fmt.Fprintf(buf, "%s %s %s from %d (%s) to %d (%s), identity %d->%d, verdict %s",
			l.direction(), l.Type, l.l7Proto(), l.SourceEndpoint.ID, l.SourceEndpoint.Labels,
			l.DestinationEndpoint.ID, l.DestinationEndpoint.Labels,
			l.SourceEndpoint.Identity, l.DestinationEndpoint.Identity,
			l.Verdict)
	case accesslog.TypeResponse:
		fmt.Fprintf(buf, "%s %s %s to %d (%s) from %d (%s), identity %d->%d, verdict %s",
			l.direction(), l.Type, l.l7Proto(), l.DestinationEndpoint.ID, l.DestinationEndpoint.Labels,
			l.SourceEndpoint.ID, l.SourceEndpoint.Labels,
			l.SourceEndpoint.Identity, l.DestinationEndpoint.Identity,
			l.Verdict)
	}
	// Protocol-specific detail; each populated record type terminates the
	// line with '\n'.
	// NOTE(review): if none of HTTP/Kafka/DNS/L7 are set, no trailing
	// newline is printed — confirm callers tolerate this.
	if http := l.HTTP; http != nil {
		url := ""
		if http.URL != nil {
			url = http.URL.String()
		}
		fmt.Fprintf(buf, " %s %s => %d\n", http.Method, url, http.Code)
	}
	if kafka := l.Kafka; kafka != nil {
		fmt.Fprintf(buf, " %s topic %s => %d\n", kafka.APIKey, kafka.Topic.Topic, kafka.ErrorCode)
	}
	if l.DNS != nil {
		// Render the queried record types (A, AAAA, ...) as a comma-separated list.
		types := []string{}
		for _, t := range l.DNS.QTypes {
			types = append(types, dns.TypeToString[t])
		}
		qTypeStr := strings.Join(types, ",")
		switch {
		case l.Type == accesslog.TypeRequest:
			fmt.Fprintf(buf, " DNS %s: %s %s", l.DNS.ObservationSource, l.DNS.Query, qTypeStr)
		case l.Type == accesslog.TypeResponse:
			// Responses repeat the query, then append TTL, answer IPs and CNAMEs.
			fmt.Fprintf(buf, " DNS %s: %s %s", l.DNS.ObservationSource, l.DNS.Query, qTypeStr)
			ips := make([]string, 0, len(l.DNS.IPs))
			for _, ip := range l.DNS.IPs {
				ips = append(ips, ip.String())
			}
			fmt.Fprintf(buf, " TTL: %d Answer: '%s'", l.DNS.TTL, strings.Join(ips, ","))
			if len(l.DNS.CNAMEs) > 0 {
				fmt.Fprintf(buf, " CNAMEs: %s", strings.Join(l.DNS.CNAMEs, ","))
			}
		}
		fmt.Fprintf(buf, "\n")
	}
	if l7 := l.L7; l7 != nil {
		// Print all generic L7 fields; "status" is held back and appended
		// last as "=> status:<v>".
		status := ""
		for k, v := range l7.Fields {
			if k == "status" {
				status = v
			} else {
				fmt.Fprintf(buf, " %s:%s", k, v)
			}
		}
		if status != "" {
			fmt.Fprintf(buf, " => status:%s", status)
		}
		fmt.Fprintf(buf, "\n")
	}
}
// getJSON marshals the record's verbose representation to a JSON string.
func (l *LogRecordNotify) getJSON() (string, error) {
	encoded, err := json.Marshal(LogRecordNotifyToVerbose(l))
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
// DumpJSON prints notification in json format.
func (l *LogRecordNotify) DumpJSON(buf *bufio.Writer) {
	resp, err := l.getJSON()
	// NOTE(review): marshalling errors are silently dropped — nothing is
	// written on failure. Presumably intentional best-effort output; confirm.
	if err == nil {
		fmt.Fprintln(buf, resp)
	}
}
// LogRecordNotifyVerbose represents a json notification printed by monitor.
// It flattens LogRecordNotify into stable JSON field names; see
// LogRecordNotifyToVerbose for the field mapping.
type LogRecordNotifyVerbose struct {
	Type             string                     `json:"type"`
	ObservationPoint accesslog.ObservationPoint `json:"observationPoint"`
	FlowType         accesslog.FlowType         `json:"flowType"`
	L7Proto          string                     `json:"l7Proto"`
	SrcEpID          uint64                     `json:"srcEpID"`
	SrcEpLabels      []string                   `json:"srcEpLabels"`
	SrcIdentity      uint64                     `json:"srcIdentity"`
	DstEpID          uint64                     `json:"dstEpID"`
	DstEpLabels      []string                   `json:"dstEpLabels"`
	DstIdentity      uint64                     `json:"dstIdentity"`
	Verdict          accesslog.FlowVerdict      `json:"verdict"`
	HTTP             *accesslog.LogRecordHTTP   `json:"http,omitempty"`
	Kafka            *accesslog.LogRecordKafka  `json:"kafka,omitempty"`
	DNS              *accesslog.LogRecordDNS    `json:"dns,omitempty"`
	L7               *accesslog.LogRecordL7     `json:"l7,omitempty"`
}
// LogRecordNotifyToVerbose turns LogRecordNotify into json-friendly Verbose structure.
// Type is always the literal "logRecord"; endpoint labels are converted via GetModel().
func LogRecordNotifyToVerbose(n *LogRecordNotify) LogRecordNotifyVerbose {
	return LogRecordNotifyVerbose{
		Type:             "logRecord",
		ObservationPoint: n.ObservationPoint,
		FlowType:         n.Type,
		L7Proto:          n.l7Proto(),
		SrcEpID:          n.SourceEndpoint.ID,
		SrcEpLabels:      n.SourceEndpoint.Labels.GetModel(),
		SrcIdentity:      n.SourceEndpoint.Identity,
		DstEpID:          n.DestinationEndpoint.ID,
		DstEpLabels:      n.DestinationEndpoint.Labels.GetModel(),
		DstIdentity:      n.DestinationEndpoint.Identity,
		Verdict:          n.Verdict,
		HTTP:             n.HTTP,
		Kafka:            n.Kafka,
		DNS:              n.DNS,
		L7:               n.L7,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package payload
import (
"bytes"
"encoding/binary"
"encoding/gob"
"io"
"github.com/cilium/cilium/pkg/byteorder"
)
// Below constants are based on the ones from <linux/perf_event.h>.
// They are stored in Payload.Type to tag what kind of perf record a
// payload carries (see FormatEvent in the monitor formatter).
const (
	// EventSample is equivalent to PERF_RECORD_SAMPLE
	EventSample = 9
	// RecordLost is equivalent to PERF_RECORD_LOST
	RecordLost = 2
)
// Meta is used by readers to get information about the payload.
type Meta struct {
	Size uint32
	_    [28]byte // Reserved 28 bytes for future fields.
}

// UnmarshalBinary decodes the metadata from its binary representation.
func (m *Meta) UnmarshalBinary(data []byte) error {
	return m.ReadBinary(bytes.NewReader(data))
}

// MarshalBinary encodes the metadata into its binary representation.
func (m *Meta) MarshalBinary() ([]byte, error) {
	var out bytes.Buffer
	if err := m.WriteBinary(&out); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

// ReadBinary reads the metadata from its binary representation.
func (m *Meta) ReadBinary(r io.Reader) error {
	return binary.Read(r, byteorder.Native, m)
}

// WriteBinary writes the metadata into its binary representation.
func (m *Meta) WriteBinary(w io.Writer) error {
	return binary.Write(w, byteorder.Native, m)
}
// Payload is the structure used when copying events from the main monitor.
type Payload struct {
	Data []byte
	CPU  int
	Lost uint64
	Type int
}

// Decode decodes the payload from its binary representation.
//
// Note that this method can't be named UnmarshalBinary, because the gob encoder would call
// this method, resulting in infinite recursion.
func (p *Payload) Decode(data []byte) error {
	return p.ReadBinary(bytes.NewReader(data))
}

// Encode encodes the payload into its binary representation.
//
// Note that this method can't be named MarshalBinary, because the gob encoder would call
// this method, resulting in infinite recursion.
func (p *Payload) Encode() ([]byte, error) {
	var out bytes.Buffer
	if err := p.WriteBinary(&out); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

// ReadBinary reads the payload from its binary (gob) representation.
func (p *Payload) ReadBinary(r io.Reader) error {
	return p.DecodeBinary(gob.NewDecoder(r))
}

// WriteBinary writes the payload into its binary (gob) representation.
func (p *Payload) WriteBinary(w io.Writer) error {
	return p.EncodeBinary(gob.NewEncoder(w))
}

// EncodeBinary writes the payload into its binary representation.
func (p *Payload) EncodeBinary(enc *gob.Encoder) error {
	return enc.Encode(p)
}

// DecodeBinary reads the payload from its binary representation.
func (p *Payload) DecodeBinary(dec *gob.Decoder) error {
	return dec.Decode(p)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package monitor
import (
"slices"
"github.com/spf13/pflag"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
)
// Compile-time assertion that MessageTypeFilter implements pflag.Value and
// can therefore be used directly as a command-line flag value.
var _ pflag.Value = &monitorAPI.MessageTypeFilter{}

const (
	// msgSeparator is a horizontal-rule line; its usage is outside this
	// file — presumably printed between monitor messages. TODO confirm.
	msgSeparator = "------------------------------------------------------------------------------"
)
// GetAllTypes returns a slice of all known message types, sorted.
func GetAllTypes() []string {
	names := make([]string, 0, len(monitorAPI.MessageTypeNames))
	for name := range monitorAPI.MessageTypeNames {
		names = append(names, name)
	}
	slices.Sort(names)
	return names
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package mountinfo
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
const (
	// mountInfoFilepath is the kernel-provided list of mounts for the
	// current process; parsed by GetMountInfo.
	mountInfoFilepath = "/proc/self/mountinfo"
)
// MountInfo is a struct representing information from /proc/pid/mountinfo. More
// information about file syntax:
// https://www.kernel.org/doc/Documentation/filesystems/proc.txt
type MountInfo struct {
MountID int64
ParentID int64
StDev string
Root string
MountPoint string
MountOptions string
OptionalFields []string
FilesystemType string
MountSource string
SuperOptions string
}
// parseMountInfoFile returns a slice of *MountInfo with information parsed from
// the given reader
func parseMountInfoFile(r io.Reader) ([]*MountInfo, error) {
var result []*MountInfo
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
mountInfoRaw := scanner.Text()
// Optional fields (which are on the 7th position) are separated
// from the rest of fields by "-" character. The number of
// optional fields can be greater or equal to 1.
mountInfoSeparated := strings.Split(mountInfoRaw, " - ")
if len(mountInfoSeparated) != 2 {
return nil, fmt.Errorf("invalid mountinfo entry which has more that one '-' separator: %s", mountInfoRaw)
}
// Extract fields from both sides of mountinfo
mountInfoLeft := strings.Split(mountInfoSeparated[0], " ")
mountInfoRight := strings.Split(mountInfoSeparated[1], " ")
// Before '-' separator there should be 6 fields and unknown
// number of optional fields
if len(mountInfoLeft) < 6 {
return nil, fmt.Errorf("invalid mountinfo entry: %s", mountInfoRaw)
}
// After '-' separator there should be 3 fields
if len(mountInfoRight) != 3 {
return nil, fmt.Errorf("invalid mountinfo entry: %s", mountInfoRaw)
}
mountID, err := strconv.ParseInt(mountInfoLeft[0], 10, 64)
if err != nil {
return nil, err
}
parentID, err := strconv.ParseInt(mountInfoLeft[1], 10, 64)
if err != nil {
return nil, err
}
// Extract optional fields, which start from 7th position
var optionalFields []string
for i := 6; i < len(mountInfoLeft); i++ {
optionalFields = append(optionalFields, mountInfoLeft[i])
}
result = append(result, &MountInfo{
MountID: mountID,
ParentID: parentID,
StDev: mountInfoLeft[2],
Root: mountInfoLeft[3],
MountPoint: mountInfoLeft[4],
MountOptions: mountInfoLeft[5],
OptionalFields: optionalFields,
FilesystemType: mountInfoRight[0],
MountSource: mountInfoRight[1],
SuperOptions: mountInfoRight[2],
})
}
if err := scanner.Err(); err != nil {
return nil, err
}
return result, nil
}
// GetMountInfo returns a slice of *MountInfo with information parsed from
// /proc/self/mountinfo.
func GetMountInfo() ([]*MountInfo, error) {
	f, err := os.Open(mountInfoFilepath)
	if err != nil {
		return nil, fmt.Errorf("failed to open mount information at %s: %w", mountInfoFilepath, err)
	}
	defer f.Close()

	return parseMountInfoFile(f)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package mountinfo
import (
"errors"
"os"
"path/filepath"
"golang.org/x/sys/unix"
)
const (
	// FilesystemType superblock magic numbers for filesystems,
	// to be used for IsMountFS.

	// FilesystemTypeBPFFS is the magic number of the BPF filesystem.
	FilesystemTypeBPFFS = unix.BPF_FS_MAGIC
	// FilesystemTypeCgroup2 is the magic number of the cgroup v2 filesystem.
	FilesystemTypeCgroup2 = unix.CGROUP2_SUPER_MAGIC
)
// IsMountFS returns two boolean values, checking
// - whether the path is a mount point;
// - if yes, whether its filesystem type is mntType.
//
// Note that this function can not detect bind mounts,
// and is not working properly when path="/".
func IsMountFS(mntType int64, path string) (bool, bool, error) {
	var stat unix.Stat_t
	if err := unix.Lstat(path, &stat); err != nil {
		if errors.Is(err, unix.ENOENT) {
			// A non-existent path cannot be a mount point.
			return false, false, nil
		}
		return false, false, &os.PathError{Op: "lstat", Path: path, Err: err}
	}

	parent := filepath.Dir(path)
	var parentStat unix.Stat_t
	if err := unix.Lstat(parent, &parentStat); err != nil {
		return false, false, &os.PathError{Op: "lstat", Path: parent, Err: err}
	}

	// Same device as the parent directory means path is not a mount point.
	if stat.Dev == parentStat.Dev {
		return false, false, nil
	}

	// path is a mount point; now compare its filesystem magic with mntType.
	var fst unix.Statfs_t
	if err := unix.Statfs(path, &fst); err != nil {
		return true, false, &os.PathError{Op: "statfs", Path: path, Err: err}
	}
	return true, int64(fst.Type) == mntType, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package node
import (
"bufio"
"context"
"fmt"
"log/slog"
"net"
"net/netip"
"os"
"strconv"
"strings"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/cidr"
"github.com/cilium/cilium/pkg/common"
"github.com/cilium/cilium/pkg/datapath/tunnel"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/node/addressing"
"github.com/cilium/cilium/pkg/option"
wgTypes "github.com/cilium/cilium/pkg/wireguard/types"
)
// preferPublicIP is passed as the address-preference flag to
// firstGlobalV4Addr/firstGlobalV6Addr during node IP discovery.
const preferPublicIP bool = true

var (
	// addrs holds mutex-protected router information for the local node.
	addrs addresses

	// localNode holds the current state of the local "types.Node".
	// This is defined here until all uses of the getters and
	// setters in this file have been migrated to use LocalNodeStore
	// directly.
	// Initialized to proper instance via an invoke function in LocalNodeStoreCell,
	// or temporarily in tests with 'WithTestLocalNodeStore'.
	localNode *LocalNodeStore
)
// getLocalNode returns a snapshot of the local node from the LocalNodeStore,
// fatally logging if the store can no longer serve it.
func getLocalNode(logger *slog.Logger) LocalNode {
	node, err := localNode.Get(context.TODO())
	if err != nil {
		// Only expecting errors if we're called after LocalNodeStore has stopped, e.g.
		// we have a component that uses the legacy getters and setters here and does
		// not depend on LocalNodeStore.
		logging.Fatal(logger, "getLocalNode: unexpected error", logfields.Error, err)
	}
	return node
}
// addresses holds the router information for the local node,
// guarded by its read-write mutex.
type addresses struct {
	mu         lock.RWMutex
	routerInfo RouterInfo
}
// RouterInfo exposes the CIDRs associated with the router.
// Implementations live outside this file.
type RouterInfo interface {
	GetCIDRs() []net.IPNet
}
// makeIPv6HostIP returns the hardcoded fallback IPv6 host address
// (fc00::10CA:1), fatally logging if it fails to parse.
func makeIPv6HostIP(logger *slog.Logger) net.IP {
	const ipstr = "fc00::10CA:1"

	ip := net.ParseIP(ipstr)
	if ip == nil {
		logging.Fatal(logger, "Unable to parse IP", logfields.IPAddr, ipstr)
	}
	return ip
}
// initDefaultPrefix initializes the node address and allocation prefixes with
// default values derived from the system. device can be set to the primary
// network device of the system in which case the first address with global
// scope will be regarded as the system's node address.
func initDefaultPrefix(logger *slog.Logger, device string) {
	// Apply the defaults through the LocalNodeStore update path so the
	// mutation goes through the store rather than touching state directly.
	localNode.Update(func(n *LocalNode) {
		setDefaultPrefix(logger, option.Config, device, n)
	})
}
// setDefaultPrefix fills in missing node IPs and pod allocation CIDRs on
// 'node', deriving them from the addresses present on 'device' (or any
// device if empty) and from CIDRs already set on the node. Fields that are
// already populated are left untouched. Called under LocalNodeStore.Update,
// so 'node' may be mutated freely.
func setDefaultPrefix(logger *slog.Logger, cfg *option.DaemonConfig, device string, node *LocalNode) {
	if cfg.EnableIPv4 {
		isIPv6 := false
		// Prefer the already-known CiliumInternalIP when scanning for a
		// global IPv4 address on the device.
		ip, err := firstGlobalV4Addr(device, node.GetCiliumInternalIP(isIPv6), preferPublicIP)
		if err != nil {
			// Best effort: no usable IPv4 address found, leave the IPv4
			// fields unset rather than failing hard.
			return
		}
		if node.GetNodeIP(isIPv6) == nil {
			node.SetNodeInternalIP(ip)
		}
		ipv4range := node.IPv4AllocCIDR
		ipv6range := node.IPv6AllocCIDR
		if ipv4range == nil {
			// If the IPv6AllocRange is not nil then the IPv4 allocation should be
			// derived from the IPv6AllocRange: bytes 8-11 of the IPv6 CIDR
			// are reused as an IPv4 address.
			//                     vvvv vvvv
			// FD00:0000:0000:0000:0000:0000:0000:0000
			if ipv6range != nil {
				ip = net.IPv4(
					ipv6range.IP[8],
					ipv6range.IP[9],
					ipv6range.IP[10],
					ipv6range.IP[11])
			}
			// Build the default allocation range from the last octet of ip;
			// assumes defaults.DefaultIPv4Prefix is a format string consuming
			// that octet — TODO confirm against the defaults package.
			v4range := fmt.Sprintf(defaults.DefaultIPv4Prefix+"/%d",
				ip.To4()[3], defaults.DefaultIPv4PrefixLen)
			_, ip4net, err := net.ParseCIDR(v4range)
			if err != nil {
				// Can only happen if the defaults themselves are broken.
				logging.Panic(logger, "BUG: Invalid default IPv4 prefix",
					logfields.Error, err,
					logfields.V4Prefix, v4range,
				)
			}
			node.IPv4AllocCIDR = cidr.NewCIDR(ip4net)
			logger.Debug(
				"Using autogenerated IPv4 allocation range",
				logfields.V4Prefix, node.IPv4AllocCIDR,
			)
		}
	}
	if cfg.EnableIPv6 {
		isIPv6 := true
		ipv4range := node.IPv4AllocCIDR
		ipv6range := node.IPv6AllocCIDR
		if node.GetNodeIP(isIPv6) == nil {
			// Find a IPv6 node address first
			addr, _ := firstGlobalV6Addr(device, node.GetCiliumInternalIP(isIPv6), preferPublicIP)
			if addr == nil {
				// Fall back to a fixed, device-independent ULA address.
				addr = makeIPv6HostIP(logger)
			}
			node.SetNodeInternalIP(addr)
		}
		if ipv6range == nil {
			var v6range string
			var logMessage string
			if ipv4range != nil {
				// The IPv6 allocation should be derived from the IPv4 allocation.
				ip := node.IPv4AllocCIDR.IP
				v6range = fmt.Sprintf("%s%02x%02x:%02x%02x:0:0/%d",
					cfg.IPv6ClusterAllocCIDRBase, ip[0], ip[1], ip[2], ip[3], 96)
				logMessage = "Using autogenerated IPv6 allocation range from IPv4 allocation"
			} else {
				// The IPv6 allocation is derived from the node's IPv6 address.
				ip := node.GetNodeIP(isIPv6)
				if ip == nil {
					// This should not happen, as we set the node IP above.
					logging.Panic(logger, "BUG: Node IPv6 address is not available to derive IPv6 pod CIDR")
				}
				// We use the last 4 bytes of the node's IPv6 address to build the pod CIDR.
				// This makes the allocation logic independent of IPv4.
				v6range = fmt.Sprintf("%s%02x%02x:%02x%02x:0:0/%d",
					cfg.IPv6ClusterAllocCIDRBase, ip[12], ip[13], ip[14], ip[15], 96)
				logMessage = "Using autogenerated IPv6 allocation range from node IPv6"
			}
			_, ip6net, err := net.ParseCIDR(v6range)
			if err != nil {
				logging.Panic(logger, "BUG: Invalid default IPv6 prefix",
					logfields.Error, err,
					logfields.V6Prefix, v6range,
				)
			}
			node.IPv6AllocCIDR = cidr.NewCIDR(ip6net)
			logger.Debug(
				logMessage,
				logfields.V6Prefix, node.IPv6AllocCIDR,
			)
		}
	}
}
func clone(ip net.IP) net.IP {
if ip == nil {
return nil
}
dup := make(net.IP, len(ip))
copy(dup, ip)
return dup
}
// GetServiceLoopbackIPv4 returns the service loopback IPv4 address of this node.
func GetServiceLoopbackIPv4(logger *slog.Logger) net.IP {
	return getLocalNode(logger).Local.ServiceLoopbackIPv4
}

// SetServiceLoopbackIPv4 sets the service loopback IPv4 address of this node.
func SetServiceLoopbackIPv4(ip net.IP) {
	localNode.Update(func(n *LocalNode) {
		n.Local.ServiceLoopbackIPv4 = ip
	})
}
// GetIPv4AllocRange returns a deep copy of the IPv4 allocation prefix of
// this node.
// NOTE(review): presumably DeepCopy on a nil *cidr.CIDR returns nil when the
// range is unset — confirm against the cidr package.
func GetIPv4AllocRange(logger *slog.Logger) *cidr.CIDR {
	return getLocalNode(logger).IPv4AllocCIDR.DeepCopy()
}

// GetIPv6AllocRange returns a deep copy of the IPv6 allocation prefix of
// this node.
func GetIPv6AllocRange(logger *slog.Logger) *cidr.CIDR {
	return getLocalNode(logger).IPv6AllocCIDR.DeepCopy()
}
// IsNodeIP determines if addr is one of the node's IP addresses,
// and returns which type of address it is. "" is returned if addr
// is not one of the node's IP addresses.
func IsNodeIP(logger *slog.Logger, addr netip.Addr) addressing.AddressType {
	return getLocalNode(logger).IsNodeIP(addr)
}
// GetIPv4 returns one of the IPv4 node address available with the following
// priority:
// - NodeInternalIP
// - NodeExternalIP
// - other IP address type.
// It must be reachable on the network.
func GetIPv4(logger *slog.Logger) net.IP {
n := getLocalNode(logger)
return clone(n.GetNodeIP(false))
}
// GetInternalIPv4 returns node internal ipv4 address else return nil.
func GetInternalIPv4(logger *slog.Logger) net.IP {
n := getLocalNode(logger)
return clone(n.GetNodeInternalIPv4())
}
// GetInternalIPv6 returns node internal ipv6 address else return nil.
func GetInternalIPv6(logger *slog.Logger) net.IP {
n := getLocalNode(logger)
return clone(n.GetNodeInternalIPv6())
}
// GetCiliumEndpointNodeIP is the node IP that will be referenced by CiliumEndpoints with endpoints
// running on this node.
// NOTE(review): when the chosen address is unset the nil net.IP stringifies
// to "<nil>" — confirm callers tolerate that.
func GetCiliumEndpointNodeIP(logger *slog.Logger) string {
	n := getLocalNode(logger)
	// Use the IPv4 node IP when IPv4 is enabled and the underlay is IPv4;
	// otherwise fall back to the IPv6 node IP.
	if option.Config.EnableIPv4 && n.Local.UnderlayProtocol == tunnel.IPv4 {
		return GetIPv4(logger).String()
	}
	return GetIPv6(logger).String()
}
// SetInternalIPv4Router sets the cilium internal IPv4 node address, it is allocated from the node prefix.
// This must not be conflated with k8s internal IP as this IP address is only relevant within the
// Cilium-managed network (this means within the node for direct routing mode and on the overlay
// for tunnel mode).
func SetInternalIPv4Router(ip net.IP) {
	localNode.Update(func(n *LocalNode) {
		n.SetCiliumInternalIP(ip)
	})
}

// GetInternalIPv4Router returns the cilium internal IPv4 node address. This must not be conflated with
// k8s internal IP as this IP address is only relevant within the Cilium-managed network (this means
// within the node for direct routing mode and on the overlay for tunnel mode).
func GetInternalIPv4Router(logger *slog.Logger) net.IP {
	n := getLocalNode(logger)
	// Return a defensive copy, consistent with GetIPv4 and GetIPv6Router,
	// so callers cannot mutate the address held by the LocalNodeStore.
	return clone(n.GetCiliumInternalIP(false))
}
// GetRouterInfo returns additional information for the router, the cilium_host interface.
func GetRouterInfo() RouterInfo {
	addrs.mu.RLock()
	info := addrs.routerInfo
	addrs.mu.RUnlock()
	return info
}

// SetRouterInfo sets additional information for the router, the cilium_host interface.
func SetRouterInfo(info RouterInfo) {
	addrs.mu.Lock()
	defer addrs.mu.Unlock()
	addrs.routerInfo = info
}
// SetIPv4AllocRange sets the IPv4 address pool to use when allocating
// addresses for local endpoints.
func SetIPv4AllocRange(allocCIDR *cidr.CIDR) {
	// Parameter renamed from 'net' to avoid shadowing the stdlib net package.
	localNode.Update(func(n *LocalNode) {
		n.IPv4AllocCIDR = allocCIDR
	})
}

// SetIPv6NodeRange sets the IPv6 address pool to be used on this node.
func SetIPv6NodeRange(allocCIDR *cidr.CIDR) {
	localNode.Update(func(n *LocalNode) {
		n.IPv6AllocCIDR = allocCIDR
	})
}
// AutoComplete completes the parts of addressing that can be auto derived
// and validates that the per-family allocation CIDRs are configured.
func AutoComplete(logger *slog.Logger, directRoutingDevice string) error {
	initDefaultPrefix(logger, directRoutingDevice)

	switch {
	case option.Config.EnableIPv6 && GetIPv6AllocRange(logger) == nil:
		return fmt.Errorf("IPv6 allocation CIDR is not configured. Please specify --%s", option.IPv6Range)
	case option.Config.EnableIPv4 && GetIPv4AllocRange(logger) == nil:
		return fmt.Errorf("IPv4 allocation CIDR is not configured. Please specify --%s", option.IPv4Range)
	}
	return nil
}
// ValidatePostInit validates the entire addressing setup and completes it as
// required. Checks run in order so the most specific error is returned first.
func ValidatePostInit(logger *slog.Logger) error {
	if option.Config.EnableIPv4 && GetIPv4(logger) == nil {
		return fmt.Errorf("external IPv4 node address could not be derived, please configure via --ipv4-node")
	}

	// Tunneling needs at least one external node address of either family.
	if option.Config.TunnelingEnabled() && GetIPv4(logger) == nil && GetIPv6(logger) == nil {
		return fmt.Errorf("external node address could not be derived, please configure via --ipv4-node or --ipv6-node")
	}

	if option.Config.EnableIPv4 && GetInternalIPv4Router(logger) == nil {
		return fmt.Errorf("BUG: Internal IPv4 node address was not configured")
	}
	return nil
}
// GetIPv6 returns the IPv6 address of the node
func GetIPv6(logger *slog.Logger) net.IP {
n := getLocalNode(logger)
return clone(n.GetNodeIP(true))
}
// GetIPv6Router returns the IPv6 address of the router, e.g. address
// of cilium_host device.
func GetIPv6Router(logger *slog.Logger) net.IP {
n := getLocalNode(logger)
return clone(n.GetCiliumInternalIP(true))
}
// SetIPv6Router sets the IPv6 address of the router address, e.g. address
// of cilium_host device.
func SetIPv6Router(ip net.IP) {
localNode.Update(func(n *LocalNode) {
n.SetCiliumInternalIP(ip)
})
}
// GetNodeAddressing returns the NodeAddressing model for the local IPs.
// Only the enabled address families are populated.
// NOTE(review): String() is called on the router IP and alloc range without
// nil checks — assumes both are set once the family is enabled; confirm.
func GetNodeAddressing(logger *slog.Logger) *models.NodeAddressing {
	a := &models.NodeAddressing{}
	if option.Config.EnableIPv6 {
		a.IPV6 = &models.NodeAddressingElement{
			Enabled:    option.Config.EnableIPv6,
			IP:         GetIPv6Router(logger).String(),
			AllocRange: GetIPv6AllocRange(logger).String(),
		}
	}
	if option.Config.EnableIPv4 {
		a.IPV4 = &models.NodeAddressingElement{
			Enabled:    option.Config.EnableIPv4,
			IP:         GetInternalIPv4Router(logger).String(),
			AllocRange: GetIPv4AllocRange(logger).String(),
		}
	}
	return a
}
// getCiliumHostIPsFromFile parses the generated node_config header at
// nodeConfig and extracts the restored router addresses: the IPv4 gateway
// and the IPv6 router IP. Missing or unparsable entries yield nil results;
// a missing file yields (nil, nil).
func getCiliumHostIPsFromFile(nodeConfig string) (ipv4GW, ipv6Router net.IP) {
	// ipLen is the length of the IP address stored in the node_config.h
	// it has the same length for both IPv4 and IPv6.
	const ipLen = net.IPv6len

	var hasIPv4, hasIPv6 bool
	f, err := os.Open(nodeConfig)
	switch {
	case err != nil:
		// File absent or unreadable: report nothing restored.
	default:
		defer f.Close()
		scanner := bufio.NewScanner(f)
		// NOTE(review): scanner.Err() is not checked after the loop; a read
		// error is treated like end-of-file (best effort) — confirm intended.
		for scanner.Scan() {
			txt := scanner.Text()
			switch {
			case !hasIPv6 && strings.Contains(txt, defaults.RestoreV6Addr):
				// Line format: "<define> { 0x.., 0x.., ... }" — the byte
				// array after the marker is the raw IPv6 address.
				defineLine := strings.Split(txt, defaults.RestoreV6Addr)
				if len(defineLine) != 2 {
					continue
				}
				ipv6 := common.C2GoArray(defineLine[1])
				if len(ipv6) != ipLen {
					continue
				}
				ipv6Router = net.IP(ipv6)
				hasIPv6 = true
			case !hasIPv4 && strings.Contains(txt, defaults.RestoreV4Addr):
				defineLine := strings.Split(txt, defaults.RestoreV4Addr)
				if len(defineLine) != 2 {
					continue
				}
				// The IPv4 address is stored padded to IPv6 length.
				ipv4 := common.C2GoArray(defineLine[1])
				if len(ipv4) != ipLen {
					continue
				}
				ipv4GW = net.IP(ipv4)
				hasIPv4 = true

			// Legacy cases based on the header defines:
			case !hasIPv4 && strings.Contains(txt, "IPV4_GATEWAY"):
				// #define IPV4_GATEWAY 0xee1c000a
				defineLine := strings.Split(txt, " ")
				if len(defineLine) != 3 {
					continue
				}
				ipv4GWHex := strings.TrimPrefix(defineLine[2], "0x")
				ipv4GWUint64, err := strconv.ParseUint(ipv4GWHex, 16, 32)
				if err != nil {
					continue
				}
				if ipv4GWUint64 != 0 {
					// The hex literal is stored in native byte order.
					bs := make([]byte, net.IPv4len)
					byteorder.Native.PutUint32(bs, uint32(ipv4GWUint64))
					ipv4GW = net.IPv4(bs[0], bs[1], bs[2], bs[3])
					hasIPv4 = true
				}
			}
		}
	}
	return ipv4GW, ipv6Router
}
// ExtractCiliumHostIPFromFS returns the Cilium IPv4 gateway and router IPv6 address from
// the node_config.h file if is present; or by deriving it from
// defaults.HostDevice interface, on which only the IPv4 is possible to derive.
func ExtractCiliumHostIPFromFS(logger *slog.Logger) (ipv4GW, ipv6Router net.IP) {
	nodeConfig := option.Config.GetNodeConfigPath()
	ipv4GW, ipv6Router = getCiliumHostIPsFromFile(nodeConfig)

	if ipv4GW == nil && ipv6Router == nil {
		// Nothing restored from node_config; fall back to the addresses
		// currently assigned to the host device.
		return getCiliumHostIPsFromNetDev(logger, defaults.HostDevice)
	}

	logger.Info(
		"Restored router address from node_config",
		logfields.IPv4, ipv4GW,
		logfields.IPv6, ipv6Router,
		logfields.File, nodeConfig,
	)
	return ipv4GW, ipv6Router
}
// SetIPsecKeyIdentity sets the IPsec key identity, an opaque value used to
// identify encryption keys used on the node.
func SetIPsecKeyIdentity(id uint8) {
	localNode.Update(func(n *LocalNode) {
		n.EncryptionKey = id
	})
}

// GetOptOutNodeEncryption returns whether the local node has opted out of
// node-to-node encryption.
func GetOptOutNodeEncryption(logger *slog.Logger) bool {
	return getLocalNode(logger).Local.OptOutNodeEncryption
}
// SetEndpointHealthIPv4 sets the IPv4 cilium-health endpoint address.
func SetEndpointHealthIPv4(ip net.IP) {
	localNode.Update(func(n *LocalNode) {
		n.IPv4HealthIP = ip
	})
}

// GetEndpointHealthIPv4 returns the IPv4 cilium-health endpoint address.
// NOTE(review): unlike GetIPv4 this returns the stored slice without
// copying — callers must not mutate it; confirm intended.
func GetEndpointHealthIPv4(logger *slog.Logger) net.IP {
	return getLocalNode(logger).IPv4HealthIP
}

// SetEndpointHealthIPv6 sets the IPv6 cilium-health endpoint address.
func SetEndpointHealthIPv6(ip net.IP) {
	localNode.Update(func(n *LocalNode) {
		n.IPv6HealthIP = ip
	})
}

// GetEndpointHealthIPv6 returns the IPv6 cilium-health endpoint address.
func GetEndpointHealthIPv6(logger *slog.Logger) net.IP {
	return getLocalNode(logger).IPv6HealthIP
}

// SetIngressIPv4 sets the local IPv4 source address for Cilium Ingress.
func SetIngressIPv4(ip net.IP) {
	localNode.Update(func(n *LocalNode) {
		n.IPv4IngressIP = ip
	})
}

// GetIngressIPv4 returns the local IPv4 source address for Cilium Ingress.
func GetIngressIPv4(logger *slog.Logger) net.IP {
	return getLocalNode(logger).IPv4IngressIP
}

// SetIngressIPv6 sets the local IPv6 source address for Cilium Ingress.
func SetIngressIPv6(ip net.IP) {
	localNode.Update(func(n *LocalNode) {
		n.IPv6IngressIP = ip
	})
}

// GetIngressIPv6 returns the local IPv6 source address for Cilium Ingress.
func GetIngressIPv6(logger *slog.Logger) net.IP {
	return getLocalNode(logger).IPv6IngressIP
}
// GetEndpointEncryptKeyIndex returns the encryption key value for an endpoint
// owned by the local node.
// With IPSec encryption, this is the ID of the currently loaded key.
// With WireGuard, this returns a non-zero static value.
// Note that the key index returned by this function is only valid for _endpoints_
// of the local node. If you want to obtain the key index of the local node itself,
// access the `EncryptionKey` field via the LocalNodeStore.
func GetEndpointEncryptKeyIndex(logger *slog.Logger, wgCfg wgTypes.WireguardConfig) uint8 {
	if option.Config.EnableIPSec {
		return getLocalNode(logger).EncryptionKey
	}
	if wgCfg.Enabled() {
		return wgTypes.StaticEncryptKey
	}
	// Encryption disabled: endpoints carry no key index.
	return 0
}
// WithTestLocalNodeStore sets the 'localNode' to a temporary instance and
// runs the given test. Afterwards the 'localNode' is restored to nil.
// This is a temporary workaround for tests until the LocalNodeStoreCell can be
// used.
func WithTestLocalNodeStore(runTest func()) {
	SetTestLocalNodeStore()
	// Restore 'localNode' to nil even if runTest panics.
	defer UnsetTestLocalNodeStore()

	runTest()
}
// SetTestLocalNodeStore installs a fresh test LocalNodeStore into the
// package-level 'localNode' variable so the legacy getters and setters can
// reach it. Panics if a store is already installed.
func SetTestLocalNodeStore() {
	if localNode != nil {
		panic("localNode already set")
	}

	localNode = NewTestLocalNodeStore(LocalNode{})
}

// UnsetTestLocalNodeStore clears the test store installed by
// SetTestLocalNodeStore.
func UnsetTestLocalNodeStore() {
	localNode = nil
}
// UpdateLocalNodeInTest provides access to modifying the local node
// information from tests that are not yet using hive and the LocalNodeStoreCell.
func UpdateLocalNodeInTest(mod func(n *LocalNode)) {
	if localNode == nil {
		panic("localNode not set, use node.LocalNodeStoreCell or WithTestLocalNodeStore()?")
	}

	localNode.Update(mod)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !darwin
package node
import (
"fmt"
"log/slog"
"net"
"sort"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/datapath/linux/safenetlink"
"github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// firstGlobalAddr returns the first address of the given family usable as a
// node address, preferring preferredIP when present and public addresses
// over private ones when preferPublic is set. If intf names an existing
// interface, only its addresses are considered (and no IPs are excluded).
// When no universe-scope address matches, the search retries with site
// scope, and finally retries across all interfaces (see the goto labels).
func firstGlobalAddr(intf string, preferredIP net.IP, family int, preferPublic bool) (net.IP, error) {
	var link netlink.Link
	var ipLen int
	var err error

	ipsToExclude := GetExcludedIPs()
	linkScopeMax := unix.RT_SCOPE_UNIVERSE
	if family == netlink.FAMILY_V4 {
		ipLen = 4
	} else {
		ipLen = 16
	}

	if intf != "" && intf != "undefined" {
		link, err = safenetlink.LinkByName(intf)
		if err != nil {
			// Device not found: fall back to scanning all interfaces.
			link = nil
		} else {
			// A specific device was requested: do not exclude any IPs.
			ipsToExclude = []net.IP{}
		}
	}

retryInterface:
	addr, err := safenetlink.AddrList(link, family)
	if err != nil {
		return nil, err
	}

retryScope:
	ipsPublic := []netlink.Addr{}
	ipsPrivate := []netlink.Addr{}
	hasPreferred := false

	for _, a := range addr {
		// Larger rtnetlink scope values mean narrower scope; reject
		// anything narrower than the currently accepted maximum.
		if a.Scope > linkScopeMax {
			continue
		}
		if ip.ListContainsIP(ipsToExclude, a.IP) {
			continue
		}
		if len(a.IP) < ipLen {
			continue
		}
		isPreferredIP := a.IP.Equal(preferredIP)
		if a.Flags&unix.IFA_F_SECONDARY > 0 && !isPreferredIP {
			// Skip secondary addresses if they're not the preferredIP
			continue
		}
		if ip.IsPublicAddr(a.IP) {
			ipsPublic = append(ipsPublic, a)
		} else {
			ipsPrivate = append(ipsPrivate, a)
		}
		// If the IP is the same as the preferredIP, that
		// means that maybe it is restored from node_config.h,
		// so if it is present we prefer this one, even if it
		// is a secondary address.
		if isPreferredIP {
			hasPreferred = true
		}
	}

	if hasPreferred && !preferPublic {
		return preferredIP, nil
	}

	if len(ipsPublic) != 0 {
		if hasPreferred && ip.IsPublicAddr(preferredIP) {
			return preferredIP, nil
		}

		// Just make sure that we always return the same one and not a
		// random one. More info in the issue GH-7637.
		sort.SliceStable(ipsPublic, func(i, j int) bool {
			return ipsPublic[i].LinkIndex < ipsPublic[j].LinkIndex
		})

		return ipsPublic[0].IP, nil
	}

	if len(ipsPrivate) != 0 {
		if hasPreferred && !ip.IsPublicAddr(preferredIP) {
			return preferredIP, nil
		}

		// Same stable order, see above ipsPublic.
		sort.SliceStable(ipsPrivate, func(i, j int) bool {
			return ipsPrivate[i].LinkIndex < ipsPrivate[j].LinkIndex
		})

		return ipsPrivate[0].IP, nil
	}

	// First, if a device is specified, fall back to anything wider
	// than link (site, custom, ...) before trying all devices.
	if linkScopeMax != unix.RT_SCOPE_SITE {
		linkScopeMax = unix.RT_SCOPE_SITE
		goto retryScope
	}

	// Fall back with retry for all interfaces with full scope again
	// (which then goes back to lower scope again for all interfaces
	// before we give up completely).
	if link != nil {
		linkScopeMax = unix.RT_SCOPE_UNIVERSE
		link = nil
		goto retryInterface
	}

	return nil, fmt.Errorf("No address found")
}
// firstGlobalV4Addr returns the first IPv4 global IP of an interface,
// where the IPs are sorted in creation order (oldest to newest).
//
// All secondary IPs, except the preferredIP, are filtered out.
//
// Public IPs are preferred over private ones. When intf is defined only
// IPs belonging to that interface are considered.
//
// If preferredIP is present in the IP list it is returned irrespective of
// the sort order. However, if preferPublic is true and preferredIP is a
// private IP, a public IP will be returned if it is assigned to the intf.
//
// Passing intf and preferredIP will only return preferredIP if it is in
// the IPs that belong to intf.
//
// In all cases, if intf is not found all interfaces are considered.
//
// If a intf-specific global address couldn't be found, we retry to find
// an address with reduced scope (site, custom) on that particular device.
//
// If the latter fails as well, we retry on all interfaces beginning with
// universe scope again (and then falling back to reduced scope).
//
// In case none of the above helped, we bail out with error.
func firstGlobalV4Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
	return firstGlobalAddr(intf, preferredIP, netlink.FAMILY_V4, preferPublic)
}

// firstGlobalV6Addr returns the first IPv6 global IP of an interface, see
// firstGlobalV4Addr for more details.
func firstGlobalV6Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
	return firstGlobalAddr(intf, preferredIP, netlink.FAMILY_V6, preferPublic)
}
// getCiliumHostIPsFromNetDev derives the router addresses from the addresses
// currently assigned to devName: a link-scoped IPv4 address becomes the IPv4
// gateway, and a non-link-scoped IPv6 address becomes the IPv6 router IP.
// Returns (nil, nil) if the device or its addresses cannot be listed.
// NOTE(review): when multiple matching addresses exist the last one listed
// wins — confirm intended.
func getCiliumHostIPsFromNetDev(logger *slog.Logger, devName string) (ipv4GW, ipv6Router net.IP) {
	hostDev, err := safenetlink.LinkByName(devName)
	if err != nil {
		return nil, nil
	}
	addrs, err := safenetlink.AddrList(hostDev, netlink.FAMILY_ALL)
	if err != nil {
		return nil, nil
	}
	for _, addr := range addrs {
		if addr.IP.To4() != nil {
			// IPv4: the cilium_host gateway address carries link scope.
			if addr.Scope == int(netlink.SCOPE_LINK) {
				ipv4GW = addr.IP
			}
		} else {
			// IPv6: the router address is a regular (non link-scope) address.
			if addr.Scope != int(netlink.SCOPE_LINK) {
				ipv6Router = addr.IP
			}
		}
	}

	if ipv4GW != nil || ipv6Router != nil {
		logger.Info(
			"Restored router address from device",
			logfields.IPv4, ipv4GW,
			logfields.IPv6, ipv6Router,
			logfields.Device, devName,
		)
	}
	return ipv4GW, ipv6Router
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package addressing
import (
"net"
)
// AddressType represents a type of IP address for a node. They are copied
// from k8s.io/api/core/v1/types.go to avoid pulling in a lot of Kubernetes
// imports into this package.
type AddressType string

const (
	NodeHostName         AddressType = "Hostname"
	NodeExternalIP       AddressType = "ExternalIP"
	NodeInternalIP       AddressType = "InternalIP"
	NodeExternalDNS      AddressType = "ExternalDNS"
	NodeInternalDNS      AddressType = "InternalDNS"
	NodeCiliumInternalIP AddressType = "CiliumInternalIP"
)

// Address is a typed node address that can render itself as a string.
type Address interface {
	// AddrType returns the kind of address (internal, external, ...).
	AddrType() AddressType
	// ToString returns the textual representation of the address.
	ToString() string
}
// ExtractNodeIP returns one of the provided IP addresses available with the following priority:
// - NodeInternalIP
// - NodeExternalIP
// - other IP address type
// CiliumInternalIP addresses are ignored. nil is returned if no parsable
// address of the requested family is found.
func ExtractNodeIP[T Address](addrs []T, ipv6 bool) net.IP {
	var fallback net.IP
	for _, a := range addrs {
		parsed := net.ParseIP(a.ToString())
		if parsed == nil {
			continue
		}
		// Skip addresses of the wrong family.
		if isV4 := parsed.To4() != nil; isV4 == ipv6 {
			continue
		}
		switch a.AddrType() {
		case NodeCiliumInternalIP:
			// CiliumInternalIPs are never considered node IPs.
		case NodeInternalIP:
			// A cluster-internal IP always wins; return immediately.
			return parsed
		case NodeExternalIP:
			// An external IP beats any previously seen other-type address.
			fallback = parsed
		default:
			// Last resort: keep the first other-type address, but never
			// overwrite an external IP candidate.
			if fallback == nil {
				fallback = parsed
			}
		}
	}
	return fallback
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package node
import (
"log/slog"
"sync"
)
var (
	// localBootID caches the boot ID read on first use.
	localBootID string
	// logOnce ensures the boot ID is read and logged exactly once.
	logOnce sync.Once
)

// GetBootID returns the boot ID of the node, reading it lazily on the first
// call. Returns the empty string if the boot ID could not be read.
func GetBootID(logger *slog.Logger) string {
	logOnce.Do(func() {
		initLocalBootID(logger)
	})
	return localBootID
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build linux
package node
import (
"log/slog"
"os"
"strings"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
)
// initLocalBootID populates localBootID from the configured boot ID file.
// On read failure a warning is logged and localBootID remains empty.
func initLocalBootID(logger *slog.Logger) {
	raw, err := os.ReadFile(option.Config.BootIDFile)
	if err != nil {
		logger.Warn("Could not read boot id from file",
			logfields.Error, err,
			logfields.File, option.Config.BootIDFile,
		)
		return
	}

	localBootID = strings.TrimSpace(string(raw))
	logger.Info("Local boot ID",
		logfields.BootID, localBootID,
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package node
const (
	// templateHostEndpointID is the placeholder ID used for the host
	// endpoint until a real ID is assigned via SetEndpointID.
	templateHostEndpointID = uint64(0xffff)
)

var (
	// endpointID holds the host endpoint ID for this node.
	// NOTE(review): accessed without synchronization — assumes all writes
	// happen during initialization; confirm.
	endpointID = templateHostEndpointID
)

// GetEndpointID returns the ID of the host endpoint for this node.
func GetEndpointID() uint64 {
	return endpointID
}

// SetEndpointID sets the ID of the host endpoint for this node.
func SetEndpointID(id uint64) {
	endpointID = id
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package node
import "net"
// excludedIPs is populated once at startup (see initExcludedIPs).
var excludedIPs []net.IP

// GetExcludedIPs returns a list of IPs from netdevices that Cilium
// needs to exclude to operate. The returned slice is shared; callers
// must not mutate it.
func GetExcludedIPs() []net.IP {
	return excludedIPs
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package node
import (
"strings"
"github.com/vishvananda/netlink"
"github.com/cilium/cilium/pkg/datapath/linux/safenetlink"
)
// init populates the excluded-IP list once at package load time, before any
// address selection takes place.
func init() {
	initExcludedIPs()
}
// initExcludedIPs appends to excludedIPs the addresses of devices Cilium
// must not use for address selection: devices whose name matches a known-bad
// prefix (e.g. "docker"), and non-dummy devices that are down.
func initExcludedIPs() {
	// We exclude below bad device prefixes from address selection ...
	prefixes := []string{
		"docker",
	}
	links, err := safenetlink.LinkList()
	if err != nil {
		// Best effort: without a device list, nothing is excluded.
		return
	}
	for _, l := range links {
		// Don't exclude dummy devices, since they may be setup by
		// processes like nodelocaldns and they aren't always brought up. See
		// https://github.com/kubernetes/dns/blob/fa0192f004c9571cf24d8e9868be07f57380fccb/pkg/netif/netif.go#L24-L36
		// Such devices in down state may still be relevant.
		if l.Type() == "dummy" {
			continue
		}
		// ... also all down devices since they won't be reachable.
		//
		// We need to check for both "up" and "unknown" state, as some
		// drivers may not implement operstate handling, and just report
		// their state as unknown even though they are operational.
		if l.Attrs().OperState == netlink.OperUp ||
			l.Attrs().OperState == netlink.OperUnknown {
			// Device is up: only exclude its IPs when its name matches a
			// bad prefix; otherwise skip it (do not exclude).
			skip := true
			for _, p := range prefixes {
				if strings.HasPrefix(l.Attrs().Name, p) {
					skip = false
					break
				}
			}
			if skip {
				continue
			}
		}
		// Device is either down or matches a bad prefix: exclude all of
		// its addresses.
		addr, err := safenetlink.AddrList(l, netlink.FAMILY_ALL)
		if err != nil {
			continue
		}
		for _, a := range addr {
			excludedIPs = append(excludedIPs, a.IP)
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package node
import (
"context"
"log/slog"
"github.com/cilium/hive/cell"
"github.com/cilium/hive/job"
"github.com/cilium/statedb"
"github.com/cilium/cilium/pkg/node/types"
"github.com/cilium/cilium/pkg/rate"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
)
// LocalNodeSynchronizer specifies how to build, and keep synchronized the local
// node object.
type LocalNodeSynchronizer interface {
	// InitLocalNode populates the initial local node state; called once on start.
	InitLocalNode(context.Context, *LocalNode) error
	// SyncLocalNode keeps the store synchronized until the context is cancelled.
	SyncLocalNode(context.Context, *LocalNodeStore)
}
// LocalNodeStoreCell provides the LocalNodeStore instance.
// The LocalNodeStore is the canonical owner of `types.Node` for the local node and
// provides a reactive API for observing and updating it.
var LocalNodeStoreCell = cell.Module(
	"local-node-store",
	"Provides LocalNodeStore for observing and updating local node info",

	// Provide both the writable table and its read-only view.
	cell.Provide(
		NewLocalNodeTable,
		statedb.RWTable[*LocalNode].ToTable,
	),

	cell.Provide(NewLocalNodeStore),
)
// LocalNodeStoreParams are the inputs needed for constructing LocalNodeStore.
type LocalNodeStoreParams struct {
	cell.In

	Logger    *slog.Logger
	Lifecycle cell.Lifecycle
	// Sync initializes and keeps the local node synchronized.
	Sync LocalNodeSynchronizer
	DB   *statedb.DB
	// Nodes is the statedb table backing the store.
	Nodes statedb.RWTable[*LocalNode]
	Jobs  job.Group
}
// LocalNodeStore is the canonical owner for the local node object and provides
// a reactive API for observing and updating the state.
type LocalNodeStore struct {
	// db and nodes give access to the statedb table holding the node.
	db    *statedb.DB
	nodes statedb.RWTable[*LocalNode]
}
// NewLocalNodeStore constructs the LocalNodeStore: it inserts a skeleton
// local node into the table immediately, then on start runs the
// synchronizer's InitLocalNode, marks the table initialized and spawns the
// background synchronization job. The package-level 'localNode' variable
// used by the legacy accessors is set on start and cleared on stop.
func NewLocalNodeStore(params LocalNodeStoreParams) (*LocalNodeStore, error) {
	wtxn := params.DB.WriteTxn(params.Nodes)

	// Register an initializer that'll mark the table initialized once we're done
	// with [LocalNodeSynchronizer.InitLocalNode].
	initDone := params.Nodes.RegisterInitializer(wtxn, "LocalNodeSynchronizer")

	// Insert the skeleton local node.
	params.Nodes.Insert(wtxn,
		&LocalNode{
			Node: types.Node{
				Name: types.GetName(),
				// Explicitly initialize the labels and annotations maps, so that
				// we don't need to always check for nil values.
				Labels:      make(map[string]string),
				Annotations: make(map[string]string),
				Source:      source.Unspec,
			},
			Local: &LocalNodeInfo{},
		})
	wtxn.Commit()

	s := &LocalNodeStore{params.DB, params.Nodes}

	params.Lifecycle.Append(cell.Hook{
		OnStart: func(ctx cell.HookContext) error {
			// Initialize the node on a copy and re-insert it; the table is
			// marked initialized even when InitLocalNode fails so readers
			// are not blocked forever.
			wtxn := params.DB.WriteTxn(params.Nodes)
			n, _, _ := params.Nodes.Get(wtxn, LocalNodeQuery)
			n = n.DeepCopy()
			err := params.Sync.InitLocalNode(ctx, n)
			params.Nodes.Insert(wtxn, n)
			initDone(wtxn)
			wtxn.Commit()
			if err != nil {
				return err
			}

			// Start the synchronization process in background
			params.Jobs.Add(
				job.OneShot(
					"sync-local-node",
					func(ctx context.Context, _ cell.Health) error {
						params.Sync.SyncLocalNode(ctx, s)
						return nil
					},
				))

			// Set the global variable still used by getters
			// and setters in address.go. We're setting it in Start
			// to catch uses of it before it's initialized.
			localNode = s
			return nil
		},
		OnStop: func(cell.HookContext) error {
			localNode = nil
			return nil
		},
	})

	return s, nil
}
// observeRatePerSecond sets the maximum number of [LocalNode] updates per second that
// [LocalNodeStore.Observe] emits. This avoids unnecessary computation when there's
// many rapid changes to the local node.
const observeRatePerSecond = 5

// Observe changes to the local node state. next is invoked with a snapshot
// of the node on every (rate-limited) change; complete is invoked exactly
// once when observation stops, with ctx.Err() if cancellation happened
// before initialization and nil otherwise.
func (s *LocalNodeStore) Observe(ctx context.Context, next func(LocalNode), complete func(error)) {
	go func() {
		// Wait until initialized before starting to observe.
		_, initWatch := s.nodes.Initialized(s.db.ReadTxn())
		select {
		case <-initWatch:
		case <-ctx.Done():
			complete(ctx.Err())
			return
		}

		limiter := rate.NewLimiter(time.Second/observeRatePerSecond, 1)
		defer limiter.Stop()
		defer complete(nil)

		for {
			lns, _, watch, _ := s.nodes.GetWatch(s.db.ReadTxn(), LocalNodeQuery)
			// Emit a copy of the current node state.
			next(*lns)

			// Throttle emissions to observeRatePerSecond.
			if err := limiter.Wait(ctx); err != nil {
				return
			}

			// Block until the node changes again or observation is cancelled.
			select {
			case <-watch:
			case <-ctx.Done():
				return
			}
		}
	}()
}
// Get retrieves the current local node. Use Get() only for inspecting the state,
// e.g. in API handlers. Do not assume the value does not change over time.
// Blocks until the store has been initialized.
func (s *LocalNodeStore) Get(ctx context.Context) (LocalNode, error) {
	// Block until the table has been marked initialized.
	_, initWatch := s.nodes.Initialized(s.db.ReadTxn())
	select {
	case <-ctx.Done():
		return LocalNode{}, ctx.Err()
	case <-initWatch:
	}

	node, _, found := s.nodes.Get(s.db.ReadTxn(), LocalNodeQuery)
	if !found {
		// The node is inserted at construction time, so it must exist.
		panic("BUG: No local node exists")
	}
	return *node, nil
}
// Update modifies the local node with a mutator. The stored node is
// deep-copied before mutation and re-inserted in a single write transaction,
// so readers never observe a partially updated node.
func (s *LocalNodeStore) Update(update func(*LocalNode)) {
	txn := s.db.WriteTxn(s.nodes)
	// Deferred so the transaction is also committed if update() panics.
	defer txn.Commit()

	ln, _, found := s.nodes.Get(txn, LocalNodeQuery)
	if !found {
		panic("BUG: No local node exists")
	}
	// Objects in the table are immutable; mutate a copy.
	ln = ln.DeepCopy()
	update(ln)

	// The Local field must always be set for the local node (it also feeds
	// the "local" table index).
	if ln.Local == nil {
		panic("BUG: Updated LocalNode has nil Local")
	}

	s.nodes.Insert(txn, ln)
}
// NewTestLocalNodeStore builds a self-contained LocalNodeStore backed by a
// fresh statedb instance and seeded with mockNode, for use in tests.
func NewTestLocalNodeStore(mockNode LocalNode) *LocalNodeStore {
	db := statedb.New()
	tbl, err := NewLocalNodeTable(db)
	if err != nil {
		panic(err)
	}

	// The Local field is mandatory for the local node; fill it in when the
	// caller left it unset.
	if mockNode.Local == nil {
		mockNode.Local = &LocalNodeInfo{}
	}

	txn := db.WriteTxn(tbl)
	tbl.Insert(txn, &mockNode)
	txn.Commit()

	return &LocalNodeStore{db, tbl}
}
// LocalNodeStoreTestCell is a convenience for tests that provides a no-op
// [LocalNodeSynchronizer]. Use [LocalNodeStoreCell] in tests when you want
// to provide your own [LocalNodeSynchronizer].
var LocalNodeStoreTestCell = cell.Group(
	cell.Provide(NewNopLocalNodeSynchronizer),
	LocalNodeStoreCell,
)

// nopLocalNodeSynchronizer is a LocalNodeSynchronizer that does nothing.
type nopLocalNodeSynchronizer struct{}

// InitLocalNode implements LocalNodeSynchronizer.
func (n nopLocalNodeSynchronizer) InitLocalNode(context.Context, *LocalNode) error {
	return nil
}

// SyncLocalNode implements LocalNodeSynchronizer.
func (n nopLocalNodeSynchronizer) SyncLocalNode(context.Context, *LocalNodeStore) {
}

// Compile-time check that the no-op implementation satisfies the interface.
var _ LocalNodeSynchronizer = nopLocalNodeSynchronizer{}

// NewNopLocalNodeSynchronizer returns the no-op synchronizer for tests.
func NewNopLocalNodeSynchronizer() LocalNodeSynchronizer {
	return nopLocalNodeSynchronizer{}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package node
import (
"net"
"slices"
"strings"
"github.com/cilium/statedb"
"github.com/cilium/statedb/index"
k8stypes "k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/pkg/cidr"
"github.com/cilium/cilium/pkg/datapath/tunnel"
"github.com/cilium/cilium/pkg/node/types"
)
// LocalNode is the local Cilium node. This is derived from the k8s corev1.Node object.
//
// +k8s:deepcopy-gen=true
// +deepequal-gen=true
type LocalNode struct {
	types.Node

	// Local is non-nil if this is the local node. This carries additional
	// information about the local node that is not shared outside.
	Local *LocalNodeInfo
}
// TableHeader implements statedb.TableWritable.
func (n *LocalNode) TableHeader() []string {
	return []string{"Name", "Source", "Addresses"}
}
// TableRow implements statedb.TableWritable. Addresses are rendered as
// "<type>:<ip>" pairs, sorted for stable output.
func (n *LocalNode) TableRow() []string {
	addrs := make([]string, 0, len(n.IPAddresses))
	for _, a := range n.IPAddresses {
		addrs = append(addrs, string(a.Type)+":"+a.ToString())
	}
	slices.Sort(addrs)

	return []string{
		n.Fullname(),
		string(n.Source),
		strings.Join(addrs, ", "),
	}
}

var _ statedb.TableWritable = &LocalNode{}
// LocalNodeInfo is the additional information about the local node that
// is only used internally.
//
// +k8s:deepcopy-gen=true
// +deepequal-gen=true
type LocalNodeInfo struct {
	// OptOutNodeEncryption will make the local node opt-out of node-to-node
	// encryption
	OptOutNodeEncryption bool

	// Unique identifier of the Kubernetes node, used to construct the
	// corresponding owner reference.
	UID k8stypes.UID

	// ID of the node assigned by the cloud provider.
	ProviderID string

	// v4 CIDR in which pod IPs are routable
	IPv4NativeRoutingCIDR *cidr.CIDR

	// v6 CIDR in which pod IPs are routable
	IPv6NativeRoutingCIDR *cidr.CIDR

	// ServiceLoopbackIPv4 is the source address used for SNAT when a Pod talks to
	// itself through a Service.
	ServiceLoopbackIPv4 net.IP

	// IsBeingDeleted indicates that the local node is being deleted.
	IsBeingDeleted bool

	// UnderlayProtocol is the IP family of our underlay.
	UnderlayProtocol tunnel.UnderlayProtocol
}
const (
	// LocalNodeTableName is the statedb table name for local node objects.
	LocalNodeTableName = "local-node"
)

var (
	// LocalNodeNameIndex indexes nodes uniquely by their full name.
	LocalNodeNameIndex = statedb.Index[*LocalNode, string]{
		Name: "name",
		FromObject: func(obj *LocalNode) index.KeySet {
			return index.NewKeySet(index.String(obj.Fullname()))
		},
		FromKey:    index.String,
		FromString: index.FromString,
		Unique:     true,
	}
	// NodeByName queries the table by node name.
	NodeByName = LocalNodeNameIndex.Query

	// LocalNodeLocalIndex indexes the single node that has Local set,
	// allowing the local node to be looked up without knowing its name.
	LocalNodeLocalIndex = statedb.Index[*LocalNode, bool]{
		Name: "local",
		FromObject: func(obj *LocalNode) index.KeySet {
			if obj.Local == nil {
				// Don't add remote nodes to this index at all.
				return index.KeySet{}
			}
			return index.NewKeySet(index.Bool(true))
		},
		FromKey:    index.Bool,
		FromString: index.BoolString,
		Unique:     true,
	}
	// NodeByLocal queries by the "local" flag.
	NodeByLocal = LocalNodeLocalIndex.Query
	// LocalNodeQuery selects the (unique) local node.
	LocalNodeQuery = NodeByLocal(true)
)
// NewLocalNodeTable creates and registers the statedb table for nodes,
// indexed both by name and by whether the node is the local one.
func NewLocalNodeTable(db *statedb.DB) (statedb.RWTable[*LocalNode], error) {
	return statedb.NewTable(
		db,
		LocalNodeTableName,
		LocalNodeNameIndex,
		LocalNodeLocalIndex,
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"encoding/json"
"errors"
"fmt"
"net"
"net/netip"
"path"
"slices"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/annotation"
"github.com/cilium/cilium/pkg/cidr"
cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/defaults"
ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
"github.com/cilium/cilium/pkg/kvstore/store"
"github.com/cilium/cilium/pkg/node/addressing"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/source"
)
// Identity represents the node identity of a node.
type Identity struct {
	// Name is the name of the node.
	Name string
	// Cluster is the name of the cluster the node belongs to.
	Cluster string
}
// String returns the string representation of the NodeIdentity, in the
// form "<cluster>/<name>".
func (nn Identity) String() string {
	return path.Join(nn.Cluster, nn.Name)
}
// appendAllocCDIR records podCIDR on the node: the first CIDR seen per
// address family becomes the primary alloc CIDR, any further CIDRs of that
// family are kept as secondary alloc CIDRs.
func (n *Node) appendAllocCDIR(podCIDR *cidr.CIDR) {
	isIPv4 := podCIDR.IP.To4() != nil
	switch {
	case isIPv4 && n.IPv4AllocCIDR == nil:
		n.IPv4AllocCIDR = podCIDR
	case isIPv4:
		n.IPv4SecondaryAllocCIDRs = append(n.IPv4SecondaryAllocCIDRs, podCIDR)
	case n.IPv6AllocCIDR == nil:
		n.IPv6AllocCIDR = podCIDR
	default:
		n.IPv6SecondaryAllocCIDRs = append(n.IPv6SecondaryAllocCIDRs, podCIDR)
	}
}
// ParseCiliumNode parses a CiliumNode custom resource and returns a Node
// instance. Invalid IPs and CIDRs are silently ignored.
func ParseCiliumNode(n *ciliumv2.CiliumNode) (node Node) {
	wireguardPubKey, _ := annotation.Get(n, annotation.WireguardPubKey, annotation.WireguardPubKeyAlias)
	node = Node{
		Name:            n.Name,
		EncryptionKey:   uint8(n.Spec.Encryption.Key),
		Cluster:         option.Config.ClusterName,
		ClusterID:       option.Config.ClusterID,
		Source:          source.CustomResource,
		Labels:          n.ObjectMeta.Labels,
		Annotations:     n.ObjectMeta.Annotations,
		NodeIdentity:    uint32(n.Spec.NodeIdentity),
		WireguardPubKey: wireguardPubKey,
		BootID:          n.Spec.BootID,
	}

	// Record pod CIDRs: the plain PodCIDRs list first, then any CIDRs
	// allocated from multi-pool IPAM pools. Unparsable entries are
	// skipped on purpose.
	addCIDR := func(s string) {
		if ipnet, err := cidr.ParseCIDR(s); err == nil {
			node.appendAllocCDIR(ipnet)
		}
	}
	for _, cidrString := range n.Spec.IPAM.PodCIDRs {
		addCIDR(cidrString)
	}
	for _, pool := range n.Spec.IPAM.Pools.Allocated {
		for _, podCIDR := range pool.CIDRs {
			addCIDR(string(podCIDR))
		}
	}

	// net.ParseIP returns nil for empty/invalid input, so unset health
	// and ingress addresses simply stay nil.
	node.IPv4HealthIP = net.ParseIP(n.Spec.HealthAddressing.IPv4)
	node.IPv6HealthIP = net.ParseIP(n.Spec.HealthAddressing.IPv6)
	node.IPv4IngressIP = net.ParseIP(n.Spec.IngressAddressing.IPV4)
	node.IPv6IngressIP = net.ParseIP(n.Spec.IngressAddressing.IPV6)

	for _, address := range n.Spec.Addresses {
		if ip := net.ParseIP(address.IP); ip != nil {
			node.IPAddresses = append(node.IPAddresses, Address{Type: address.Type, IP: ip})
		}
	}
	return
}
// ToCiliumNode converts the node to a CiliumNode custom resource.
func (n *Node) ToCiliumNode() *ciliumv2.CiliumNode {
	// ipStr renders an optional IP, mapping nil to the empty string.
	ipStr := func(ip net.IP) string {
		if ip == nil {
			return ""
		}
		return ip.String()
	}

	// podCIDRs and ipAddrs intentionally stay nil when empty so the
	// serialized resource is unchanged.
	var (
		podCIDRs []string
		ipAddrs  []ciliumv2.NodeAddress
	)
	if n.IPv4AllocCIDR != nil {
		podCIDRs = append(podCIDRs, n.IPv4AllocCIDR.String())
	}
	if n.IPv6AllocCIDR != nil {
		podCIDRs = append(podCIDRs, n.IPv6AllocCIDR.String())
	}
	for _, c := range n.IPv4SecondaryAllocCIDRs {
		podCIDRs = append(podCIDRs, c.String())
	}
	for _, c := range n.IPv6SecondaryAllocCIDRs {
		podCIDRs = append(podCIDRs, c.String())
	}
	for _, address := range n.IPAddresses {
		ipAddrs = append(ipAddrs, ciliumv2.NodeAddress{
			Type: address.Type,
			IP:   address.IP.String(),
		})
	}

	return &ciliumv2.CiliumNode{
		ObjectMeta: v1.ObjectMeta{
			Name:        n.Name,
			Labels:      n.Labels,
			Annotations: n.Annotations,
		},
		Spec: ciliumv2.NodeSpec{
			Addresses: ipAddrs,
			HealthAddressing: ciliumv2.HealthAddressingSpec{
				IPv4: ipStr(n.IPv4HealthIP),
				IPv6: ipStr(n.IPv6HealthIP),
			},
			IngressAddressing: ciliumv2.AddressPair{
				IPV4: ipStr(n.IPv4IngressIP),
				IPV6: ipStr(n.IPv6IngressIP),
			},
			Encryption: ciliumv2.EncryptionSpec{
				Key: int(n.EncryptionKey),
			},
			IPAM: ipamTypes.IPAMSpec{
				PodCIDRs: podCIDRs,
			},
			NodeIdentity: uint64(n.NodeIdentity),
			BootID:       n.BootID,
		},
	}
}
// Node contains the node name and the list of addresses belonging to it.
//
// +k8s:deepcopy-gen=true
// +deepequal-gen=true
type Node struct {
	// Name is the name of the node. This is typically the hostname of the node.
	Name string

	// Cluster is the name of the cluster the node is associated with
	Cluster string

	// IPAddresses is the list of typed IP addresses of the node.
	IPAddresses []Address

	// IPv4AllocCIDR if set, is the IPv4 address pool out of which the node
	// allocates IPs for local endpoints from
	IPv4AllocCIDR *cidr.CIDR

	// IPv4SecondaryAllocCIDRs contains additional IPv4 CIDRs from which this
	// node allocates IPs for its local endpoints from
	IPv4SecondaryAllocCIDRs []*cidr.CIDR

	// IPv6AllocCIDR if set, is the IPv6 address pool out of which the node
	// allocates IPs for local endpoints from
	IPv6AllocCIDR *cidr.CIDR

	// IPv6SecondaryAllocCIDRs contains additional IPv6 CIDRs from which this
	// node allocates IPs for its local endpoints from
	IPv6SecondaryAllocCIDRs []*cidr.CIDR

	// IPv4HealthIP if not nil, this is the IPv4 address of the
	// cilium-health endpoint located on the node.
	IPv4HealthIP net.IP

	// IPv6HealthIP if not nil, this is the IPv6 address of the
	// cilium-health endpoint located on the node.
	IPv6HealthIP net.IP

	// IPv4IngressIP if not nil, this is the IPv4 address of the
	// Ingress listener on the node.
	IPv4IngressIP net.IP

	// IPv6IngressIP if not nil, this is the IPv6 address of the
	// Ingress listener located on the node.
	IPv6IngressIP net.IP

	// ClusterID is the unique identifier of the cluster
	ClusterID uint32

	// Source is the source where the node configuration was generated / created.
	Source source.Source

	// Key index used for transparent encryption or 0 for no encryption
	EncryptionKey uint8

	// Node labels
	Labels map[string]string

	// Node annotations
	Annotations map[string]string

	// NodeIdentity is the numeric identity allocated for the node
	NodeIdentity uint32

	// WireguardPubKey is the WireGuard public key of this node
	WireguardPubKey string

	// BootID is a unique node identifier generated on boot
	BootID string
}
// Fullname returns the node name, prefixed with the cluster name whenever
// a cluster name other than the default one has been configured.
func (n *Node) Fullname() string {
	if n.Cluster == defaults.ClusterName {
		return n.Name
	}
	return path.Join(n.Cluster, n.Name)
}
// Address is a node address which contains an IP and the address type.
//
// +k8s:deepcopy-gen=true
type Address struct {
	Type addressing.AddressType
	IP   net.IP
}

// DeepEqual reports whether a and other have the same address type and a
// byte-wise identical IP. Unlike the generated DeepEqual helpers in this
// package this one is hand-written; it now mirrors their contract by
// treating a nil other as unequal instead of dereferencing it.
func (a *Address) DeepEqual(other *Address) bool {
	if other == nil {
		return false
	}
	return a.Type == other.Type && slices.Equal(a.IP, other.IP)
}

// ToString returns the string representation of the address' IP.
func (a Address) ToString() string {
	return a.IP.String()
}

// AddrType returns the type of the address.
func (a Address) AddrType() addressing.AddressType {
	return a.Type
}
// IsNodeIP determines whether addr is one of the node's IP addresses,
// and returns which type of address it is. The empty string is returned
// when addr is not one of the node's IP addresses.
func (n *Node) IsNodeIP(addr netip.Addr) addressing.AddressType {
	for _, a := range n.IPAddresses {
		// Prefer the 4-byte form so an IPv4 address stored as a 16-byte
		// slice still compares equal to a 4-byte netip.Addr; for IPv4
		// this should not allocate. The conversion goes away once net.IP
		// is replaced with netip.Addr.
		raw := a.IP
		if v4 := a.IP.To4(); v4 != nil {
			raw = v4
		}
		if candidate, ok := netip.AddrFromSlice(raw); ok && candidate == addr {
			return a.Type
		}
	}
	return ""
}
// GetNodeIP returns one of the node's IP addresses available with the
// following priority:
// - NodeInternalIP
// - NodeExternalIP
// - other IP address type
// Nil is returned if GetNodeIP fails to extract an IP from the Node based
// on the provided address family.
//
// The priority handling is delegated to addressing.ExtractNodeIP.
func (n *Node) GetNodeIP(ipv6 bool) net.IP {
	return addressing.ExtractNodeIP[Address](n.IPAddresses, ipv6)
}
// GetExternalIP returns the ExternalIP of the k8s Node for the requested
// address family, or nil if the node has none.
func (n *Node) GetExternalIP(ipv6 bool) net.IP {
	for _, addr := range n.IPAddresses {
		sameFamily := (addr.IP.To4() == nil) == ipv6
		if sameFamily && addr.Type == addressing.NodeExternalIP {
			return addr.IP
		}
	}
	return nil
}
// GetK8sNodeIP returns the k8s Node IP: the first NodeInternalIP when one
// exists, otherwise a NodeExternalIP, otherwise nil.
func (n *Node) GetK8sNodeIP() net.IP {
	var fallback net.IP
	for _, addr := range n.IPAddresses {
		switch addr.Type {
		case addressing.NodeInternalIP:
			return addr.IP
		case addressing.NodeExternalIP:
			fallback = addr.IP
		}
	}
	return fallback
}
// GetNodeInternalIPv4 returns the internal IPv4 address of the node, or
// nil if there is none.
func (n *Node) GetNodeInternalIPv4() net.IP {
	for _, addr := range n.IPAddresses {
		if addr.Type == addressing.NodeInternalIP && addr.IP.To4() != nil {
			return addr.IP
		}
	}
	return nil
}
// GetNodeInternalIPv6 returns the internal IPv6 address of the node, or
// nil if there is none.
func (n *Node) GetNodeInternalIPv6() net.IP {
	for _, addr := range n.IPAddresses {
		if addr.Type == addressing.NodeInternalIP && addr.IP.To4() == nil {
			return addr.IP
		}
	}
	return nil
}
// GetCiliumInternalIP returns the CiliumInternalIP — the IP associated
// with cilium_host on the node — for the requested address family, or nil.
func (n *Node) GetCiliumInternalIP(ipv6 bool) net.IP {
	for _, addr := range n.IPAddresses {
		if addr.Type != addressing.NodeCiliumInternalIP {
			continue
		}
		if (addr.IP.To4() == nil) == ipv6 {
			return addr.IP
		}
	}
	return nil
}
// SetCiliumInternalIP sets the CiliumInternalIP e.g. the IP associated
// with cilium_host on the node. Passing nil removes all addresses of
// that type (see setAddress).
func (n *Node) SetCiliumInternalIP(newAddr net.IP) {
	n.setAddress(addressing.NodeCiliumInternalIP, newAddr)
}
// SetNodeExternalIP sets the NodeExternalIP. Passing nil removes all
// addresses of that type (see setAddress).
func (n *Node) SetNodeExternalIP(newAddr net.IP) {
	n.setAddress(addressing.NodeExternalIP, newAddr)
}
// SetNodeInternalIP sets the NodeInternalIP. Passing nil removes all
// addresses of that type (see setAddress).
func (n *Node) SetNodeInternalIP(newAddr net.IP) {
	n.setAddress(addressing.NodeInternalIP, newAddr)
}
// RemoveAddresses removes every address of the given type from the node.
// A fresh slice is always allocated (mirroring setAddress's copy-on-write
// behavior), now pre-sized to the known upper bound to avoid repeated
// growth during the rebuild.
func (n *Node) RemoveAddresses(typ addressing.AddressType) {
	newAddresses := make([]Address, 0, len(n.IPAddresses))
	for _, addr := range n.IPAddresses {
		if addr.Type != typ {
			newAddresses = append(newAddresses, addr)
		}
	}
	n.IPAddresses = newAddresses
}
// setAddress sets the node address of the given type, replacing an
// existing address of the same type and address family, or removing all
// addresses of that type when newIP is nil.
func (n *Node) setAddress(typ addressing.AddressType, newIP net.IP) {
	newAddr := Address{Type: typ, IP: newIP}

	// A nil IP means "unset": drop every address of this type.
	if newIP == nil {
		n.RemoveAddresses(typ)
		return
	}

	// Create a copy of the slice, so that we don't modify the
	// current one, which may be captured by any of the observers.
	n.IPAddresses = slices.Clone(n.IPAddresses)

	ipv6 := newIP.To4() == nil
	// Try first to replace an existing address with same type
	for i, addr := range n.IPAddresses {
		if addr.Type != typ {
			continue
		}
		if ipv6 != (addr.IP.To4() == nil) {
			// Don't replace if address family is different.
			continue
		}
		n.IPAddresses[i] = newAddr
		return
	}
	// No address of this type and family yet: append it.
	n.IPAddresses = append(n.IPAddresses, newAddr)
}
// GetIPByType returns the node address of the given type and address
// family, or nil when the node has no such address.
func (n *Node) GetIPByType(addrType addressing.AddressType, ipv6 bool) net.IP {
	for _, addr := range n.IPAddresses {
		if addr.Type != addrType {
			continue
		}
		if (addr.IP.To4() == nil) == ipv6 {
			return addr.IP
		}
	}
	return nil
}
// getPrimaryAddress assembles the models.NodeAddressing for the node's
// primary IPv4/IPv6 addresses and their allocation CIDRs. Unset values
// render as empty strings.
func (n *Node) getPrimaryAddress() *models.NodeAddressing {
	ipStr := func(ip net.IP) string {
		if ip == nil {
			return ""
		}
		return ip.String()
	}
	cidrStr := func(c *cidr.CIDR) string {
		if c == nil {
			return ""
		}
		return c.String()
	}
	return &models.NodeAddressing{
		IPV4: &models.NodeAddressingElement{
			Enabled:    option.Config.EnableIPv4,
			IP:         ipStr(n.GetNodeIP(false)),
			AllocRange: cidrStr(n.IPv4AllocCIDR),
		},
		IPV6: &models.NodeAddressingElement{
			Enabled:    option.Config.EnableIPv6,
			IP:         ipStr(n.GetNodeIP(true)),
			AllocRange: cidrStr(n.IPv6AllocCIDR),
		},
	}
}
// isPrimaryAddress reports whether addr is the node's primary address for
// its address family, comparing string representations.
// NOTE(review): when both addr.IP and the primary IP are nil, both render
// as "<nil>" and compare equal — presumably callers never pass a nil IP;
// verify before relying on this.
func (n *Node) isPrimaryAddress(addr Address, ipv4 bool) bool {
	return addr.IP.String() == n.GetNodeIP(!ipv4).String()
}
// getSecondaryAddresses returns a model element for every node address
// that is not the primary address of its address family.
func (n *Node) getSecondaryAddresses() []*models.NodeAddressingElement {
	result := []*models.NodeAddressingElement{}
	for _, addr := range n.IPAddresses {
		isIPv4 := addr.IP.To4() != nil
		if n.isPrimaryAddress(addr, isIPv4) {
			continue
		}
		result = append(result, &models.NodeAddressingElement{
			IP: addr.IP.String(),
		})
	}
	return result
}
// getHealthAddresses returns the addressing model for the node's
// cilium-health endpoint, or nil when no health IPs are set.
func (n *Node) getHealthAddresses() *models.NodeAddressing {
	if n.IPv4HealthIP == nil && n.IPv6HealthIP == nil {
		return nil
	}
	ipStr := func(ip net.IP) string {
		if ip == nil {
			return ""
		}
		return ip.String()
	}
	return &models.NodeAddressing{
		IPV4: &models.NodeAddressingElement{
			Enabled: option.Config.EnableIPv4,
			IP:      ipStr(n.IPv4HealthIP),
		},
		IPV6: &models.NodeAddressingElement{
			Enabled: option.Config.EnableIPv6,
			IP:      ipStr(n.IPv6HealthIP),
		},
	}
}
// getIngressAddresses returns the addressing model for the node's Ingress
// listener, or nil when no ingress IPs are set.
func (n *Node) getIngressAddresses() *models.NodeAddressing {
	if n.IPv4IngressIP == nil && n.IPv6IngressIP == nil {
		return nil
	}
	ipStr := func(ip net.IP) string {
		if ip == nil {
			return ""
		}
		return ip.String()
	}
	return &models.NodeAddressing{
		IPV4: &models.NodeAddressingElement{
			Enabled: option.Config.EnableIPv4,
			IP:      ipStr(n.IPv4IngressIP),
		},
		IPV6: &models.NodeAddressingElement{
			Enabled: option.Config.EnableIPv6,
			IP:      ipStr(n.IPv6IngressIP),
		},
	}
}
// GetModel returns the API model representation of a node.
func (n *Node) GetModel() *models.NodeElement {
	elem := &models.NodeElement{
		Name:   n.Fullname(),
		Source: string(n.Source),
	}
	elem.PrimaryAddress = n.getPrimaryAddress()
	elem.SecondaryAddresses = n.getSecondaryAddresses()
	elem.HealthEndpointAddress = n.getHealthAddresses()
	elem.IngressAddress = n.getIngressAddresses()
	return elem
}
// Identity returns the identity of the node.
func (n *Node) Identity() Identity {
	return Identity{Name: n.Name, Cluster: n.Cluster}
}
// getCluster returns the configured name of the cluster this agent
// belongs to.
func getCluster() string {
	return option.Config.ClusterName
}
// IsLocal returns true if this is the node on which the agent itself is
// running. A nil receiver is never local.
func (n *Node) IsLocal() bool {
	if n == nil {
		return false
	}
	return n.Name == GetName() && n.Cluster == getCluster()
}
// GetIPv4AllocCIDRs returns the primary IPv4 allocation CIDR (when set)
// followed by all secondary IPv4 allocation CIDRs.
func (n *Node) GetIPv4AllocCIDRs() []*cidr.CIDR {
	result := make([]*cidr.CIDR, 0, len(n.IPv4SecondaryAllocCIDRs)+1)
	if n.IPv4AllocCIDR != nil {
		result = append(result, n.IPv4AllocCIDR)
	}
	// Appending an empty source slice is a no-op, so no length guard is
	// needed.
	result = append(result, n.IPv4SecondaryAllocCIDRs...)
	return result
}
// GetIPv6AllocCIDRs returns the primary IPv6 allocation CIDR (when set)
// followed by all secondary IPv6 allocation CIDRs.
func (n *Node) GetIPv6AllocCIDRs() []*cidr.CIDR {
	result := make([]*cidr.CIDR, 0, len(n.IPv6SecondaryAllocCIDRs)+1)
	if n.IPv6AllocCIDR != nil {
		result = append(result, n.IPv6AllocCIDR)
	}
	// Appending an empty source slice is a no-op, so no length guard is
	// needed.
	result = append(result, n.IPv6SecondaryAllocCIDRs...)
	return result
}
// GetKeyNodeName constructs the kvstore key for the given cluster and
// node name, in the form "<cluster>/<node>".
//
// WARNING - STABLE API: Changing the structure of the key may break
// backwards compatibility
func GetKeyNodeName(cluster, node string) string {
	return path.Join(cluster, node)
}
// GetKeyName returns the kvstore key to be used for the node, delegating
// to GetKeyNodeName with the node's cluster and name.
func (n *Node) GetKeyName() string {
	return GetKeyNodeName(n.Cluster, n.Name)
}
// DeepKeyCopy creates a deep copy of the LocalKey, satisfying the
// store.LocalKey interface via the generated DeepCopy.
func (n *Node) DeepKeyCopy() store.LocalKey {
	return n.DeepCopy()
}
// Marshal returns the node object encoded as a JSON byte slice.
func (n *Node) Marshal() ([]byte, error) {
	return json.Marshal(n)
}
// Unmarshal parses the JSON byte slice, validates the result and updates
// the node receiver. The receiver is left untouched on error. The key
// parameter is accepted for interface compatibility and is not used.
func (n *Node) Unmarshal(key string, data []byte) error {
	var newNode Node
	if err := json.Unmarshal(data, &newNode); err != nil {
		return err
	}
	if err := newNode.validate(); err != nil {
		return err
	}
	*n = newNode
	return nil
}
// LogRepr returns a representation of the node to be used for logging:
// its JSON form when marshalling succeeds, a %#v dump otherwise.
func (n *Node) LogRepr() string {
	if b, err := n.Marshal(); err == nil {
		return string(b)
	}
	return fmt.Sprintf("%#v", n)
}
// validate sanity-checks a node: cluster and name must be set, and the
// ClusterID must pass validation unless it equals the local cluster's ID
// (assumed already validated, and additionally allowed to be zero).
func (n *Node) validate() error {
	if n.Cluster == "" {
		return errors.New("cluster is unset")
	}
	if n.Name == "" {
		return errors.New("name is unset")
	}
	// Skip the ClusterID check if it matches the local one.
	if n.ClusterID == option.Config.ClusterID {
		return nil
	}
	return cmtypes.ValidateClusterID(n.ClusterID)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"os"
"sync"
"github.com/cilium/cilium/pkg/defaults"
k8sConsts "github.com/cilium/cilium/pkg/k8s/constants"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
)
var (
	// nodeName is the name of the local node, defaulting to "localhost"
	// until init() or SetName() override it.
	nodeName = "localhost"
	// absoluteNodeName caches the (possibly cluster-prefixed) node name.
	absoluteNodeName = nodeName
	// absoluteNodeNameOnce guards the lazy computation in GetAbsoluteNodeName.
	absoluteNodeNameOnce sync.Once
)
// SetName sets the name of the local node. This will overwrite the value
// that is automatically retrieved with `os.Hostname()`.
//
// Note: This function is currently designed to only be called during the
// bootstrapping procedure of the agent where no parallelism exists. If you
// want to use this function in later stages, a mutex must be added first.
func SetName(name string) {
	nodeName = name
	// Keep the cached cluster-qualified name in sync with the new name.
	absoluteNodeName = getAbsoluteNodeName()
}
// GetName returns the name of the local node. The value returned was either
// previously set with SetName(), retrieved via `os.Hostname()`, or as a last
// resort is hardcoded to "localhost". The name is not cluster-prefixed;
// see GetAbsoluteNodeName for that.
func GetName() string {
	return nodeName
}
// GetAbsoluteNodeName returns the absolute node name combined of both
// (prefixed)cluster name and the local node name in case of
// clustered environments otherwise returns the name of the local node.
func GetAbsoluteNodeName() string {
	// Computed lazily at most once here; SetName() refreshes the cached
	// value directly, so names set later are still observed.
	absoluteNodeNameOnce.Do(func() {
		absoluteNodeName = getAbsoluteNodeName()
	})
	return absoluteNodeName
}
// getAbsoluteNodeName prefixes the local node name with the cluster name
// (separated by '/') when a non-default cluster name is configured.
func getAbsoluteNodeName() string {
	clusterName := GetClusterName()
	if clusterName == "" {
		return nodeName
	}
	return clusterName + "/" + nodeName
}
// GetClusterName returns the configured cluster name, or the empty string
// when it is unset or equal to the default cluster name.
func GetClusterName() string {
	name := option.Config.ClusterName
	if name == "" || name == defaults.ClusterName {
		return ""
	}
	return name
}
// init determines the initial local node name: the Cilium agent's node
// name environment variable wins, then the OS hostname, with "localhost"
// remaining as the fallback when both are unavailable.
func init() {
	// Give priority to the environment variable available in the Cilium agent
	if name := os.Getenv(k8sConsts.EnvNodeNameSpec); name != "" {
		nodeName = name
		return
	}
	if h, err := os.Hostname(); err != nil {
		// slogloggercheck: it's safe to use the default logger as it's for a warning unlikely to happen.
		logging.DefaultSlogLogger.Warn("Unable to retrieve local hostname", logfields.Error, err)
	} else {
		// slogloggercheck: it's safe to use the default logger as it's for a debug message.
		logging.DefaultSlogLogger.Debug("os.Hostname() returned", logfields.NodeName, h)
		nodeName = h
	}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package types
import (
net "net"
cidr "github.com/cilium/cilium/pkg/cidr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Address) DeepCopyInto(out *Address) {
	*out = *in
	if in.IP != nil {
		// net.IP is a byte slice: clone it so the copy does not alias
		// the receiver's backing array.
		in, out := &in.IP, &out.IP
		*out = make(net.IP, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Address.
// A nil receiver yields a nil result.
func (in *Address) DeepCopy() *Address {
	if in == nil {
		return nil
	}
	out := new(Address)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Every reference-typed field (slices, maps, CIDR pointers) is cloned so
// the copy shares no memory with the receiver.
func (in *Node) DeepCopyInto(out *Node) {
	*out = *in
	if in.IPAddresses != nil {
		in, out := &in.IPAddresses, &out.IPAddresses
		*out = make([]Address, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IPv4AllocCIDR != nil {
		in, out := &in.IPv4AllocCIDR, &out.IPv4AllocCIDR
		*out = (*in).DeepCopy()
	}
	if in.IPv4SecondaryAllocCIDRs != nil {
		in, out := &in.IPv4SecondaryAllocCIDRs, &out.IPv4SecondaryAllocCIDRs
		*out = make([]*cidr.CIDR, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = (*in).DeepCopy()
			}
		}
	}
	if in.IPv6AllocCIDR != nil {
		in, out := &in.IPv6AllocCIDR, &out.IPv6AllocCIDR
		*out = (*in).DeepCopy()
	}
	if in.IPv6SecondaryAllocCIDRs != nil {
		in, out := &in.IPv6SecondaryAllocCIDRs, &out.IPv6SecondaryAllocCIDRs
		*out = make([]*cidr.CIDR, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = (*in).DeepCopy()
			}
		}
	}
	if in.IPv4HealthIP != nil {
		in, out := &in.IPv4HealthIP, &out.IPv4HealthIP
		*out = make(net.IP, len(*in))
		copy(*out, *in)
	}
	if in.IPv6HealthIP != nil {
		in, out := &in.IPv6HealthIP, &out.IPv6HealthIP
		*out = make(net.IP, len(*in))
		copy(*out, *in)
	}
	if in.IPv4IngressIP != nil {
		in, out := &in.IPv4IngressIP, &out.IPv4IngressIP
		*out = make(net.IP, len(*in))
		copy(*out, *in)
	}
	if in.IPv6IngressIP != nil {
		in, out := &in.IPv6IngressIP, &out.IPv6IngressIP
		*out = make(net.IP, len(*in))
		copy(*out, *in)
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
// A nil receiver yields a nil result.
func (in *Node) DeepCopy() *Node {
	if in == nil {
		return nil
	}
	out := new(Node)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package types
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// NOTE: generated by deepequal-gen (see file header); change the Node type
// and regenerate rather than editing this by hand. Slice and map fields
// treat a nil/non-nil mismatch as unequal before comparing element-wise.
func (in *Node) DeepEqual(other *Node) bool {
	if other == nil {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	if in.Cluster != other.Cluster {
		return false
	}
	if ((in.IPAddresses != nil) && (other.IPAddresses != nil)) || ((in.IPAddresses == nil) != (other.IPAddresses == nil)) {
		in, other := &in.IPAddresses, &other.IPAddresses
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if (in.IPv4AllocCIDR == nil) != (other.IPv4AllocCIDR == nil) {
		return false
	} else if in.IPv4AllocCIDR != nil {
		if !in.IPv4AllocCIDR.DeepEqual(other.IPv4AllocCIDR) {
			return false
		}
	}
	if ((in.IPv4SecondaryAllocCIDRs != nil) && (other.IPv4SecondaryAllocCIDRs != nil)) || ((in.IPv4SecondaryAllocCIDRs == nil) != (other.IPv4SecondaryAllocCIDRs == nil)) {
		in, other := &in.IPv4SecondaryAllocCIDRs, &other.IPv4SecondaryAllocCIDRs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual((*other)[i]) {
					return false
				}
			}
		}
	}
	if (in.IPv6AllocCIDR == nil) != (other.IPv6AllocCIDR == nil) {
		return false
	} else if in.IPv6AllocCIDR != nil {
		if !in.IPv6AllocCIDR.DeepEqual(other.IPv6AllocCIDR) {
			return false
		}
	}
	if ((in.IPv6SecondaryAllocCIDRs != nil) && (other.IPv6SecondaryAllocCIDRs != nil)) || ((in.IPv6SecondaryAllocCIDRs == nil) != (other.IPv6SecondaryAllocCIDRs == nil)) {
		in, other := &in.IPv6SecondaryAllocCIDRs, &other.IPv6SecondaryAllocCIDRs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual((*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.IPv4HealthIP != nil) && (other.IPv4HealthIP != nil)) || ((in.IPv4HealthIP == nil) != (other.IPv4HealthIP == nil)) {
		in, other := &in.IPv4HealthIP, &other.IPv4HealthIP
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.IPv6HealthIP != nil) && (other.IPv6HealthIP != nil)) || ((in.IPv6HealthIP == nil) != (other.IPv6HealthIP == nil)) {
		in, other := &in.IPv6HealthIP, &other.IPv6HealthIP
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.IPv4IngressIP != nil) && (other.IPv4IngressIP != nil)) || ((in.IPv4IngressIP == nil) != (other.IPv4IngressIP == nil)) {
		in, other := &in.IPv4IngressIP, &other.IPv4IngressIP
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.IPv6IngressIP != nil) && (other.IPv6IngressIP != nil)) || ((in.IPv6IngressIP == nil) != (other.IPv6IngressIP == nil)) {
		in, other := &in.IPv6IngressIP, &other.IPv6IngressIP
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.ClusterID != other.ClusterID {
		return false
	}
	if in.Source != other.Source {
		return false
	}
	if in.EncryptionKey != other.EncryptionKey {
		return false
	}
	if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
		in, other := &in.Labels, &other.Labels
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if ((in.Annotations != nil) && (other.Annotations != nil)) || ((in.Annotations == nil) != (other.Annotations == nil)) {
		in, other := &in.Annotations, &other.Annotations
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if in.NodeIdentity != other.NodeIdentity {
		return false
	}
	if in.WireguardPubKey != other.WireguardPubKey {
		return false
	}
	if in.BootID != other.BootID {
		return false
	}
	return true
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package node
import (
net "net"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalNode) DeepCopyInto(out *LocalNode) {
	*out = *in
	in.Node.DeepCopyInto(&out.Node)
	if in.Local != nil {
		// Clone the LocalNodeInfo so the copy shares no pointer fields.
		in, out := &in.Local, &out.Local
		*out = new(LocalNodeInfo)
		(*in).DeepCopyInto(*out)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNode.
// A nil receiver yields a nil result.
func (in *LocalNode) DeepCopy() *LocalNode {
	if in == nil {
		return nil
	}
	out := new(LocalNode)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalNodeInfo) DeepCopyInto(out *LocalNodeInfo) {
	*out = *in
	if in.IPv4NativeRoutingCIDR != nil {
		in, out := &in.IPv4NativeRoutingCIDR, &out.IPv4NativeRoutingCIDR
		*out = (*in).DeepCopy()
	}
	if in.IPv6NativeRoutingCIDR != nil {
		in, out := &in.IPv6NativeRoutingCIDR, &out.IPv6NativeRoutingCIDR
		*out = (*in).DeepCopy()
	}
	if in.ServiceLoopbackIPv4 != nil {
		// net.IP is a byte slice: clone it so the copy does not alias
		// the receiver's backing array.
		in, out := &in.ServiceLoopbackIPv4, &out.ServiceLoopbackIPv4
		*out = make(net.IP, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNodeInfo.
// A nil receiver yields a nil result.
func (in *LocalNodeInfo) DeepCopy() *LocalNodeInfo {
	if in == nil {
		return nil
	}
	out := new(LocalNodeInfo)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package node
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LocalNode) DeepEqual(other *LocalNode) bool {
	if other == nil {
		return false
	}
	if !in.Node.DeepEqual(&other.Node) {
		return false
	}
	// A nil/non-nil mismatch on Local is unequal; both nil is equal.
	if (in.Local == nil) != (other.Local == nil) {
		return false
	} else if in.Local != nil {
		if !in.Local.DeepEqual(other.Local) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// NOTE: generated by deepequal-gen (see file header); regenerate rather
// than editing by hand.
func (in *LocalNodeInfo) DeepEqual(other *LocalNodeInfo) bool {
	if other == nil {
		return false
	}
	if in.OptOutNodeEncryption != other.OptOutNodeEncryption {
		return false
	}
	if in.UID != other.UID {
		return false
	}
	if in.ProviderID != other.ProviderID {
		return false
	}
	if (in.IPv4NativeRoutingCIDR == nil) != (other.IPv4NativeRoutingCIDR == nil) {
		return false
	} else if in.IPv4NativeRoutingCIDR != nil {
		if !in.IPv4NativeRoutingCIDR.DeepEqual(other.IPv4NativeRoutingCIDR) {
			return false
		}
	}
	if (in.IPv6NativeRoutingCIDR == nil) != (other.IPv6NativeRoutingCIDR == nil) {
		return false
	} else if in.IPv6NativeRoutingCIDR != nil {
		if !in.IPv6NativeRoutingCIDR.DeepEqual(other.IPv6NativeRoutingCIDR) {
			return false
		}
	}
	if ((in.ServiceLoopbackIPv4 != nil) && (other.ServiceLoopbackIPv4 != nil)) || ((in.ServiceLoopbackIPv4 == nil) != (other.ServiceLoopbackIPv4 == nil)) {
		in, other := &in.ServiceLoopbackIPv4, &other.ServiceLoopbackIPv4
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.IsBeingDeleted != other.IsBeingDeleted {
		return false
	}
	if in.UnderlayProtocol != other.UnderlayProtocol {
		return false
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"bytes"
"crypto/sha256"
"encoding/json"
"fmt"
"log/slog"
"math"
"net"
"net/netip"
"os"
"path/filepath"
"regexp"
"runtime"
"slices"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/cilium/ebpf"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/mackerelio/go-osstat/memory"
"github.com/spf13/cast"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/cidr"
clustermeshTypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/command"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/ip"
ipamOption "github.com/cilium/cilium/pkg/ipam/option"
"github.com/cilium/cilium/pkg/kpr"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/mac"
"github.com/cilium/cilium/pkg/time"
"github.com/cilium/cilium/pkg/util"
"github.com/cilium/cilium/pkg/version"
)
const (
// AgentHealthPort is the TCP port for agent health status API
AgentHealthPort = "agent-health-port"
// ClusterHealthPort is the TCP port for cluster-wide network connectivity health API
ClusterHealthPort = "cluster-health-port"
// ClusterMeshHealthPort is the TCP port for ClusterMesh apiserver health API
ClusterMeshHealthPort = "clustermesh-health-port"
// AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in policy.
AllowICMPFragNeeded = "allow-icmp-frag-needed"
// AllowLocalhost is the policy when to allow local stack to reach local endpoints { auto | always | policy }
AllowLocalhost = "allow-localhost"
// AllowLocalhostAuto defaults to policy except when running in
// Kubernetes where it then defaults to "always"
AllowLocalhostAuto = "auto"
// AllowLocalhostAlways always allows the local stack to reach local
// endpoints
AllowLocalhostAlways = "always"
// AllowLocalhostPolicy requires a policy rule to allow the local stack
// to reach particular endpoints or policy enforcement must be
// disabled.
AllowLocalhostPolicy = "policy"
// AnnotateK8sNode enables annotating a kubernetes node while bootstrapping
// the daemon, which can also be disabled using this option.
AnnotateK8sNode = "annotate-k8s-node"
// BPFDistributedLRU enables per-CPU distributed backend memory
BPFDistributedLRU = "bpf-distributed-lru"
// BPFRoot is the Path to BPF filesystem
BPFRoot = "bpf-root"
// CGroupRoot is the path to Cgroup2 filesystem
CGroupRoot = "cgroup-root"
// ConfigFile is the Configuration file (default "$HOME/ciliumd.yaml")
ConfigFile = "config"
// ConfigDir is the directory that contains a file for each option where
// the filename represents the option name and the content of that file
// represents the value of that option.
ConfigDir = "config-dir"
// ConntrackGCInterval is the name of the ConntrackGCInterval option
ConntrackGCInterval = "conntrack-gc-interval"
// ConntrackGCMaxInterval is the name of the ConntrackGCMaxInterval option
ConntrackGCMaxInterval = "conntrack-gc-max-interval"
// DebugArg is the argument that enables debugging mode
DebugArg = "debug"
// DebugVerbose is the argument that enables verbose log messages for particular subsystems
DebugVerbose = "debug-verbose"
// Devices facing cluster/external network for attaching bpf_host
Devices = "devices"
// Forces the auto-detection of devices, even if specific devices are explicitly listed
ForceDeviceDetection = "force-device-detection"
// DirectRoutingDevice is the name of a device used to connect nodes in
// direct routing mode (only required by BPF NodePort)
DirectRoutingDevice = "direct-routing-device"
// EnablePolicy enables policy enforcement in the agent.
EnablePolicy = "enable-policy"
// EnableL7Proxy is the name of the option to enable L7 proxy
EnableL7Proxy = "enable-l7-proxy"
// EnableTracing enables tracing mode in the agent.
EnableTracing = "enable-tracing"
// EnableIPIPTermination is the name of the option to enable IPIP termination
EnableIPIPTermination = "enable-ipip-termination"
// Add unreachable routes on pod deletion
EnableUnreachableRoutes = "enable-unreachable-routes"
// EncryptInterface enables encryption on specified interface
EncryptInterface = "encrypt-interface"
// EncryptNode enables node IP encryption
EncryptNode = "encrypt-node"
// GopsPort is the TCP port for the gops server.
GopsPort = "gops-port"
// EnableGops run the gops server
EnableGops = "enable-gops"
// FixedIdentityMapping is the key-value for the fixed identity mapping
// which allows to use reserved label for fixed identities
FixedIdentityMapping = "fixed-identity-mapping"
// FixedZoneMapping is the key-value for the fixed zone mapping which
// is used to map zone value (string) from EndpointSlice to ID (uint8)
// in lb{4,6}_backend in BPF map.
FixedZoneMapping = "fixed-zone-mapping"
// IPv4Range is the per-node IPv4 endpoint prefix, e.g. 10.16.0.0/16
IPv4Range = "ipv4-range"
// IPv6Range is the per-node IPv6 endpoint prefix, must be /96, e.g. fd02:1:1::/96
IPv6Range = "ipv6-range"
// IPv4ServiceRange is the Kubernetes IPv4 services CIDR if not inside cluster prefix
IPv4ServiceRange = "ipv4-service-range"
// IPv6ServiceRange is the Kubernetes IPv6 services CIDR if not inside cluster prefix
IPv6ServiceRange = "ipv6-service-range"
// IPv6ClusterAllocCIDRName is the name of the IPv6ClusterAllocCIDR option
IPv6ClusterAllocCIDRName = "ipv6-cluster-alloc-cidr"
// K8sRequireIPv4PodCIDRName is the name of the K8sRequireIPv4PodCIDR option
K8sRequireIPv4PodCIDRName = "k8s-require-ipv4-pod-cidr"
// K8sRequireIPv6PodCIDRName is the name of the K8sRequireIPv6PodCIDR option
K8sRequireIPv6PodCIDRName = "k8s-require-ipv6-pod-cidr"
// EnableK8s enables operation of Kubernetes-related services/controllers.
// Intended for operating cilium with CNI-compatible orchestrators other than Kubernetes. (default is true)
EnableK8s = "enable-k8s"
// AgentHealthRequireK8sConnectivity determines whether the agent health endpoint requires k8s connectivity
AgentHealthRequireK8sConnectivity = "agent-health-require-k8s-connectivity"
// K8sAPIServer is the kubernetes api address server (for https use --k8s-kubeconfig-path instead)
K8sAPIServer = "k8s-api-server"
// K8sAPIServerURLs is the kubernetes api address server url
K8sAPIServerURLs = "k8s-api-server-urls"
// K8sKubeConfigPath is the absolute path of the kubernetes kubeconfig file
K8sKubeConfigPath = "k8s-kubeconfig-path"
// K8sSyncTimeout is the timeout since last event was received to synchronize all resources with k8s.
K8sSyncTimeoutName = "k8s-sync-timeout"
// AllocatorListTimeout is the timeout to list initial allocator state.
AllocatorListTimeoutName = "allocator-list-timeout"
// KeepConfig when restoring state, keeps containers' configuration in place
KeepConfig = "keep-config"
// KVStore key-value store type
KVStore = "kvstore"
// KVStoreOpt key-value store options
KVStoreOpt = "kvstore-opt"
// Labels is the list of label prefixes used to determine identity of an endpoint
Labels = "labels"
// LabelPrefixFile is the valid label prefixes file path
LabelPrefixFile = "label-prefix-file"
// EnableHostFirewall enables network policies for the host
EnableHostFirewall = "enable-host-firewall"
// EnableHostLegacyRouting enables the old routing path via stack.
EnableHostLegacyRouting = "enable-host-legacy-routing"
// EnableNodePort enables NodePort services implemented by Cilium in BPF
EnableNodePort = "enable-node-port"
// NodePortAcceleration indicates whether NodePort should be accelerated
// via XDP ("none", "generic", "native", or "best-effort")
NodePortAcceleration = "node-port-acceleration"
// Alias to DSR/IPIP IPv4 source CIDR
LoadBalancerRSSv4CIDR = "bpf-lb-rss-ipv4-src-cidr"
// Alias to DSR/IPIP IPv6 source CIDR
LoadBalancerRSSv6CIDR = "bpf-lb-rss-ipv6-src-cidr"
// LoadBalancerNat46X64 enables NAT46 and NAT64 for services
LoadBalancerNat46X64 = "bpf-lb-nat46x64"
// Alias to NodePortAcceleration
LoadBalancerAcceleration = "bpf-lb-acceleration"
// LoadBalancerIPIPSockMark enables sock-lb logic to force service traffic via IPIP
LoadBalancerIPIPSockMark = "bpf-lb-ipip-sock-mark"
// LoadBalancerExternalControlPlane switch skips connectivity to kube-apiserver
// which is relevant in lb-only mode
LoadBalancerExternalControlPlane = "bpf-lb-external-control-plane"
// NodePortBindProtection rejects bind requests to NodePort service ports
NodePortBindProtection = "node-port-bind-protection"
// EnableAutoProtectNodePortRange enables appending NodePort range to
// net.ipv4.ip_local_reserved_ports if it overlaps with ephemeral port
// range (net.ipv4.ip_local_port_range)
EnableAutoProtectNodePortRange = "enable-auto-protect-node-port-range"
// KubeProxyReplacement controls how to enable kube-proxy replacement
// features in BPF datapath
KubeProxyReplacement = "kube-proxy-replacement"
// EnableIdentityMark enables setting the mark field with the identity for
// local traffic. This may be disabled if chaining modes and Cilium use
// conflicting marks.
EnableIdentityMark = "enable-identity-mark"
// AddressScopeMax controls the maximum address scope for addresses to be
// considered local ones with HOST_ID in the ipcache
AddressScopeMax = "local-max-addr-scope"
// EnableLocalRedirectPolicy enables support for local redirect policy
EnableLocalRedirectPolicy = "enable-local-redirect-policy"
// EnableMKE enables MKE specific 'chaining' for kube-proxy replacement
EnableMKE = "enable-mke"
// CgroupPathMKE points to the cgroupv1 net_cls mount instance
CgroupPathMKE = "mke-cgroup-mount"
// LibDir enables the directory path to store runtime build environment
LibDir = "lib-dir"
// LogDriver sets logging endpoints to use for example syslog, fluentd
LogDriver = "log-driver"
// LogOpt sets log driver options for cilium
LogOpt = "log-opt"
// EnableRemoteNodeMasquerade Masquerade packets from endpoints leaving the host destined to a remote node in BPF masquerading mode. This option requires to set enable-bpf-masquerade to true.
EnableRemoteNodeMasquerade = "enable-remote-node-masquerade"
// EnableIPv4Masquerade masquerades IPv4 packets from endpoints leaving the host.
EnableIPv4Masquerade = "enable-ipv4-masquerade"
// EnableIPv6Masquerade masquerades IPv6 packets from endpoints leaving the host.
EnableIPv6Masquerade = "enable-ipv6-masquerade"
// EnableBPFClockProbe selects a more efficient source clock (jiffies vs ktime)
EnableBPFClockProbe = "enable-bpf-clock-probe"
// EnableBPFMasquerade masquerades packets from endpoints leaving the host with BPF instead of iptables
EnableBPFMasquerade = "enable-bpf-masquerade"
// EnableMasqueradeRouteSource masquerades to the source route IP address instead of the interface one
EnableMasqueradeRouteSource = "enable-masquerade-to-route-source"
// EnableIPMasqAgent enables BPF ip-masq-agent
EnableIPMasqAgent = "enable-ip-masq-agent"
// EnableIPv4EgressGateway enables the IPv4 egress gateway
EnableIPv4EgressGateway = "enable-ipv4-egress-gateway"
// EnableEgressGateway enables the egress gateway
EnableEgressGateway = "enable-egress-gateway"
// EnableEnvoyConfig enables processing of CiliumClusterwideEnvoyConfig and CiliumEnvoyConfig CRDs
EnableEnvoyConfig = "enable-envoy-config"
// IPMasqAgentConfigPath is the configuration file path
IPMasqAgentConfigPath = "ip-masq-agent-config-path"
// InstallIptRules sets whether Cilium should install any iptables in general
InstallIptRules = "install-iptables-rules"
// InstallNoConntrackIptRules instructs Cilium to install Iptables rules
// to skip netfilter connection tracking on all pod traffic.
InstallNoConntrackIptRules = "install-no-conntrack-iptables-rules"
// ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve
// the provided comma-separated list of ports in the container network namespace
ContainerIPLocalReservedPorts = "container-ip-local-reserved-ports"
// IPv6NodeAddr is the IPv6 address of node
IPv6NodeAddr = "ipv6-node"
// IPv4NodeAddr is the IPv4 address of node
IPv4NodeAddr = "ipv4-node"
// Restore restores state, if possible, from previous daemon
Restore = "restore"
// SocketPath sets daemon's socket path to listen for connections
SocketPath = "socket-path"
// StateDir is the directory path to store runtime state
StateDir = "state-dir"
// TracePayloadlen is the length of payload to capture when tracing native packets.
TracePayloadlen = "trace-payloadlen"
// TracePayloadlenOverlay is the length of payload to capture when tracing overlay packets.
TracePayloadlenOverlay = "trace-payloadlen-overlay"
// Version prints the version information
Version = "version"
// EnableXDPPrefilter enables XDP-based prefiltering
EnableXDPPrefilter = "enable-xdp-prefilter"
// EnableTCX enables attaching endpoint programs using tcx if the kernel supports it
EnableTCX = "enable-tcx"
ProcFs = "procfs"
// PrometheusServeAddr IP:Port on which to serve prometheus metrics (pass ":Port" to bind on all interfaces, "" is off)
PrometheusServeAddr = "prometheus-serve-addr"
// ExternalEnvoyProxy defines whether the Envoy is deployed externally in form of a DaemonSet or not.
ExternalEnvoyProxy = "external-envoy-proxy"
// CMDRef is the path to cmdref output directory
CMDRef = "cmdref"
// DNSPolicyUnloadOnShutdown is the name of the dns-policy-unload-on-shutdown option.
DNSPolicyUnloadOnShutdown = "dns-policy-unload-on-shutdown"
// ToFQDNsMinTTL is the minimum time, in seconds, to use DNS data for toFQDNs policies.
ToFQDNsMinTTL = "tofqdns-min-ttl"
// ToFQDNsProxyPort is the global port on which the in-agent DNS proxy should listen. Default 0 is an OS-assigned port.
ToFQDNsProxyPort = "tofqdns-proxy-port"
// ToFQDNsMaxIPsPerHost defines the maximum number of IPs to maintain
// for each FQDN name in an endpoint's FQDN cache
ToFQDNsMaxIPsPerHost = "tofqdns-endpoint-max-ip-per-hostname"
// ToFQDNsMaxDeferredConnectionDeletes defines the maximum number of IPs to
// retain for expired DNS lookups with still-active connections
ToFQDNsMaxDeferredConnectionDeletes = "tofqdns-max-deferred-connection-deletes"
// ToFQDNsIdleConnectionGracePeriod defines the connection idle time during which
// previously active connections with expired DNS lookups are still considered alive
ToFQDNsIdleConnectionGracePeriod = "tofqdns-idle-connection-grace-period"
// ToFQDNsPreCache is a path to a file with DNS cache data to insert into the
// global cache on startup.
// The file is not re-read after agent start.
ToFQDNsPreCache = "tofqdns-pre-cache"
// DNSProxyConcurrencyLimit limits parallel processing of DNS messages in
// DNS proxy at any given point in time.
DNSProxyConcurrencyLimit = "dnsproxy-concurrency-limit"
// DNSProxyLockCount is the array size containing mutexes which protect
// against parallel handling of DNS response IPs.
DNSProxyLockCount = "dnsproxy-lock-count"
// DNSProxyLockTimeout is timeout when acquiring the locks controlled by
// DNSProxyLockCount.
DNSProxyLockTimeout = "dnsproxy-lock-timeout"
// DNSProxySocketLingerTimeout defines how many seconds we wait for the connection
// between the DNS proxy and the upstream server to be closed.
DNSProxySocketLingerTimeout = "dnsproxy-socket-linger-timeout"
// DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy.
DNSProxyEnableTransparentMode = "dnsproxy-enable-transparent-mode"
// DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users
// to disable transparent mode even if IPSec is enabled
DNSProxyInsecureSkipTransparentModeCheck = "dnsproxy-insecure-skip-transparent-mode-check"
// MTUName is the name of the MTU option
MTUName = "mtu"
// RouteMetric is the name of the route-metric option
RouteMetric = "route-metric"
// DatapathMode is the name of the DatapathMode option
DatapathMode = "datapath-mode"
// EnableSocketLB is the name for the option to enable the socket LB
EnableSocketLB = "bpf-lb-sock"
// EnableSocketLBTracing is the name for the option to enable the socket LB tracing
EnableSocketLBTracing = "trace-sock"
// BPFSocketLBHostnsOnly is the name of the BPFSocketLBHostnsOnly option
BPFSocketLBHostnsOnly = "bpf-lb-sock-hostns-only"
// EnableSocketLBPodConnectionTermination enables termination of pod connections
// to deleted service backends when socket-LB is enabled.
EnableSocketLBPodConnectionTermination = "bpf-lb-sock-terminate-pod-connections"
// RoutingMode is the name of the option to choose between native routing and tunneling mode
RoutingMode = "routing-mode"
// ServiceNoBackendResponse is the name of the option to pick how to handle traffic for services
// without any backends
ServiceNoBackendResponse = "service-no-backend-response"
// ServiceNoBackendResponseReject is the name of the option to reject traffic for services
// without any backends
ServiceNoBackendResponseReject = "reject"
// ServiceNoBackendResponseDrop is the name of the option to drop traffic for services
// without any backends
ServiceNoBackendResponseDrop = "drop"
// MaxInternalTimerDelay sets a maximum on all periodic timers in
// the agent in order to flush out timer-related bugs in the agent.
MaxInternalTimerDelay = "max-internal-timer-delay"
// MonitorAggregationName specifies the MonitorAggregationLevel on the
// command line.
MonitorAggregationName = "monitor-aggregation"
// MonitorAggregationInterval configures interval for monitor-aggregation
MonitorAggregationInterval = "monitor-aggregation-interval"
// MonitorAggregationFlags configures TCP flags used by monitor aggregation.
MonitorAggregationFlags = "monitor-aggregation-flags"
// ciliumEnvPrefix is the prefix used for environment variables
ciliumEnvPrefix = "CILIUM_"
// CNIChainingMode configures which CNI plugin Cilium is chained with.
CNIChainingMode = "cni-chaining-mode"
// CNIChainingTarget is the name of a CNI network in to which we should
// insert our plugin configuration
CNIChainingTarget = "cni-chaining-target"
// AuthMapEntriesMin defines the minimum auth map limit.
AuthMapEntriesMin = 1 << 8
// AuthMapEntriesMax defines the maximum auth map limit.
AuthMapEntriesMax = 1 << 24
// AuthMapEntriesDefault defines the default auth map limit.
AuthMapEntriesDefault = 1 << 19
// BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled
BPFConntrackAccountingDefault = false
// AuthMapEntriesName configures max entries for BPF auth map.
AuthMapEntriesName = "bpf-auth-map-max"
// CTMapEntriesGlobalTCPDefault is the default maximum number of entries
// in the TCP CT table.
CTMapEntriesGlobalTCPDefault = 2 << 18 // 512Ki
// CTMapEntriesGlobalAnyDefault is the default maximum number of entries
// in the non-TCP CT table.
CTMapEntriesGlobalAnyDefault = 2 << 17 // 256Ki
// CTMapEntriesGlobalTCPName configures max entries for the TCP CT
// table.
CTMapEntriesGlobalTCPName = "bpf-ct-global-tcp-max"
// CTMapEntriesGlobalAnyName configures max entries for the non-TCP CT
// table.
CTMapEntriesGlobalAnyName = "bpf-ct-global-any-max"
// CTMapEntriesTimeout* name option and default value mappings
CTMapEntriesTimeoutSYNName = "bpf-ct-timeout-regular-tcp-syn"
CTMapEntriesTimeoutFINName = "bpf-ct-timeout-regular-tcp-fin"
CTMapEntriesTimeoutTCPName = "bpf-ct-timeout-regular-tcp"
CTMapEntriesTimeoutAnyName = "bpf-ct-timeout-regular-any"
CTMapEntriesTimeoutSVCTCPName = "bpf-ct-timeout-service-tcp"
CTMapEntriesTimeoutSVCTCPGraceName = "bpf-ct-timeout-service-tcp-grace"
CTMapEntriesTimeoutSVCAnyName = "bpf-ct-timeout-service-any"
// NATMapEntriesGlobalDefault holds the default size of the NAT map
// and is 2/3 of the full CT size as a heuristic
NATMapEntriesGlobalDefault = int((CTMapEntriesGlobalTCPDefault + CTMapEntriesGlobalAnyDefault) * 2 / 3)
// SockRevNATMapEntriesDefault holds the default size of the SockRev NAT map
// and is the same size of CTMapEntriesGlobalAnyDefault as a heuristic given
// that sock rev NAT is mostly used for UDP and getpeername only.
SockRevNATMapEntriesDefault = CTMapEntriesGlobalAnyDefault
// MapEntriesGlobalDynamicSizeRatioName is the name of the option to
// set the ratio of total system memory to use for dynamic sizing of the
// CT, NAT, Neighbor and SockRevNAT BPF maps.
MapEntriesGlobalDynamicSizeRatioName = "bpf-map-dynamic-size-ratio"
// LimitTableAutoGlobalTCPMin defines the minimum TCP CT table limit for
// dynamic size ratio calculation.
LimitTableAutoGlobalTCPMin = 1 << 17 // 128Ki entries
// LimitTableAutoGlobalAnyMin defines the minimum UDP CT table limit for
// dynamic size ratio calculation.
LimitTableAutoGlobalAnyMin = 1 << 16 // 64Ki entries
// LimitTableAutoNatGlobalMin defines the minimum NAT limit for dynamic size
// ratio calculation.
LimitTableAutoNatGlobalMin = 1 << 17 // 128Ki entries
// LimitTableAutoSockRevNatMin defines the minimum SockRevNAT limit for
// dynamic size ratio calculation.
LimitTableAutoSockRevNatMin = 1 << 16 // 64Ki entries
// LimitTableMin defines the minimum CT or NAT table limit
LimitTableMin = 1 << 10 // 1Ki entries
// LimitTableMax defines the maximum CT or NAT table limit
LimitTableMax = 1 << 24 // 16Mi entries (~1GiB of entries per map)
// PolicyMapMin defines the minimum policy map limit.
PolicyMapMin = 1 << 8
// PolicyMapMax defines the maximum policy map limit.
PolicyMapMax = 1 << 16
// FragmentsMapMin defines the minimum fragments map limit.
FragmentsMapMin = 1 << 8
// FragmentsMapMax defines the maximum fragments map limit.
FragmentsMapMax = 1 << 16
// NATMapEntriesGlobalName configures max entries for BPF NAT table
NATMapEntriesGlobalName = "bpf-nat-global-max"
// NeighMapEntriesGlobalName configures max entries for BPF neighbor table
NeighMapEntriesGlobalName = "bpf-neigh-global-max"
// PolicyMapFullReconciliationInterval sets the interval for performing the full
// reconciliation of the endpoint policy map.
PolicyMapFullReconciliationIntervalName = "bpf-policy-map-full-reconciliation-interval"
// EgressGatewayPolicyMapEntriesName configures max entries for egress gateway's policy
// map.
EgressGatewayPolicyMapEntriesName = "egress-gateway-policy-map-max"
// LogSystemLoadConfigName is the name of the option to enable system
// load logging
LogSystemLoadConfigName = "log-system-load"
// DisableCiliumEndpointCRDName is the name of the option to disable
// use of the CEP CRD
DisableCiliumEndpointCRDName = "disable-endpoint-crd"
// MaxCtrlIntervalName and MaxCtrlIntervalNameEnv allow configuration
// of MaxControllerInterval.
MaxCtrlIntervalName = "max-controller-interval"
// K8sNamespaceName is the name of the K8sNamespace option
K8sNamespaceName = "k8s-namespace"
// AgentNotReadyNodeTaintKeyName is the name of the option to set
// AgentNotReadyNodeTaintKey
AgentNotReadyNodeTaintKeyName = "agent-not-ready-taint-key"
// EnableIPv4Name is the name of the option to enable IPv4 support
EnableIPv4Name = "enable-ipv4"
// EnableIPv6Name is the name of the option to enable IPv6 support
EnableIPv6Name = "enable-ipv6"
// EnableIPv6NDPName is the name of the option to enable IPv6 NDP support
EnableIPv6NDPName = "enable-ipv6-ndp"
// EnableSRv6 is the name of the option to enable SRv6 encapsulation support
EnableSRv6 = "enable-srv6"
// SRv6EncapModeName is the name of the option to specify the SRv6 encapsulation mode
SRv6EncapModeName = "srv6-encap-mode"
// EnableSCTPName is the name of the option to enable SCTP support
EnableSCTPName = "enable-sctp"
// EnableNat46X64Gateway enables L3 based NAT46 and NAT64 gateway
EnableNat46X64Gateway = "enable-nat46x64-gateway"
// IPv6MCastDevice is the name of the option to select IPv6 multicast device
IPv6MCastDevice = "ipv6-mcast-device"
// BPFEventsDefaultRateLimit specifies limit of messages per second that can be written to
// BPF events map. This limit is defined for all types of events except dbg.
// The number of messages is averaged, meaning that if no messages were written
// to the map over 5 seconds, it's possible to write more events than the value of rate limit
// in the 6th second.
//
// If BPFEventsDefaultRateLimit > 0, non-zero value for BPFEventsDefaultBurstLimit must also be provided
// lest the configuration is considered invalid.
// If both rate and burst limit are 0 or not specified, no limit is imposed.
BPFEventsDefaultRateLimit = "bpf-events-default-rate-limit"
// BPFEventsDefaultBurstLimit specifies the maximum number of messages that can be written
// to BPF events map in 1 second. This limit is defined for all types of events except dbg.
//
// If BPFEventsDefaultBurstLimit > 0, non-zero value for BPFEventsDefaultRateLimit must also be provided
// lest the configuration is considered invalid.
// If both burst and rate limit are 0 or not specified, no limit is imposed.
BPFEventsDefaultBurstLimit = "bpf-events-default-burst-limit"
// FQDNRejectResponseCode is the name for the option for dns-proxy reject response code
FQDNRejectResponseCode = "tofqdns-dns-reject-response-code"
// FQDNProxyDenyWithNameError is useful when stub resolvers, like the one
// in Alpine Linux's libc (musl), treat a REFUSED as a resolution error.
// This happens when trying a DNS search list, as in kubernetes, and breaks
// even whitelisted DNS names.
FQDNProxyDenyWithNameError = "nameError"
// FQDNProxyDenyWithRefused is the response code for Domain refused. It is
// the default for denied DNS requests.
FQDNProxyDenyWithRefused = "refused"
// FQDNProxyResponseMaxDelay is the maximum time the proxy holds back a response
FQDNProxyResponseMaxDelay = "tofqdns-proxy-response-max-delay"
// FQDNRegexCompileLRUSize is the size of the FQDN regex compilation LRU.
// Useful for heavy but repeated FQDN MatchName or MatchPattern use.
FQDNRegexCompileLRUSize = "fqdn-regex-compile-lru-size"
// PreAllocateMapsName is the name of the option PreAllocateMaps
PreAllocateMapsName = "preallocate-bpf-maps"
// EnableBPFTProxy option supports enabling or disabling BPF TProxy.
EnableBPFTProxy = "enable-bpf-tproxy"
// EnableAutoDirectRoutingName is the name for the EnableAutoDirectRouting option
EnableAutoDirectRoutingName = "auto-direct-node-routes"
// DirectRoutingSkipUnreachableName is the name for the DirectRoutingSkipUnreachable option
DirectRoutingSkipUnreachableName = "direct-routing-skip-unreachable"
// EnableIPSecName is the name of the option to enable IPSec
EnableIPSecName = "enable-ipsec"
// Duration of the IPsec key rotation. After that time, we will clean the
// previous IPsec key from the node.
IPsecKeyRotationDuration = "ipsec-key-rotation-duration"
// Enable watcher for IPsec key. If disabled, a restart of the agent will
// be necessary on key rotations.
EnableIPsecKeyWatcher = "enable-ipsec-key-watcher"
// Enable caching for XfrmState for IPSec. Significantly reduces CPU usage
// in large clusters.
EnableIPSecXfrmStateCaching = "enable-ipsec-xfrm-state-caching"
// IPSecKeyFileName is the name of the option for ipsec key file
IPSecKeyFileName = "ipsec-key-file"
// EnableIPSecEncryptedOverlay is the name of the option which enables
// the EncryptedOverlay feature.
//
// This feature will encrypt overlay traffic before it leaves the cluster.
EnableIPSecEncryptedOverlay = "enable-ipsec-encrypted-overlay"
// BootIDFilename is a hidden flag that allows users to specify a
// filename other than /proc/sys/kernel/random/boot_id. This can be
// useful for testing purposes in local containerized cluster.
BootIDFilename = "boot-id-file"
// EnableL2Announcements is the name of the option to enable l2 announcements
EnableL2Announcements = "enable-l2-announcements"
// L2AnnouncerLeaseDuration, if a lease has not been renewed for X amount of time, a new leader can be chosen.
L2AnnouncerLeaseDuration = "l2-announcements-lease-duration"
// L2AnnouncerRenewDeadline, the leader will renew the lease every X amount of time.
L2AnnouncerRenewDeadline = "l2-announcements-renew-deadline"
// L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time.
L2AnnouncerRetryPeriod = "l2-announcements-retry-period"
// EnableEncryptionStrictMode is the name of the option to enable strict encryption mode.
EnableEncryptionStrictMode = "enable-encryption-strict-mode"
// EncryptionStrictModeCIDR is the CIDR in which the strict encryption mode should be enforced.
EncryptionStrictModeCIDR = "encryption-strict-mode-cidr"
// EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of remote node identities.
// This is required when tunneling is used
// or direct routing is used and the node CIDR and pod CIDR overlap.
EncryptionStrictModeAllowRemoteNodeIdentities = "encryption-strict-mode-allow-remote-node-identities"
// KVstoreLeaseTTL is the time-to-live for lease in kvstore.
KVstoreLeaseTTL = "kvstore-lease-ttl"
// KVstoreMaxConsecutiveQuorumErrorsName is the maximum number of acceptable
// kvstore consecutive quorum errors before the agent assumes permanent failure
KVstoreMaxConsecutiveQuorumErrorsName = "kvstore-max-consecutive-quorum-errors"
// IdentityChangeGracePeriod is the name of the
// IdentityChangeGracePeriod option
IdentityChangeGracePeriod = "identity-change-grace-period"
// CiliumIdentityMaxJitter is the maximum duration to delay processing a CiliumIdentity under certain conditions (default: 30s).
CiliumIdentityMaxJitter = "identity-max-jitter"
// IdentityRestoreGracePeriod is the name of the
// IdentityRestoreGracePeriod option
IdentityRestoreGracePeriod = "identity-restore-grace-period"
// EnableHealthChecking is the name of the EnableHealthChecking option
EnableHealthChecking = "enable-health-checking"
// EnableEndpointHealthChecking is the name of the EnableEndpointHealthChecking option
EnableEndpointHealthChecking = "enable-endpoint-health-checking"
// EnableHealthCheckLoadBalancerIP is the name of the EnableHealthCheckLoadBalancerIP option
EnableHealthCheckLoadBalancerIP = "enable-health-check-loadbalancer-ip"
// HealthCheckICMPFailureThreshold is the name of the HealthCheckICMPFailureThreshold option
HealthCheckICMPFailureThreshold = "health-check-icmp-failure-threshold"
// EndpointQueueSize is the size of the EventQueue per-endpoint.
EndpointQueueSize = "endpoint-queue-size"
// EndpointGCInterval interval to attempt garbage collection of
// endpoints that are no longer alive and healthy.
EndpointGCInterval = "endpoint-gc-interval"
// EndpointRegenInterval is the interval of the periodic endpoint regeneration loop.
EndpointRegenInterval = "endpoint-regen-interval"
// ServiceLoopbackIPv4 is the address to use for service loopback SNAT
ServiceLoopbackIPv4 = "ipv4-service-loopback-address"
// LocalRouterIPv4 is the link-local IPv4 address to use for Cilium router device
LocalRouterIPv4 = "local-router-ipv4"
// LocalRouterIPv6 is the link-local IPv6 address to use for Cilium router device
LocalRouterIPv6 = "local-router-ipv6"
// EnableEndpointRoutes enables use of per endpoint routes
EnableEndpointRoutes = "enable-endpoint-routes"
// ExcludeLocalAddress excludes certain addresses to be recognized as a
// local address
ExcludeLocalAddress = "exclude-local-address"
// IPv4PodSubnets A list of IPv4 subnets that pods may be
// assigned from. Used with CNI chaining where IPs are not directly managed
// by Cilium.
IPv4PodSubnets = "ipv4-pod-subnets"
// IPv6PodSubnets A list of IPv6 subnets that pods may be
// assigned from. Used with CNI chaining where IPs are not directly managed
// by Cilium.
IPv6PodSubnets = "ipv6-pod-subnets"
// IPAM is the IPAM method to use
IPAM = "ipam"
// IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool
IPAMMultiPoolPreAllocation = "ipam-multi-pool-pre-allocation"
// IPAMDefaultIPPool defines the default IP Pool when using multi-pool
IPAMDefaultIPPool = "ipam-default-ip-pool"
// XDPModeNative for loading progs with XDPModeLinkDriver
XDPModeNative = "native"
// XDPModeBestEffort for loading progs with XDPModeLinkDriver
XDPModeBestEffort = "best-effort"
// XDPModeGeneric for loading progs with XDPModeLinkGeneric
XDPModeGeneric = "testing-only"
// XDPModeDisabled for not having XDP enabled
XDPModeDisabled = "disabled"
// XDPModeLinkDriver is the tc selector for native XDP
XDPModeLinkDriver = "xdpdrv"
// XDPModeLinkGeneric is the tc selector for generic XDP
XDPModeLinkGeneric = "xdpgeneric"
// XDPModeLinkNone for not having XDP enabled
XDPModeLinkNone = XDPModeDisabled
// K8sClientQPSLimit is the queries per second limit for the K8s client. Defaults to k8s client defaults.
K8sClientQPSLimit = "k8s-client-qps"
// K8sClientBurst is the burst value allowed for the K8s client. Defaults to k8s client defaults.
K8sClientBurst = "k8s-client-burst"
// AutoCreateCiliumNodeResource enables automatic creation of a
// CiliumNode resource for the local node
AutoCreateCiliumNodeResource = "auto-create-cilium-node-resource"
// ExcludeNodeLabelPatterns allows for excluding unnecessary labels from being propagated from k8s node to cilium
// node object. This allows for avoiding unnecessary events being broadcast to all nodes in the cluster.
ExcludeNodeLabelPatterns = "exclude-node-label-patterns"
// IPv4NativeRoutingCIDR describes a v4 CIDR in which pod IPs are routable
IPv4NativeRoutingCIDR = "ipv4-native-routing-cidr"
// IPv6NativeRoutingCIDR describes a v6 CIDR in which pod IPs are routable
IPv6NativeRoutingCIDR = "ipv6-native-routing-cidr"
// MasqueradeInterfaces is the selector used to select interfaces subject to
// egress masquerading
MasqueradeInterfaces = "egress-masquerade-interfaces"
// PolicyTriggerInterval is the amount of time between triggers of policy
// updates are invoked.
PolicyTriggerInterval = "policy-trigger-interval"
// IdentityAllocationMode specifies what mode to use for identity
// allocation
IdentityAllocationMode = "identity-allocation-mode"
// IdentityAllocationModeKVstore enables use of a key-value store such
// as etcd for identity allocation
IdentityAllocationModeKVstore = "kvstore"
// IdentityAllocationModeCRD enables use of Kubernetes CRDs for
// identity allocation
IdentityAllocationModeCRD = "crd"
// IdentityAllocationModeDoubleWriteReadKVstore writes identities to the KVStore and as CRDs at the same time.
// Identities are then read from the KVStore.
IdentityAllocationModeDoubleWriteReadKVstore = "doublewrite-readkvstore"
// IdentityAllocationModeDoubleWriteReadCRD writes identities to the KVStore and as CRDs at the same time.
// Identities are then read from the CRDs.
IdentityAllocationModeDoubleWriteReadCRD = "doublewrite-readcrd"
// EnableLocalNodeRoute controls installation of the route which points
// the allocation prefix of the local node.
EnableLocalNodeRoute = "enable-local-node-route"
// PolicyAuditModeArg argument enables policy audit mode.
PolicyAuditModeArg = "policy-audit-mode"
// PolicyAccountingArg argument enables policy accounting.
PolicyAccountingArg = "policy-accounting"
// K8sClientConnectionTimeout configures the timeout for K8s client connections.
K8sClientConnectionTimeout = "k8s-client-connection-timeout"
// K8sClientConnectionKeepAlive configures the keep alive duration for K8s client connections.
K8sClientConnectionKeepAlive = "k8s-client-connection-keep-alive"
// K8sHeartbeatTimeout configures the timeout for apiserver heartbeat
K8sHeartbeatTimeout = "k8s-heartbeat-timeout"
// EnableIPv4FragmentsTrackingName is the name of the option to enable
// IPv4 fragments tracking for L4-based lookups. Needs LRU map support.
EnableIPv4FragmentsTrackingName = "enable-ipv4-fragment-tracking"
// EnableIPv6FragmentsTrackingName is the name of the option to enable
// IPv6 fragments tracking for L4-based lookups. Needs LRU map support.
EnableIPv6FragmentsTrackingName = "enable-ipv6-fragment-tracking"
// FragmentsMapEntriesName configures max entries for BPF fragments
// tracking map.
FragmentsMapEntriesName = "bpf-fragments-map-max"
// K8sEnableAPIDiscovery enables Kubernetes API discovery
K8sEnableAPIDiscovery = "enable-k8s-api-discovery"
// EgressMultiHomeIPRuleCompat instructs Cilium to use a new scheme to
// store rules and routes under ENI and Azure IPAM modes, if false.
// Otherwise, it will use the old scheme.
EgressMultiHomeIPRuleCompat = "egress-multi-home-ip-rule-compat"
// Install ingress/egress routes through uplink on host for Pods when working with
// delegated IPAM plugin.
InstallUplinkRoutesForDelegatedIPAM = "install-uplink-routes-for-delegated-ipam"
// EnableCustomCallsName is the name of the option to enable tail calls
// for user-defined custom eBPF programs.
EnableCustomCallsName = "enable-custom-calls"
// BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from.
BGPSecretsNamespace = "bgp-secrets-namespace"
// VLANBPFBypass instructs Cilium to bypass bpf logic for vlan tagged packets
VLANBPFBypass = "vlan-bpf-bypass"
// DisableExternalIPMitigation disable ExternalIP mitigation (CVE-2020-8554)
DisableExternalIPMitigation = "disable-external-ip-mitigation"
// EnableICMPRules enables ICMP-based rule support for Cilium Network Policies.
EnableICMPRules = "enable-icmp-rules"
// Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation.
UseCiliumInternalIPForIPsec = "use-cilium-internal-ip-for-ipsec"
// BypassIPAvailabilityUponRestore bypasses the IP availability error
// within IPAM upon endpoint restore and allows the use of the restored IP
// regardless of whether it's available in the pool.
BypassIPAvailabilityUponRestore = "bypass-ip-availability-upon-restore"
// EnableVTEP enables cilium VXLAN VTEP integration
EnableVTEP = "enable-vtep"
// VTEP endpoint IPs
VtepEndpoint = "vtep-endpoint"
// VTEP CIDRs
VtepCIDR = "vtep-cidr"
// VTEP CIDR Mask applies to all VtepCIDR
VtepMask = "vtep-mask"
// VTEP MACs
VtepMAC = "vtep-mac"
// TCFilterPriority sets the priority of the cilium tc filter, enabling other
// filters to be inserted prior to the cilium filter.
TCFilterPriority = "bpf-filter-priority"
// Flag to enable BGP control plane features
EnableBGPControlPlane = "enable-bgp-control-plane"
// EnableBGPControlPlaneStatusReport enables BGP Control Plane CRD status reporting
EnableBGPControlPlaneStatusReport = "enable-bgp-control-plane-status-report"
// BGP router-id allocation mode
BGPRouterIDAllocationMode = "bgp-router-id-allocation-mode"
// BGP router-id allocation IP pool
BGPRouterIDAllocationIPPool = "bgp-router-id-allocation-ip-pool"
// EnablePMTUDiscovery enables path MTU discovery to send ICMP
// fragmentation-needed replies to the client (when needed).
EnablePMTUDiscovery = "enable-pmtu-discovery"
// BPFMapEventBuffers specifies what maps should have event buffers enabled,
// and the max size and TTL of events in the buffers should be.
BPFMapEventBuffers = "bpf-map-event-buffers"
// IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom
// resource is updated.
IPAMCiliumNodeUpdateRate = "ipam-cilium-node-update-rate"
// EnableK8sNetworkPolicy enables support for K8s NetworkPolicy.
EnableK8sNetworkPolicy = "enable-k8s-networkpolicy"
// EnableCiliumNetworkPolicy enables support for Cilium Network Policy.
EnableCiliumNetworkPolicy = "enable-cilium-network-policy"
// EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide
// Network Policy.
EnableCiliumClusterwideNetworkPolicy = "enable-cilium-clusterwide-network-policy"
// PolicyCIDRMatchMode defines the entities that CIDR selectors can reach
PolicyCIDRMatchMode = "policy-cidr-match-mode"
// EnableNodeSelectorLabels enables use of the node label based identity
EnableNodeSelectorLabels = "enable-node-selector-labels"
// NodeLabels is the list of label prefixes used to determine identity of a node (requires enabling of
// EnableNodeSelectorLabels)
NodeLabels = "node-labels"
// BPFEventsDropEnabled defines the DropNotification setting for any endpoint
BPFEventsDropEnabled = "bpf-events-drop-enabled"
// BPFEventsPolicyVerdictEnabled defines the PolicyVerdictNotification setting for any endpoint
BPFEventsPolicyVerdictEnabled = "bpf-events-policy-verdict-enabled"
// BPFEventsTraceEnabled defines the TraceNotification setting for any endpoint
BPFEventsTraceEnabled = "bpf-events-trace-enabled"
// BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled
BPFConntrackAccounting = "bpf-conntrack-accounting"
// EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode
EnableNonDefaultDenyPolicies = "enable-non-default-deny-policies"
// EnableEndpointLockdownOnPolicyOverflow enables endpoint lockdown when an endpoint's
// policy map overflows.
EnableEndpointLockdownOnPolicyOverflow = "enable-endpoint-lockdown-on-policy-overflow"
// ConnectivityProbeFrequencyRatio is the name of the option to specify the connectivity probe frequency
ConnectivityProbeFrequencyRatio = "connectivity-probe-frequency-ratio"
// EnableExtendedIPProtocols controls whether traffic with extended IP protocols is supported in datapath.
EnableExtendedIPProtocols = "enable-extended-ip-protocols"
)
// Default string arguments
var (
	// FQDNRejectOptions is the set of valid values for the FQDN reject
	// response option (name error or refused).
	FQDNRejectOptions = []string{FQDNProxyDenyWithNameError, FQDNProxyDenyWithRefused}
	// MonitorAggregationFlagsDefault ensure that all TCP flags trigger
	// monitor notifications even under medium monitor aggregation.
	MonitorAggregationFlagsDefault = []string{"syn", "fin", "rst"}
)
// Available options for DaemonConfig.RoutingMode
const (
	// RoutingModeNative specifies native routing mode.
	RoutingModeNative = "native"
	// RoutingModeTunnel specifies tunneling mode.
	RoutingModeTunnel = "tunnel"
)
const (
	// HTTP403Message specifies the response body for 403 responses, defaults to "Access denied"
	HTTP403Message = "http-403-msg"
	// ReadCNIConfiguration reads the CNI configuration file and extracts
	// Cilium relevant information. This can be used to pass per node
	// configuration to Cilium.
	ReadCNIConfiguration = "read-cni-conf"
	// WriteCNIConfigurationWhenReady writes the CNI configuration to the
	// specified location once the agent is ready to serve requests. This
	// allows keeping a Kubernetes node NotReady until Cilium is up and
	// running and able to schedule endpoints.
	WriteCNIConfigurationWhenReady = "write-cni-conf-when-ready"
	// CNIExclusive tells the agent to remove other CNI configuration files
	CNIExclusive = "cni-exclusive"
	// CNIExternalRouting delegates endpoint routing to the chained CNI plugin.
	CNIExternalRouting = "cni-external-routing"
	// CNILogFile is the path to a log file (on the host) for the CNI plugin
	// binary to use for logging.
	CNILogFile = "cni-log-file"
	// EnableCiliumEndpointSlice enables the cilium endpoint slicing feature.
	EnableCiliumEndpointSlice = "enable-cilium-endpoint-slice"
	// IdentityManagementMode controls whether CiliumIdentities are managed by cilium-agent, cilium-operator, or both.
	IdentityManagementMode = "identity-management-mode"
	// EnableSourceIPVerification enables the source IP verification, defaults to true
	EnableSourceIPVerification = "enable-source-ip-verification"
)
const (
	// NodePortAccelerationDisabled means we do not accelerate NodePort via XDP.
	// The NodePortAcceleration* values alias the corresponding XDPMode* values.
	NodePortAccelerationDisabled = XDPModeDisabled
	// NodePortAccelerationGeneric means we accelerate NodePort via generic XDP.
	NodePortAccelerationGeneric = XDPModeGeneric
	// NodePortAccelerationNative means we accelerate NodePort via native XDP in the driver (preferred).
	NodePortAccelerationNative = XDPModeNative
	// NodePortAccelerationBestEffort means we accelerate NodePort via native XDP in the driver (preferred), but will skip devices without driver support.
	NodePortAccelerationBestEffort = XDPModeBestEffort
	// KubeProxyReplacementTrue specifies to enable all kube-proxy replacement
	// features (might panic).
	KubeProxyReplacementTrue = "true"
	// KubeProxyReplacementFalse specifies to enable only selected kube-proxy
	// replacement features (might panic).
	KubeProxyReplacementFalse = "false"
	// PprofAddressAgent is the default value for pprof in the agent
	PprofAddressAgent = "localhost"
	// PprofPortAgent is the default value for pprof in the agent
	PprofPortAgent = 6060
	// IdentityManagementModeAgent means cilium-agent is solely responsible for managing CiliumIdentity.
	IdentityManagementModeAgent = "agent"
	// IdentityManagementModeOperator means cilium-operator is solely responsible for managing CiliumIdentity.
	IdentityManagementModeOperator = "operator"
	// IdentityManagementModeBoth means cilium-agent and cilium-operator both manage identities
	// (used only during migration between "agent" and "operator").
	IdentityManagementModeBoth = "both"
)
// Values for the BGPRouterIDAllocationMode option.
const (
	// BGPRouterIDAllocationModeDefault means the router-id is allocated per node.
	BGPRouterIDAllocationModeDefault = "default"
	// BGPRouterIDAllocationModeIPPool means the router-id is allocated per IP pool.
	BGPRouterIDAllocationModeIPPool = "ip-pool"
)
// getEnvName returns the environment variable to be used for the given option
// name: the option name upper-cased with dashes replaced by underscores, and
// the Cilium environment prefix prepended.
func getEnvName(option string) string {
	return ciliumEnvPrefix + strings.ToUpper(strings.ReplaceAll(option, "-", "_"))
}
// BindEnv binds the option name with a deterministic generated environment
// variable which is based on the given optName. If the same optName is bound
// more than once, this function panics.
func BindEnv(vp *viper.Viper, optName string) {
	envName := getEnvName(optName)
	vp.BindEnv(optName, envName)
}
// BindEnvWithLegacyEnvFallback binds the given option name with either the same
// environment variable as BindEnv, if it's set, or with the given legacyEnvName.
//
// The function is used to work around the viper.BindEnv limitation that only
// one environment variable can be bound for an option, and we need multiple
// environment variables due to backward compatibility reasons.
func BindEnvWithLegacyEnvFallback(vp *viper.Viper, optName, legacyEnvName string) {
	// Prefer the generated variable whenever it is present in the environment.
	preferred := getEnvName(optName)
	if os.Getenv(preferred) != "" {
		vp.BindEnv(optName, preferred)
		return
	}
	vp.BindEnv(optName, legacyEnvName)
}
// LogRegisteredSlogOptions logs all options that were bound to viper.
func LogRegisteredSlogOptions(vp *viper.Viper, entry *slog.Logger) {
	keys := vp.AllKeys()
	slices.Sort(keys)
	for _, k := range keys {
		ss := vp.GetStringSlice(k)
		if len(ss) == 0 {
			// Fall back to rendering string-map options as "key=value"
			// pairs. Sort the map keys first: Go map iteration order is
			// random, so without sorting the logged value order would
			// differ between runs.
			sm := vp.GetStringMap(k)
			mapKeys := make([]string, 0, len(sm))
			for mk := range sm {
				mapKeys = append(mapKeys, mk)
			}
			slices.Sort(mapKeys)
			for _, mk := range mapKeys {
				ss = append(ss, fmt.Sprintf("%s=%s", mk, sm[mk]))
			}
		}
		if len(ss) > 0 {
			entry.Info(fmt.Sprintf("  --%s='%s'", k, strings.Join(ss, ",")))
		} else {
			entry.Info(fmt.Sprintf("  --%s='%s'", k, vp.GetString(k)))
		}
	}
}
// DaemonConfig is the configuration used by Daemon.
type DaemonConfig struct {
// Private sum of the config written to file. Used to check that the config is not changed
// after.
shaSum [32]byte
CreationTime time.Time
BpfDir string // BPF template files directory
LibDir string // Cilium library files directory
RunDir string // Cilium runtime directory
ExternalEnvoyProxy bool // Whether Envoy is deployed as external DaemonSet or not
LBDevInheritIPAddr string // Device which IP addr used by bpf_host devices
EnableXDPPrefilter bool // Enable XDP-based prefiltering
XDPMode string // XDP mode, values: { xdpdrv | xdpgeneric | none }
EnableTCX bool // Enable attaching endpoint programs using tcx if the kernel supports it
HostV4Addr net.IP // Host v4 address of the snooping device
HostV6Addr net.IP // Host v6 address of the snooping device
EncryptInterface []string // Set of network facing interface to encrypt over
EncryptNode bool // Set to true for encrypting node IP traffic
DatapathMode string // Datapath mode
RoutingMode string // Routing mode
DryMode bool // Do not create BPF maps, devices, ..
// RestoreState enables restoring the state from previous running daemons.
RestoreState bool
KeepConfig bool // Keep configuration of existing endpoints when starting up.
// AllowLocalhost defines when to allow the local stack to reach local endpoints
// values: { auto | always | policy }
AllowLocalhost string
// StateDir is the directory where runtime state of endpoints is stored
StateDir string
// Options changeable at runtime
Opts *IntOptions
// Monitor contains the configuration for the node monitor.
Monitor *models.MonitorStatus
// AgentHealthPort is the TCP port for agent health status API
AgentHealthPort int
// ClusterHealthPort is the TCP port for cluster-wide network connectivity health API
ClusterHealthPort int
// ClusterMeshHealthPort is the TCP port for ClusterMesh apiserver health API
ClusterMeshHealthPort int
// AgentHealthRequireK8sConnectivity determines whether the agent health endpoint requires k8s connectivity
AgentHealthRequireK8sConnectivity bool
// IPv6ClusterAllocCIDR is the base CIDR used to allocate IPv6 node
// CIDRs if allocation is not performed by an orchestration system
IPv6ClusterAllocCIDR string
// IPv6ClusterAllocCIDRBase is derived from IPv6ClusterAllocCIDR and
// contains the CIDR without the mask, e.g. "fdfd::1/64" -> "fdfd::"
//
// This variable should never be written to, it is initialized via
// DaemonConfig.Validate()
IPv6ClusterAllocCIDRBase string
// IPv6NAT46x64CIDR is the private base CIDR for the NAT46x64 gateway
IPv6NAT46x64CIDR string
// IPv6NAT46x64CIDRBase is derived from IPv6NAT46x64CIDR and contains
// the IPv6 prefix with the masked bits zeroed out
IPv6NAT46x64CIDRBase netip.Addr
// K8sRequireIPv4PodCIDR requires the k8s node resource to specify the
// IPv4 PodCIDR. Cilium will block bootstrapping until the information
// is available.
K8sRequireIPv4PodCIDR bool
// K8sRequireIPv6PodCIDR requires the k8s node resource to specify the
// IPv6 PodCIDR. Cilium will block bootstrapping until the information
// is available.
K8sRequireIPv6PodCIDR bool
// MTU is the maximum transmission unit of the underlying network
MTU int
// RouteMetric is the metric used for the routes added to the cilium_host device
RouteMetric int
// ClusterName is the name of the cluster
ClusterName string
// ClusterID is the unique identifier of the cluster
ClusterID uint32
// CTMapEntriesGlobalTCP is the maximum number of conntrack entries
// allowed in each TCP CT table for IPv4/IPv6.
CTMapEntriesGlobalTCP int
// CTMapEntriesGlobalAny is the maximum number of conntrack entries
// allowed in each non-TCP CT table for IPv4/IPv6.
CTMapEntriesGlobalAny int
// CTMapEntriesTimeout* values configured by the user.
CTMapEntriesTimeoutTCP time.Duration
CTMapEntriesTimeoutAny time.Duration
CTMapEntriesTimeoutSVCTCP time.Duration
CTMapEntriesTimeoutSVCTCPGrace time.Duration
CTMapEntriesTimeoutSVCAny time.Duration
CTMapEntriesTimeoutSYN time.Duration
CTMapEntriesTimeoutFIN time.Duration
// MaxInternalTimerDelay sets a maximum on all periodic timers in
// the agent in order to flush out timer-related bugs in the agent.
MaxInternalTimerDelay time.Duration
// MonitorAggregationInterval configures the interval between monitor
// messages when monitor aggregation is enabled.
MonitorAggregationInterval time.Duration
// MonitorAggregationFlags determines which TCP flags that the monitor
// aggregation ensures reports are generated for when monitor-aggregation
// is enabled. Network byte-order.
MonitorAggregationFlags uint16
// BPFEventsDefaultRateLimit specifies limit of messages per second that can be written to
// BPF events map. This limit is defined for all types of events except dbg.
// The number of messages is averaged, meaning that if no messages were written
// to the map over 5 seconds, it's possible to write more events than the value of rate limit
// in the 6th second.
//
// If BPFEventsDefaultRateLimit > 0, non-zero value for BPFEventsDefaultBurstLimit must also be provided
// lest the configuration is considered invalid.
BPFEventsDefaultRateLimit uint32
// BPFEventsDefaultBurstLimit specifies the maximum number of messages that can be written
// to BPF events map in 1 second. This limit is defined for all types of events except dbg.
//
// If BPFEventsDefaultBurstLimit > 0, non-zero value for BPFEventsDefaultRateLimit must also be provided
// lest the configuration is considered invalid.
// If both burst and rate limit are 0 or not specified, no limit is imposed.
BPFEventsDefaultBurstLimit uint32
// BPFMapsDynamicSizeRatio is ratio of total system memory to use for
// dynamic sizing of the CT, NAT, Neighbor and SockRevNAT BPF maps.
BPFMapsDynamicSizeRatio float64
// NATMapEntriesGlobal is the maximum number of NAT mappings allowed
// in the BPF NAT table
NATMapEntriesGlobal int
// NeighMapEntriesGlobal is the maximum number of neighbor mappings
// allowed in the BPF neigh table
NeighMapEntriesGlobal int
// AuthMapEntries is the maximum number of entries in the auth map.
AuthMapEntries int
// PolicyMapFullReconciliationInterval is the interval at which to perform
// the full reconciliation of the endpoint policy map.
PolicyMapFullReconciliationInterval time.Duration
// DisableCiliumEndpointCRD disables the use of CiliumEndpoint CRD
DisableCiliumEndpointCRD bool
// MaxControllerInterval is the maximum value for a controller's
// RunInterval. Zero means unlimited.
MaxControllerInterval uint
// HTTP403Message is the error message to return when a HTTP 403 is returned
// by the proxy, if L7 policy is configured.
HTTP403Message string
ProcFs string
// K8sNamespace is the name of the namespace in which Cilium is
// deployed in when running in Kubernetes mode
K8sNamespace string
// AgentNotReadyNodeTaint is a node taint which prevents pods from being
// scheduled. Once cilium is setup it is removed from the node. Mostly
// used in cloud providers to prevent existing CNI plugins from managing
// pods.
AgentNotReadyNodeTaintKey string
// EnableIPv4 is true when IPv4 is enabled
EnableIPv4 bool
// EnableIPv6 is true when IPv6 is enabled
EnableIPv6 bool
// EnableNat46X64Gateway is true when L3 based NAT46 and NAT64 translation is enabled
EnableNat46X64Gateway bool
// EnableIPv6NDP is true when NDP is enabled for IPv6
EnableIPv6NDP bool
// EnableSRv6 is true when SRv6 encapsulation support is enabled
EnableSRv6 bool
// SRv6EncapMode is the encapsulation mode for SRv6
SRv6EncapMode string
// EnableSCTP is true when SCTP support is enabled.
EnableSCTP bool
// IPv6MCastDevice is the name of device that joins IPv6's solicitation multicast group
IPv6MCastDevice string
// EnableL7Proxy is the option to enable L7 proxy
EnableL7Proxy bool
// EnableIPSec is true when IPSec is enabled
EnableIPSec bool
// IPSec key file for stored keys
IPSecKeyFile string
// Duration of the IPsec key rotation. After that time, we will clean the
// previous IPsec key from the node.
IPsecKeyRotationDuration time.Duration
// Enable watcher for IPsec key. If disabled, a restart of the agent will
// be necessary on key rotations.
EnableIPsecKeyWatcher bool
// EnableIPSecXfrmStateCaching enables IPSec XfrmState caching.
EnableIPSecXfrmStateCaching bool
// EnableIPSecEncryptedOverlay enables IPSec encryption for overlay traffic.
EnableIPSecEncryptedOverlay bool
// BootIDFile is the file containing the boot ID of the node
BootIDFile string
// EnableEncryptionStrictMode enables strict mode for encryption
EnableEncryptionStrictMode bool
// EncryptionStrictModeCIDR is the CIDR to use for strict mode
EncryptionStrictModeCIDR netip.Prefix
// EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of node identities.
// This is required when tunneling is used
// or direct routing is used and the node CIDR and pod CIDR overlap.
EncryptionStrictModeAllowRemoteNodeIdentities bool
// EnableL2Announcements enables L2 announcement of service IPs
EnableL2Announcements bool
// L2AnnouncerLeaseDuration, if a lease has not been renewed for X amount of time, a new leader can be chosen.
L2AnnouncerLeaseDuration time.Duration
// L2AnnouncerRenewDeadline, the leader will renew the lease every X amount of time.
L2AnnouncerRenewDeadline time.Duration
// L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time.
L2AnnouncerRetryPeriod time.Duration
// CLI options
BPFRoot string
BPFSocketLBHostnsOnly bool
CGroupRoot string
BPFCompileDebug string
ConfigFile string
ConfigDir string
Debug bool
DebugVerbose []string
EnableSocketLBTracing bool
EnableSocketLBPeer bool
EnablePolicy string
EnableTracing bool
EnableIPIPTermination bool
EnableUnreachableRoutes bool
FixedIdentityMapping map[string]string
FixedIdentityMappingValidator Validator `json:"-"`
FixedZoneMapping map[string]uint8
ReverseFixedZoneMapping map[uint8]string
FixedZoneMappingValidator Validator `json:"-"`
IPv4Range string
IPv6Range string
IPv4ServiceRange string
IPv6ServiceRange string
K8sSyncTimeout time.Duration
AllocatorListTimeout time.Duration
LabelPrefixFile string
Labels []string
LogDriver []string
LogOpt map[string]string
LogSystemLoadConfig bool
// Masquerade specifies whether or not to masquerade packets from endpoints
// leaving the host.
EnableRemoteNodeMasquerade bool
EnableIPv4Masquerade bool
EnableIPv6Masquerade bool
EnableBPFMasquerade bool
EnableMasqueradeRouteSource bool
EnableIPMasqAgent bool
EnableBPFClockProbe bool
EnableEgressGateway bool
EnableEnvoyConfig bool
InstallIptRules bool
MonitorAggregation string
PreAllocateMaps bool
IPv6NodeAddr string
IPv4NodeAddr string
SocketPath string
TracePayloadlen int
TracePayloadlenOverlay int
Version string
ToFQDNsMinTTL int
// DNSPolicyUnloadOnShutdown defines whether DNS policy rules should be unloaded on
// graceful shutdown.
DNSPolicyUnloadOnShutdown bool
// ToFQDNsProxyPort is the user-configured global, shared, DNS listen port used
// by the DNS Proxy. Both UDP and TCP are handled on the same port. When it
// is 0 a random port will be assigned, and can be obtained from
// DefaultDNSProxy below.
ToFQDNsProxyPort int
// ToFQDNsMaxIPsPerHost defines the maximum number of IPs to maintain
// for each FQDN name in an endpoint's FQDN cache
ToFQDNsMaxIPsPerHost int
// ToFQDNsMaxDeferredConnectionDeletes defines the maximum number of IPs to retain for
// expired DNS lookups with still-active connections
ToFQDNsMaxDeferredConnectionDeletes int
// ToFQDNsIdleConnectionGracePeriod Time during which idle but
// previously active connections with expired DNS lookups are
// still considered alive
ToFQDNsIdleConnectionGracePeriod time.Duration
// FQDNRejectResponse is the dns-proxy response for invalid dns-proxy request
FQDNRejectResponse string
// FQDNProxyResponseMaxDelay The maximum time the DNS proxy holds an allowed
// DNS response before sending it along. Responses are sent as soon as the
// datapath is updated with the new IP information.
FQDNProxyResponseMaxDelay time.Duration
// FQDNRegexCompileLRUSize is the size of the FQDN regex compilation LRU.
// Useful for heavy but repeated FQDN MatchName or MatchPattern use.
FQDNRegexCompileLRUSize uint
// Path to a file with DNS cache data to preload on startup
ToFQDNsPreCache string
// DNSProxyConcurrencyLimit limits parallel processing of DNS messages in
// DNS proxy at any given point in time.
DNSProxyConcurrencyLimit int
// DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy.
DNSProxyEnableTransparentMode bool
// DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users
// to disable transparent mode even if IPSec is enabled
DNSProxyInsecureSkipTransparentModeCheck bool
// DNSProxyLockCount is the array size containing mutexes which protect
// against parallel handling of DNS response names.
DNSProxyLockCount int
// DNSProxyLockTimeout is timeout when acquiring the locks controlled by
// DNSProxyLockCount.
DNSProxyLockTimeout time.Duration
// DNSProxySocketLingerTimeout defines how many seconds we wait for the connection
// between the DNS proxy and the upstream server to be closed.
DNSProxySocketLingerTimeout int
// EnableBPFTProxy enables implementing proxy redirection via BPF
// mechanisms rather than iptables rules.
EnableBPFTProxy bool
// EnableAutoDirectRouting enables installation of direct routes to
// other nodes when available
EnableAutoDirectRouting bool
// DirectRoutingSkipUnreachable skips installation of direct routes
// to nodes when they're not on the same L2
DirectRoutingSkipUnreachable bool
// EnableLocalNodeRoute controls installation of the route which points
// the allocation prefix of the local node.
EnableLocalNodeRoute bool
// EnableHealthChecking enables health checking between nodes and
// health endpoints
EnableHealthChecking bool
// EnableEndpointHealthChecking enables health checking between virtual
// health endpoints
EnableEndpointHealthChecking bool
// EnableHealthCheckLoadBalancerIP enables health checking of LoadBalancerIP
// by cilium
EnableHealthCheckLoadBalancerIP bool
// HealthCheckICMPFailureThreshold is the number of ICMP packets sent for each health
// checking run. If at least an ICMP response is received, the node or endpoint
// is marked as healthy.
HealthCheckICMPFailureThreshold int
// IdentityChangeGracePeriod is the grace period that needs to pass
// before an endpoint that has changed its identity will start using
// that new identity. During the grace period, the new identity has
// already been allocated and other nodes in the cluster have a chance
// to whitelist the new upcoming identity of the endpoint.
IdentityChangeGracePeriod time.Duration
// CiliumIdentityMaxJitter is the maximum jitter time for CiliumIdentityAdd.
CiliumIdentityMaxJitter time.Duration
// IdentityRestoreGracePeriod is the grace period that needs to pass before CIDR identities
// restored during agent restart are released. If any of the restored identities remains
// unused after this time, they will be removed from the IP cache. Any of the restored
// identities that are used in network policies will remain in the IP cache until all such
// policies are removed.
//
// The default is 30 seconds for k8s clusters, and 10 minutes for kvstore clusters
IdentityRestoreGracePeriod time.Duration
// EndpointQueueSize is the size of the EventQueue per-endpoint. A larger
// queue means that more events can be buffered per-endpoint. This is useful
// in the case where a cluster might be under high load for endpoint-related
// events, specifically those which cause many regenerations.
EndpointQueueSize int
// ConntrackGCInterval is the connection tracking garbage collection
// interval
ConntrackGCInterval time.Duration
// ConntrackGCMaxInterval if set limits the automatic GC interval calculation to
// the specified maximum value.
ConntrackGCMaxInterval time.Duration
// ServiceLoopbackIPv4 is the address to use for service loopback SNAT
ServiceLoopbackIPv4 string
// LocalRouterIPv4 is the link-local IPv4 address used for Cilium's router device
LocalRouterIPv4 string
// LocalRouterIPv6 is the link-local IPv6 address used for Cilium's router device
LocalRouterIPv6 string
// EnableEndpointRoutes enables use of per endpoint routes
EnableEndpointRoutes bool
// Specifies whether to annotate the kubernetes nodes or not
AnnotateK8sNode bool
// EnableHealthDatapath enables IPIP health probes data path
EnableHealthDatapath bool
// EnableIPIPDevices enables the creation of IPIP devices for IPv4 and IPv6
EnableIPIPDevices bool
// EnableHostLegacyRouting enables the old routing path via stack.
EnableHostLegacyRouting bool
// NodePortNat46X64 indicates whether NAT46 / NAT64 can be used.
NodePortNat46X64 bool
// LoadBalancerIPIPSockMark enables sock-lb logic to force service traffic via IPIP
LoadBalancerIPIPSockMark bool
// LoadBalancerRSSv4CIDR defines the outer source IPv4 prefix for DSR/IPIP
LoadBalancerRSSv4CIDR string
LoadBalancerRSSv4 net.IPNet
// LoadBalancerRSSv6CIDR defines the outer source IPv6 prefix for DSR/IPIP
LoadBalancerRSSv6CIDR string
LoadBalancerRSSv6 net.IPNet
// LoadBalancerExternalControlPlane tells whether to not use kube-apiserver as
// its control plane in lb-only mode.
LoadBalancerExternalControlPlane bool
// EnablePMTUDiscovery indicates whether to send ICMP fragmentation-needed
// replies to the client (when needed).
EnablePMTUDiscovery bool
// NodePortAcceleration indicates whether NodePort should be accelerated
// via XDP ("none", "generic", "native", or "best-effort")
NodePortAcceleration string
// NodePortBindProtection rejects bind requests to NodePort service ports
NodePortBindProtection bool
// EnableAutoProtectNodePortRange enables appending NodePort range to
// net.ipv4.ip_local_reserved_ports if it overlaps with ephemeral port
// range (net.ipv4.ip_local_port_range)
EnableAutoProtectNodePortRange bool
// AddressScopeMax controls the maximum address scope for addresses to be
// considered local ones with HOST_ID in the ipcache
AddressScopeMax int
// EnableMKE enables MKE specific 'chaining' for kube-proxy replacement
EnableMKE bool
// CgroupPathMKE points to the cgroupv1 net_cls mount instance
CgroupPathMKE string
// EnableHostFirewall enables network policies for the host
EnableHostFirewall bool
// EnableLocalRedirectPolicy enables redirect policies to redirect traffic within nodes
EnableLocalRedirectPolicy bool
// Selection of BPF main clock source (ktime vs jiffies)
ClockSource BPFClockSource
// EnableIdentityMark enables setting the mark field with the identity for
// local traffic. This may be disabled if chaining modes and Cilium use
// conflicting marks.
EnableIdentityMark bool
// KernelHz is the HZ rate the kernel is operating in
KernelHz int
// ExcludeLocalAddresses excludes certain addresses to be recognized as
// a local address
ExcludeLocalAddresses []netip.Prefix
// IPv4PodSubnets available subnets to be assign IPv4 addresses to pods from
IPv4PodSubnets []*net.IPNet
// IPv6PodSubnets available subnets to be assign IPv6 addresses to pods from
IPv6PodSubnets []*net.IPNet
// IPAM is the IPAM method to use
IPAM string
// IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool
IPAMMultiPoolPreAllocation map[string]string
// IPAMDefaultIPPool the default IP Pool when using multi-pool
IPAMDefaultIPPool string
// AutoCreateCiliumNodeResource enables automatic creation of a
// CiliumNode resource for the local node
AutoCreateCiliumNodeResource bool
// ExcludeNodeLabelPatterns allows for excluding unnecessary labels from being propagated from k8s node to cilium
// node object. This allows for avoiding unnecessary events being broadcast to all nodes in the cluster.
ExcludeNodeLabelPatterns []*regexp.Regexp
// IPv4NativeRoutingCIDR describes a CIDR in which pod IPs are routable
IPv4NativeRoutingCIDR *cidr.CIDR
// IPv6NativeRoutingCIDR describes a CIDR in which pod IPs are routable
IPv6NativeRoutingCIDR *cidr.CIDR
// MasqueradeInterfaces is the selector used to select interfaces subject
// to egress masquerading.
MasqueradeInterfaces []string
// PolicyTriggerInterval is the amount of time between when policy updates
// are triggered.
PolicyTriggerInterval time.Duration
// IdentityAllocationMode specifies what mode to use for identity
// allocation
IdentityAllocationMode string
// AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in
// the network policy for cilium-agent.
AllowICMPFragNeeded bool
// Azure options
// PolicyAuditMode enables non-drop mode for installed policies. In
// audit mode packets affected by policies will not be dropped.
// Policy related decisions can be checked via the policy verdict messages.
PolicyAuditMode bool
// PolicyAccounting enable policy accounting
PolicyAccounting bool
// EnableIPv4FragmentsTracking enables IPv4 fragments tracking for
// L4-based lookups. Needs LRU map support.
EnableIPv4FragmentsTracking bool
// EnableIPv6FragmentsTracking enables IPv6 fragments tracking for
// L4-based lookups. Needs LRU map support.
EnableIPv6FragmentsTracking bool
// FragmentsMapEntries is the maximum number of fragmented datagrams
// that can simultaneously be tracked in order to retrieve their L4
// ports for all fragments.
FragmentsMapEntries int
// SizeofCTElement is the size of an element (key + value) in the CT map.
SizeofCTElement int
// SizeofNATElement is the size of an element (key + value) in the NAT map.
SizeofNATElement int
// SizeofNeighElement is the size of an element (key + value) in the neigh
// map.
SizeofNeighElement int
// SizeofSockRevElement is the size of an element (key + value) in the neigh
// map.
SizeofSockRevElement int
// k8sEnableLeasesFallbackDiscovery enables k8s to fallback to API probing to check
// for the support of Leases in Kubernetes when there is an error in discovering
// API groups using Discovery API.
// We require to check for Leases capabilities in operator only, which uses Leases for leader
// election purposes in HA mode.
// This is only enabled for cilium-operator
K8sEnableLeasesFallbackDiscovery bool
// EgressMultiHomeIPRuleCompat instructs Cilium to use a new scheme to
// store rules and routes under ENI and Azure IPAM modes, if false.
// Otherwise, it will use the old scheme.
EgressMultiHomeIPRuleCompat bool
// Install ingress/egress routes through uplink on host for Pods when working with
// delegated IPAM plugin.
InstallUplinkRoutesForDelegatedIPAM bool
// InstallNoConntrackIptRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic.
InstallNoConntrackIptRules bool
// ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve
// the provided comma-separated list of ports in the container network namespace
ContainerIPLocalReservedPorts string
// EnableCustomCalls enables tail call hooks for user-defined custom
// eBPF programs, typically used to collect custom per-endpoint
// metrics.
EnableCustomCalls bool
// BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from.
BGPSecretsNamespace string
// EnableCiliumEndpointSlice enables the cilium endpoint slicing feature.
EnableCiliumEndpointSlice bool
// ARPPingKernelManaged denotes whether kernel can auto-refresh Neighbor entries
ARPPingKernelManaged bool
// VLANBPFBypass list of explicitly allowed VLAN id's for bpf logic bypass
VLANBPFBypass []int
// DisableExternalIPMitigation disables externalIP mitigation (CVE-2020-8554)
DisableExternalIPMitigation bool
// EnableICMPRules enables ICMP-based rule support for Cilium Network Policies.
EnableICMPRules bool
// Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation.
UseCiliumInternalIPForIPsec bool
// BypassIPAvailabilityUponRestore bypasses the IP availability error
// within IPAM upon endpoint restore and allows the use of the restored IP
// regardless of whether it's available in the pool.
BypassIPAvailabilityUponRestore bool
// EnableVTEP enable Cilium VXLAN VTEP integration
EnableVTEP bool
// VtepEndpoints VTEP endpoint IPs
VtepEndpoints []net.IP
// VtepCIDRs VTEP CIDRs
VtepCIDRs []*cidr.CIDR
// VtepCidrMask VTEP CIDR Mask
VtepCidrMask net.IP
// VtepMACs VTEP MACs
VtepMACs []mac.MAC
// TCFilterPriority sets the priority of the cilium tc filter, enabling other
// filters to be inserted prior to the cilium filter.
TCFilterPriority uint16
// Enables BGP control plane features.
EnableBGPControlPlane bool
// Enables BGP control plane status reporting.
EnableBGPControlPlaneStatusReport bool
// BGPRouterIDAllocationMode is the mode to allocate the BGP router-id.
BGPRouterIDAllocationMode string
// BGPRouterIDAllocationIPPool is the IP pool to allocate the BGP router-id from.
BGPRouterIDAllocationIPPool string
// BPFMapEventBuffers has configuration on what BPF map event buffers to enable
// and configuration options for those.
BPFMapEventBuffers map[string]string
BPFMapEventBuffersValidator Validator `json:"-"`
bpfMapEventConfigs BPFEventBufferConfigs
// BPFDistributedLRU enables per-CPU distributed backend memory
BPFDistributedLRU bool
// BPFEventsDropEnabled controls whether the Cilium datapath exposes "drop" events to Cilium monitor and Hubble.
BPFEventsDropEnabled bool
// BPFEventsPolicyVerdictEnabled controls whether the Cilium datapath exposes "policy verdict" events to Cilium monitor and Hubble.
BPFEventsPolicyVerdictEnabled bool
// BPFEventsTraceEnabled controls whether the Cilium datapath exposes "trace" events to Cilium monitor and Hubble.
BPFEventsTraceEnabled bool
// BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled.
BPFConntrackAccounting bool
// IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom
// resource is updated.
IPAMCiliumNodeUpdateRate time.Duration
// EnableK8sNetworkPolicy enables support for K8s NetworkPolicy.
EnableK8sNetworkPolicy bool
// EnableCiliumNetworkPolicy enables support for Cilium Network Policy.
EnableCiliumNetworkPolicy bool
// EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide
// Network Policy.
EnableCiliumClusterwideNetworkPolicy bool
// PolicyCIDRMatchMode is the list of entities that can be selected by CIDR policy.
// Currently supported values:
// - world
// - world, remote-node
PolicyCIDRMatchMode []string
// MaxConnectedClusters sets the maximum number of clusters that can be
// connected in a clustermesh.
// The value is used to determine the bit allocation for cluster ID and
// identity in a numeric identity. Values > 255 will decrease the number of
// allocatable identities.
MaxConnectedClusters uint32
// ForceDeviceRequired enforces the attachment of BPF programs on native device.
ForceDeviceRequired bool
// ServiceNoBackendResponse determines how we handle traffic to a service with no backends.
ServiceNoBackendResponse string
// EnableNodeSelectorLabels enables use of the node label based identity
EnableNodeSelectorLabels bool
// NodeLabels is the list of label prefixes used to determine identity of a node (requires enabling of
// EnableNodeSelectorLabels)
NodeLabels []string
// EnableSocketLBPodConnectionTermination enables the termination of connections from pods
// to deleted service backends when socket-LB is enabled
EnableSocketLBPodConnectionTermination bool
// EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode
EnableNonDefaultDenyPolicies bool
// EnableSourceIPVerification enables the source ip validation of connection from endpoints to endpoints
EnableSourceIPVerification bool
// EnableEndpointLockdownOnPolicyOverflow enables endpoint lockdown when an endpoint's
// policy map overflows.
EnableEndpointLockdownOnPolicyOverflow bool
// ConnectivityProbeFrequencyRatio is the ratio of the connectivity probe frequency vs resource consumption
ConnectivityProbeFrequencyRatio float64
// EnableExtendedIPProtocols controls whether traffic with extended IP protocols is supported in datapath
EnableExtendedIPProtocols bool
}
var (
	// Config represents the daemon configuration.
	//
	// These are the compile-time defaults; most fields are later overwritten
	// from flags and the configuration map (see SetupLogging and Populate).
	Config = &DaemonConfig{
		CreationTime: time.Now(),
		Opts:         NewIntOptions(&DaemonOptionLibrary),
		// Monitor status is initialized from the local machine's CPU count
		// and page size.
		Monitor:                          &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0},
		IPv6ClusterAllocCIDR:             defaults.IPv6ClusterAllocCIDR,
		IPv6ClusterAllocCIDRBase:         defaults.IPv6ClusterAllocCIDRBase,
		IPAMDefaultIPPool:                defaults.IPAMDefaultIPPool,
		EnableHealthChecking:             defaults.EnableHealthChecking,
		EnableEndpointHealthChecking:     defaults.EnableEndpointHealthChecking,
		EnableHealthCheckLoadBalancerIP:  defaults.EnableHealthCheckLoadBalancerIP,
		HealthCheckICMPFailureThreshold:  defaults.HealthCheckICMPFailureThreshold,
		EnableIPv4:                       defaults.EnableIPv4,
		EnableIPv6:                       defaults.EnableIPv6,
		EnableIPv6NDP:                    defaults.EnableIPv6NDP,
		EnableSCTP:                       defaults.EnableSCTP,
		EnableL7Proxy:                    defaults.EnableL7Proxy,
		ToFQDNsMaxIPsPerHost:             defaults.ToFQDNsMaxIPsPerHost,
		IdentityChangeGracePeriod:        defaults.IdentityChangeGracePeriod,
		CiliumIdentityMaxJitter:          defaults.CiliumIdentityMaxJitter,
		IdentityRestoreGracePeriod:       defaults.IdentityRestoreGracePeriodK8s,
		FixedIdentityMapping:             make(map[string]string),
		LogOpt:                           make(map[string]string),
		ServiceLoopbackIPv4:              defaults.ServiceLoopbackIPv4,
		EnableEndpointRoutes:             defaults.EnableEndpointRoutes,
		AnnotateK8sNode:                  defaults.AnnotateK8sNode,
		AutoCreateCiliumNodeResource:     defaults.AutoCreateCiliumNodeResource,
		// Identity allocation defaults to the kvstore mode.
		IdentityAllocationMode:           IdentityAllocationModeKVstore,
		AllowICMPFragNeeded:              defaults.AllowICMPFragNeeded,
		AllocatorListTimeout:             defaults.AllocatorListTimeout,
		EnableICMPRules:                  defaults.EnableICMPRules,
		UseCiliumInternalIPForIPsec:      defaults.UseCiliumInternalIPForIPsec,
		K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery,
		EnableVTEP:                       defaults.EnableVTEP,
		EnableBGPControlPlane:            defaults.EnableBGPControlPlane,
		EnableK8sNetworkPolicy:           defaults.EnableK8sNetworkPolicy,
		EnableCiliumNetworkPolicy:        defaults.EnableCiliumNetworkPolicy,
		EnableCiliumClusterwideNetworkPolicy: defaults.EnableCiliumClusterwideNetworkPolicy,
		PolicyCIDRMatchMode:                  defaults.PolicyCIDRMatchMode,
		MaxConnectedClusters:                 defaults.MaxConnectedClusters,
		BPFDistributedLRU:                    defaults.BPFDistributedLRU,
		BPFEventsDropEnabled:                 defaults.BPFEventsDropEnabled,
		BPFEventsPolicyVerdictEnabled:        defaults.BPFEventsPolicyVerdictEnabled,
		BPFEventsTraceEnabled:                defaults.BPFEventsTraceEnabled,
		BPFConntrackAccounting:               defaults.BPFConntrackAccounting,
		EnableEnvoyConfig:                    defaults.EnableEnvoyConfig,
		EnableNonDefaultDenyPolicies:         defaults.EnableNonDefaultDenyPolicies,
		EnableSourceIPVerification:           defaults.EnableSourceIPVerification,
		ConnectivityProbeFrequencyRatio:      defaults.ConnectivityProbeFrequencyRatio,
	}
)
// IsExcludedLocalAddress returns true if the specified IP matches one of the
// excluded local IP ranges
func (c *DaemonConfig) IsExcludedLocalAddress(addr netip.Addr) bool {
	return slices.ContainsFunc(c.ExcludeLocalAddresses, func(excluded netip.Prefix) bool {
		return excluded.Contains(addr)
	})
}
// IsPodSubnetsDefined returns true if encryption subnets should be configured at init time.
func (c *DaemonConfig) IsPodSubnetsDefined() bool {
	// Either address family having at least one subnet is sufficient.
	return len(c.IPv4PodSubnets) != 0 || len(c.IPv6PodSubnets) != 0
}
// nodeConfigFile is the name of the C header which contains the node's
// network parameters.
const nodeConfigFile = "node_config.h"
// GetNodeConfigPath returns the full path of the NodeConfigFile.
func (c *DaemonConfig) GetNodeConfigPath() string {
	globalsDir := c.GetGlobalsDir()
	return filepath.Join(globalsDir, nodeConfigFile)
}
// GetGlobalsDir returns the path for the globals directory, located under
// the daemon's state directory.
func (c *DaemonConfig) GetGlobalsDir() string {
	const globalsSubdir = "globals"
	return filepath.Join(c.StateDir, globalsSubdir)
}
// AlwaysAllowLocalhost returns true if the daemon has the option set that
// localhost can always reach local endpoints
func (c *DaemonConfig) AlwaysAllowLocalhost() bool {
	// Only AllowLocalhostAlways grants unconditional access; the auto and
	// policy modes (and any other value) defer to policy.
	return c.AllowLocalhost == AllowLocalhostAlways
}
// TunnelingEnabled returns true if tunneling is enabled.
func (c *DaemonConfig) TunnelingEnabled() bool {
	// Tunneling is considered on whenever native routing is not explicitly
	// selected. In unit tests RoutingMode is usually unset, and this
	// formulation makes the default match the product default (tunneling on).
	if c.RoutingMode == RoutingModeNative {
		return false
	}
	return true
}
// AreDevicesRequired returns true if the agent needs to attach to the native
// devices to implement some features.
func (c *DaemonConfig) AreDevicesRequired(kprCfg kpr.KPRConfig, wireguardEnabled bool) bool {
	// Any one of these features forces device attachment.
	switch {
	case kprCfg.EnableNodePort,
		c.EnableHostFirewall,
		wireguardEnabled,
		c.EnableL2Announcements,
		c.ForceDeviceRequired,
		c.EnableIPSec:
		return true
	}
	return false
}
// NeedIngressOnWireGuardDevice returns true if the agent needs to attach
// cil_from_wireguard on the Ingress of Cilium's WireGuard device
func (c *DaemonConfig) NeedIngressOnWireGuardDevice(kprCfg kpr.KPRConfig, wireguardEnabled bool) bool {
	switch {
	case !wireguardEnabled:
		// Nothing to attach without WireGuard.
		return false
	case !c.TunnelingEnabled():
		// In native routing mode we want to deliver packets to local
		// endpoints straight from BPF, without passing through the stack.
		// This matches overlay mode (where bpf_overlay would handle the
		// delivery) and native routing mode without encryption (where
		// bpf_host at the native device would handle the delivery).
		return true
	case kprCfg.EnableNodePort && c.EncryptNode:
		// When WG & encrypt-node are on, a NodePort BPF to-be forwarded
		// request to a remote node running a selected service endpoint must
		// be encrypted. To make the NodePort's rev-{S,D}NAT translations
		// happen for a reply from the remote node, we need to attach
		// bpf_host to Cilium's WG netdev (otherwise, the WG netdev after
		// decrypting the reply will pass it to the stack which drops the
		// packet).
		return true
	default:
		return false
	}
}
// NeedEgressOnWireGuardDevice returns true if the agent needs to attach
// cil_to_wireguard on the Egress of Cilium's WireGuard device
func (c *DaemonConfig) NeedEgressOnWireGuardDevice(kprCfg kpr.KPRConfig, wireguardEnabled bool) bool {
	switch {
	case !wireguardEnabled:
		// Nothing to attach without WireGuard.
		return false
	case c.TunnelingEnabled():
		// No need to handle rev-NAT xlations in wireguard with tunneling enabled.
		return false
	case kprCfg.EnableNodePort && c.EnableL7Proxy && kprCfg.KubeProxyReplacement == KubeProxyReplacementTrue:
		// Attaching cil_to_wireguard to cilium_wg0 egress is required for
		// handling the rev-NAT xlations when encrypting KPR traffic.
		return true
	default:
		return false
	}
}
// MasqueradingEnabled returns true if either IPv4 or IPv6 masquerading is enabled.
func (c *DaemonConfig) MasqueradingEnabled() bool {
	if c.EnableIPv4Masquerade {
		return true
	}
	return c.EnableIPv6Masquerade
}
// IptablesMasqueradingIPv4Enabled returns true if iptables-based
// masquerading is enabled for IPv4.
func (c *DaemonConfig) IptablesMasqueradingIPv4Enabled() bool {
	// iptables handles IPv4 masquerading only when BPF masquerading is off.
	return c.EnableIPv4Masquerade && !c.EnableBPFMasquerade
}
// IptablesMasqueradingIPv6Enabled returns true if iptables-based
// masquerading is enabled for IPv6.
func (c *DaemonConfig) IptablesMasqueradingIPv6Enabled() bool {
	// iptables handles IPv6 masquerading only when BPF masquerading is off.
	return c.EnableIPv6Masquerade && !c.EnableBPFMasquerade
}
// IptablesMasqueradingEnabled returns true if iptables-based
// masquerading is enabled for either address family.
func (c *DaemonConfig) IptablesMasqueradingEnabled() bool {
	return c.IptablesMasqueradingIPv4Enabled() ||
		c.IptablesMasqueradingIPv6Enabled()
}
// NodeIpsetNeeded returns true if a node ipsets should be used to skip
// masquerading for traffic to cluster nodes.
func (c *DaemonConfig) NodeIpsetNeeded() bool {
	// Only relevant in native routing mode with iptables masquerading.
	if c.TunnelingEnabled() {
		return false
	}
	return c.IptablesMasqueradingEnabled()
}
// NodeEncryptionEnabled returns true if node encryption (encrypt-node) is
// enabled.
func (c *DaemonConfig) NodeEncryptionEnabled() bool {
	return c.EncryptNode
}
// EncryptionEnabled returns true if IPsec-based encryption is enabled.
func (c *DaemonConfig) EncryptionEnabled() bool {
	return c.EnableIPSec
}
// IPv4Enabled returns true if IPv4 support is enabled.
func (c *DaemonConfig) IPv4Enabled() bool {
	return c.EnableIPv4
}
// IPv6Enabled returns true if IPv6 support is enabled.
func (c *DaemonConfig) IPv6Enabled() bool {
	return c.EnableIPv6
}
// IPv6NDPEnabled returns true if IPv6 NDP support is enabled.
func (c *DaemonConfig) IPv6NDPEnabled() bool {
	return c.EnableIPv6NDP
}
// SCTPEnabled returns true if SCTP support is enabled.
func (c *DaemonConfig) SCTPEnabled() bool {
	return c.EnableSCTP
}
// HealthCheckingEnabled returns true if health checking is enabled.
func (c *DaemonConfig) HealthCheckingEnabled() bool {
	return c.EnableHealthChecking
}
// IPAMMode returns the configured IPAM mode, normalized to lower case.
func (c *DaemonConfig) IPAMMode() string {
	mode := c.IPAM
	return strings.ToLower(mode)
}
// TracingEnabled returns if tracing policy (outlining which rules apply to a
// specific set of labels) is enabled.
func (c *DaemonConfig) TracingEnabled() bool {
	// Policy tracing is a runtime option toggled via the option library.
	return c.Opts.IsEnabled(PolicyTracing)
}
// UnreachableRoutesEnabled returns true if unreachable routes support is
// enabled.
func (c *DaemonConfig) UnreachableRoutesEnabled() bool {
	return c.EnableUnreachableRoutes
}
// CiliumNamespaceName returns the name of the namespace in which Cilium is
// deployed in
func (c *DaemonConfig) CiliumNamespaceName() string {
	return c.K8sNamespace
}
// AgentNotReadyNodeTaintValue returns the value of the taint key that cilium
// agents will manage on their nodes.
//
// It returns the configured AgentNotReadyNodeTaintKey when set, falling back
// to the compiled-in default taint key otherwise.
func (c *DaemonConfig) AgentNotReadyNodeTaintValue() string {
	// Idiomatic early return instead of else-after-return.
	if c.AgentNotReadyNodeTaintKey != "" {
		return c.AgentNotReadyNodeTaintKey
	}
	return defaults.AgentNotReadyNodeTaint
}
// K8sNetworkPolicyEnabled returns true if cilium agent needs to support K8s
// NetworkPolicy, false otherwise.
func (c *DaemonConfig) K8sNetworkPolicyEnabled() bool {
	return c.EnableK8sNetworkPolicy
}
// PolicyCIDRMatchesNodes returns true if "nodes" is present in the configured
// CIDR policy match modes, i.e. CIDR policies may select cluster nodes.
func (c *DaemonConfig) PolicyCIDRMatchesNodes() bool {
	for _, mode := range c.PolicyCIDRMatchMode {
		if mode == "nodes" {
			return true
		}
	}
	return false
}
// PerNodeLabelsEnabled returns true if the per-node labels feature
// (node-label based identities) is enabled.
func (c *DaemonConfig) PerNodeLabelsEnabled() bool {
	return c.EnableNodeSelectorLabels
}
// validatePolicyCIDRMatchMode checks that every configured CIDR match mode is
// a supported value. Currently, "nodes" is the only acceptable value.
func (c *DaemonConfig) validatePolicyCIDRMatchMode() error {
	for _, mode := range c.PolicyCIDRMatchMode {
		if mode != "nodes" {
			return fmt.Errorf("unknown CIDR match mode: %s", mode)
		}
	}
	return nil
}
// DirectRoutingDeviceRequired return whether the Direct Routing Device is
// needed under the current configuration.
func (c *DaemonConfig) DirectRoutingDeviceRequired(kprCfg kpr.KPRConfig, wireguardEnabled bool) bool {
	// XDP needs IPV4_DIRECT_ROUTING when building tunnel headers.
	if kprCfg.EnableNodePort && c.NodePortAcceleration != NodePortAccelerationDisabled {
		return true
	}
	// BPF NodePort and BPF Host Routing use the direct routing device; with
	// tunneling enabled, node-to-node redirection goes through the tunnel.
	// BPF host routing is active whenever legacy host routing is disabled.
	bpfHostRoutingEnabled := !c.EnableHostLegacyRouting
	return kprCfg.EnableNodePort || bpfHostRoutingEnabled || wireguardEnabled
}
// validateIPv6ClusterAllocCIDR parses and validates the IPv6 cluster
// allocation CIDR, requiring a /64 prefix. On success it caches the masked
// base address in IPv6ClusterAllocCIDRBase.
func (c *DaemonConfig) validateIPv6ClusterAllocCIDR() error {
	// Named ipNet (not "cidr") to avoid shadowing the imported cidr package.
	ip, ipNet, err := net.ParseCIDR(c.IPv6ClusterAllocCIDR)
	if err != nil {
		return err
	}
	if ones, _ := ipNet.Mask.Size(); ones != 64 {
		return fmt.Errorf("Prefix length must be /64")
	}
	c.IPv6ClusterAllocCIDRBase = ip.Mask(ipNet.Mask).String()
	return nil
}
// validateIPv6NAT46x64CIDR parses and validates the NAT46x64 CIDR, requiring
// a /96 prefix, and caches its masked base address on success.
func (c *DaemonConfig) validateIPv6NAT46x64CIDR() error {
	prefix, err := netip.ParsePrefix(c.IPv6NAT46x64CIDR)
	if err != nil {
		return err
	}
	if prefix.Bits() != 96 {
		return fmt.Errorf("Prefix length must be /96")
	}
	c.IPv6NAT46x64CIDRBase = prefix.Masked().Addr()
	return nil
}
// validateContainerIPLocalReservedPorts checks that the configured reserved
// ports are either empty, the special "auto" value, or a comma-separated
// list of ports and port ranges (e.g. "80,443,1000-2000").
func (c *DaemonConfig) validateContainerIPLocalReservedPorts() error {
	if c.ContainerIPLocalReservedPorts == "" || c.ContainerIPLocalReservedPorts == defaults.ContainerIPLocalReservedPortsAuto {
		return nil
	}
	// The pattern is recompiled on each call; this only runs once during
	// configuration validation, so the cost is negligible.
	if regexp.MustCompile(`^(\d+(-\d+)?)(,\d+(-\d+)?)*$`).MatchString(c.ContainerIPLocalReservedPorts) {
		return nil
	}
	// Fixed duplicated word in the error message ("of of" -> "of").
	return fmt.Errorf("Invalid comma separated list of ranges for %s option", ContainerIPLocalReservedPorts)
}
// Validate validates the daemon configuration.
//
// Checks run in a fixed order and the first failure is returned, so the
// reported error reflects the earliest invalid option. Returns nil when the
// configuration is acceptable.
func (c *DaemonConfig) Validate(vp *viper.Viper) error {
	// CIDR options must parse and have the required prefix lengths.
	if err := c.validateIPv6ClusterAllocCIDR(); err != nil {
		return fmt.Errorf("unable to parse CIDR value '%s' of option --%s: %w",
			c.IPv6ClusterAllocCIDR, IPv6ClusterAllocCIDRName, err)
	}
	if err := c.validateIPv6NAT46x64CIDR(); err != nil {
		return fmt.Errorf("unable to parse internal CIDR value '%s': %w",
			c.IPv6NAT46x64CIDR, err)
	}
	// Scalar sanity checks.
	if c.MTU < 0 {
		return fmt.Errorf("MTU '%d' cannot be negative", c.MTU)
	}
	if c.RouteMetric < 0 {
		return fmt.Errorf("RouteMetric '%d' cannot be negative", c.RouteMetric)
	}
	// IPAM/address-family combinations that are not supported.
	if c.IPAM == ipamOption.IPAMENI && c.EnableIPv6 {
		return fmt.Errorf("IPv6 cannot be enabled in ENI IPAM mode")
	}
	// NDP requires IPv6 and a multicast device to operate.
	if c.EnableIPv6NDP {
		if !c.EnableIPv6 {
			return fmt.Errorf("IPv6NDP cannot be enabled when IPv6 is not enabled")
		}
		if len(c.IPv6MCastDevice) == 0 {
			return fmt.Errorf("IPv6NDP cannot be enabled without %s", IPv6MCastDevice)
		}
	}
	// Routing mode must be one of the two supported values.
	switch c.RoutingMode {
	case RoutingModeNative, RoutingModeTunnel:
	default:
		return fmt.Errorf("invalid routing mode %q, valid modes = {%q, %q}",
			c.RoutingMode, RoutingModeTunnel, RoutingModeNative)
	}
	// Cluster identity (ID/name/max clusters) validation via clustermesh types.
	cinfo := clustermeshTypes.ClusterInfo{
		ID:                   c.ClusterID,
		Name:                 c.ClusterName,
		MaxConnectedClusters: c.MaxConnectedClusters,
	}
	if err := cinfo.InitClusterIDMax(); err != nil {
		return err
	}
	if err := cinfo.Validate(); err != nil {
		return err
	}
	// Remaining structural checks, each delegated to a dedicated helper.
	if err := c.checkMapSizeLimits(); err != nil {
		return err
	}
	if err := c.checkIPv4NativeRoutingCIDR(); err != nil {
		return err
	}
	if err := c.checkIPv6NativeRoutingCIDR(); err != nil {
		return err
	}
	if err := c.checkIPAMDelegatedPlugin(); err != nil {
		return err
	}
	// VTEP configuration is only validated when the feature is enabled.
	if c.EnableVTEP {
		err := c.validateVTEP(vp)
		if err != nil {
			return fmt.Errorf("Failed to validate VTEP configuration: %w", err)
		}
	}
	if err := c.validatePolicyCIDRMatchMode(); err != nil {
		return err
	}
	if err := c.validateContainerIPLocalReservedPorts(); err != nil {
		return err
	}
	return nil
}
// ReadDirConfig reads the given directory and returns a map that maps the
// filename to the contents of that file.
//
// Directories and unreadable entries are skipped with a warning; a missing
// directory yields an empty map rather than an error. File contents are
// whitespace-trimmed strings keyed by the entry's base name.
func ReadDirConfig(logger *slog.Logger, dirName string) (map[string]any, error) {
	m := map[string]any{}
	files, err := os.ReadDir(dirName)
	if err != nil && !os.IsNotExist(err) {
		return nil, fmt.Errorf("unable to read configuration directory: %w", err)
	}
	for _, f := range files {
		if f.IsDir() {
			continue
		}
		fName := filepath.Join(dirName, f.Name())
		// the file can still be a symlink to a directory
		if f.Type()&os.ModeSymlink == 0 {
			absFileName, err := filepath.EvalSymlinks(fName)
			if err != nil {
				// Bug fix: log the path we tried to resolve (fName);
				// absFileName is always empty when EvalSymlinks fails.
				logger.Warn("Unable to read configuration file",
					logfields.Error, err,
					logfields.File, fName,
				)
				continue
			}
			fName = absFileName
		}
		// Stat follows symlinks, so symlinks to directories are caught here.
		fi, err := os.Stat(fName)
		if err != nil {
			logger.Warn("Unable to read configuration file",
				logfields.Error, err,
				logfields.File, fName,
			)
			continue
		}
		if fi.Mode().IsDir() {
			continue
		}
		b, err := os.ReadFile(fName)
		if err != nil {
			logger.Warn("Unable to read configuration file",
				logfields.Error, err,
				logfields.File, fName,
			)
			continue
		}
		m[f.Name()] = string(bytes.TrimSpace(b))
	}
	return m, nil
}
// MergeConfig merges the given configuration map with viper's configuration.
func MergeConfig(vp *viper.Viper, m map[string]any) error {
	if err := vp.MergeConfigMap(m); err != nil {
		return fmt.Errorf("unable to read merge directory configuration: %w", err)
	}
	return nil
}
// ReplaceDeprecatedFields replaces the deprecated options set with the new set
// of options that overwrite the deprecated ones.
// This function replaces the deprecated fields used by environment variables
// with a different name than the option they are setting. This also replaces
// the deprecated names used in the Kubernetes ConfigMap.
// Once an entry is removed from this function it must also be removed from
// daemon_main.go, and users must be warned that neither the old environment
// variable nor the old option in the configuration map has any effect.
func ReplaceDeprecatedFields(m map[string]any) {
	deprecatedFields := map[string]string{
		"monitor-aggregation-level":   MonitorAggregationName,
		"ct-global-max-entries-tcp":   CTMapEntriesGlobalTCPName,
		"ct-global-max-entries-other": CTMapEntriesGlobalAnyName,
	}
	for deprecated, replacement := range deprecatedFields {
		deprecatedValue, isSet := m[deprecated]
		if !isSet {
			continue
		}
		// Only copy the deprecated value if the new option is not already set.
		if _, alreadySet := m[replacement]; !alreadySet {
			m[replacement] = deprecatedValue
		}
	}
}
// parseExcludedLocalAddresses parses the given strings as CIDR prefixes and
// appends them to the list of excluded local addresses. The first string that
// fails to parse aborts with an error.
func (c *DaemonConfig) parseExcludedLocalAddresses(s []string) error {
	for _, addr := range s {
		prefix, err := netip.ParsePrefix(addr)
		if err != nil {
			return fmt.Errorf("unable to parse excluded local address %s: %w", addr, err)
		}
		c.ExcludeLocalAddresses = append(c.ExcludeLocalAddresses, prefix)
	}
	return nil
}
// SetupLogging sets all logging-related options with the values from viper,
// then setup logging based on these options and the given tag.
//
// This allows initializing logging as early as possible, then log entries
// produced below in Populate can honor the requested logging configurations.
func (c *DaemonConfig) SetupLogging(vp *viper.Viper, tag string) {
	c.Debug = vp.GetBool(DebugArg)
	c.LogDriver = vp.GetStringSlice(LogDriver)
	// Log options are a string map; a parse failure here is fatal since no
	// logger has been configured yet.
	if m, err := command.GetStringMapStringE(vp, LogOpt); err != nil {
		// slogloggercheck: log fatal errors using the default logger before it's initialized.
		logging.Fatal(logging.DefaultSlogLogger, fmt.Sprintf("unable to parse %s", LogOpt), logfields.Error, err)
	} else {
		c.LogOpt = m
	}
	// Configure the actual logging backend from the options read above.
	if err := logging.SetupLogging(c.LogDriver, c.LogOpt, tag, c.Debug); err != nil {
		// slogloggercheck: log fatal errors using the default logger before it's initialized.
		logging.Fatal(logging.DefaultSlogLogger, "Unable to set up logging", logfields.Error, err)
	}
}
// Populate sets all non-logging options with the values from viper.
//
// This function may emit logs. Consider calling SetupLogging before this
// to make sure that they honor logging-related options.
func (c *DaemonConfig) Populate(logger *slog.Logger, vp *viper.Viper) {
	var err error

	// Plain option reads: each line copies one viper option into the config.
	c.AgentHealthPort = vp.GetInt(AgentHealthPort)
	c.ClusterHealthPort = vp.GetInt(ClusterHealthPort)
	c.ClusterMeshHealthPort = vp.GetInt(ClusterMeshHealthPort)
	c.AllowICMPFragNeeded = vp.GetBool(AllowICMPFragNeeded)
	c.AllowLocalhost = vp.GetString(AllowLocalhost)
	c.AnnotateK8sNode = vp.GetBool(AnnotateK8sNode)
	c.AutoCreateCiliumNodeResource = vp.GetBool(AutoCreateCiliumNodeResource)
	c.BPFRoot = vp.GetString(BPFRoot)
	c.CGroupRoot = vp.GetString(CGroupRoot)
	c.ClusterID = vp.GetUint32(clustermeshTypes.OptClusterID)
	c.ClusterName = vp.GetString(clustermeshTypes.OptClusterName)
	c.MaxConnectedClusters = vp.GetUint32(clustermeshTypes.OptMaxConnectedClusters)
	c.DatapathMode = vp.GetString(DatapathMode)
	c.DebugVerbose = vp.GetStringSlice(DebugVerbose)
	c.EnableIPv4 = vp.GetBool(EnableIPv4Name)
	c.EnableIPv6 = vp.GetBool(EnableIPv6Name)
	c.EnableIPv6NDP = vp.GetBool(EnableIPv6NDPName)
	c.EnableSRv6 = vp.GetBool(EnableSRv6)
	c.SRv6EncapMode = vp.GetString(SRv6EncapModeName)
	c.EnableSCTP = vp.GetBool(EnableSCTPName)
	c.IPv6MCastDevice = vp.GetString(IPv6MCastDevice)
	c.EnableIPSec = vp.GetBool(EnableIPSecName)
	c.EnableL2Announcements = vp.GetBool(EnableL2Announcements)
	c.L2AnnouncerLeaseDuration = vp.GetDuration(L2AnnouncerLeaseDuration)
	c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline)
	c.L2AnnouncerRetryPeriod = vp.GetDuration(L2AnnouncerRetryPeriod)
	c.EnableXDPPrefilter = vp.GetBool(EnableXDPPrefilter)
	c.EnableTCX = vp.GetBool(EnableTCX)
	c.DisableCiliumEndpointCRD = vp.GetBool(DisableCiliumEndpointCRDName)
	c.MasqueradeInterfaces = vp.GetStringSlice(MasqueradeInterfaces)
	c.BPFSocketLBHostnsOnly = vp.GetBool(BPFSocketLBHostnsOnly)
	c.EnableSocketLBTracing = vp.GetBool(EnableSocketLBTracing)
	c.EnableSocketLBPodConnectionTermination = vp.GetBool(EnableSocketLBPodConnectionTermination)
	c.EnableBPFTProxy = vp.GetBool(EnableBPFTProxy)
	c.EnableAutoDirectRouting = vp.GetBool(EnableAutoDirectRoutingName)
	c.DirectRoutingSkipUnreachable = vp.GetBool(DirectRoutingSkipUnreachableName)
	c.EnableEndpointRoutes = vp.GetBool(EnableEndpointRoutes)
	c.EnableHealthChecking = vp.GetBool(EnableHealthChecking)
	c.EnableEndpointHealthChecking = vp.GetBool(EnableEndpointHealthChecking)
	c.EnableHealthCheckLoadBalancerIP = vp.GetBool(EnableHealthCheckLoadBalancerIP)
	c.HealthCheckICMPFailureThreshold = vp.GetInt(HealthCheckICMPFailureThreshold)
	c.EnableLocalNodeRoute = vp.GetBool(EnableLocalNodeRoute)
	c.EnablePolicy = strings.ToLower(vp.GetString(EnablePolicy))
	c.EnableL7Proxy = vp.GetBool(EnableL7Proxy)
	c.EnableTracing = vp.GetBool(EnableTracing)
	c.EnableIPIPTermination = vp.GetBool(EnableIPIPTermination)
	c.EnableIPIPDevices = c.EnableIPIPTermination
	c.EnableUnreachableRoutes = vp.GetBool(EnableUnreachableRoutes)
	c.EnableHostLegacyRouting = vp.GetBool(EnableHostLegacyRouting)
	c.NodePortBindProtection = vp.GetBool(NodePortBindProtection)
	c.NodePortNat46X64 = vp.GetBool(LoadBalancerNat46X64)
	c.EnableAutoProtectNodePortRange = vp.GetBool(EnableAutoProtectNodePortRange)
	c.EnableMKE = vp.GetBool(EnableMKE)
	c.CgroupPathMKE = vp.GetString(CgroupPathMKE)
	c.EnableHostFirewall = vp.GetBool(EnableHostFirewall)
	c.EnableLocalRedirectPolicy = vp.GetBool(EnableLocalRedirectPolicy)
	c.EncryptInterface = vp.GetStringSlice(EncryptInterface)
	c.EncryptNode = vp.GetBool(EncryptNode)
	c.IdentityChangeGracePeriod = vp.GetDuration(IdentityChangeGracePeriod)
	c.CiliumIdentityMaxJitter = vp.GetDuration(CiliumIdentityMaxJitter)
	c.IdentityRestoreGracePeriod = vp.GetDuration(IdentityRestoreGracePeriod)
	c.IPAM = vp.GetString(IPAM)
	c.IPAMDefaultIPPool = vp.GetString(IPAMDefaultIPPool)
	c.IPv4Range = vp.GetString(IPv4Range)
	c.IPv4NodeAddr = vp.GetString(IPv4NodeAddr)
	c.IPv4ServiceRange = vp.GetString(IPv4ServiceRange)
	c.IPv6ClusterAllocCIDR = vp.GetString(IPv6ClusterAllocCIDRName)
	c.IPv6NodeAddr = vp.GetString(IPv6NodeAddr)
	c.IPv6Range = vp.GetString(IPv6Range)
	c.IPv6ServiceRange = vp.GetString(IPv6ServiceRange)
	c.K8sRequireIPv4PodCIDR = vp.GetBool(K8sRequireIPv4PodCIDRName)
	c.K8sRequireIPv6PodCIDR = vp.GetBool(K8sRequireIPv6PodCIDRName)
	c.K8sSyncTimeout = vp.GetDuration(K8sSyncTimeoutName)
	c.AllocatorListTimeout = vp.GetDuration(AllocatorListTimeoutName)
	c.KeepConfig = vp.GetBool(KeepConfig)
	c.LabelPrefixFile = vp.GetString(LabelPrefixFile)
	c.Labels = vp.GetStringSlice(Labels)
	c.LibDir = vp.GetString(LibDir)
	c.LogSystemLoadConfig = vp.GetBool(LogSystemLoadConfigName)
	c.ServiceLoopbackIPv4 = vp.GetString(ServiceLoopbackIPv4)
	c.LocalRouterIPv4 = vp.GetString(LocalRouterIPv4)
	c.LocalRouterIPv6 = vp.GetString(LocalRouterIPv6)
	c.EnableBPFClockProbe = vp.GetBool(EnableBPFClockProbe)
	c.EnableIPMasqAgent = vp.GetBool(EnableIPMasqAgent)
	// Either the new or the deprecated IPv4-specific flag enables the feature.
	c.EnableEgressGateway = vp.GetBool(EnableEgressGateway) || vp.GetBool(EnableIPv4EgressGateway)
	c.EnableEnvoyConfig = vp.GetBool(EnableEnvoyConfig)
	c.AgentHealthRequireK8sConnectivity = vp.GetBool(AgentHealthRequireK8sConnectivity)
	c.InstallIptRules = vp.GetBool(InstallIptRules)
	c.IPSecKeyFile = vp.GetString(IPSecKeyFileName)
	c.IPsecKeyRotationDuration = vp.GetDuration(IPsecKeyRotationDuration)
	c.EnableIPsecKeyWatcher = vp.GetBool(EnableIPsecKeyWatcher)
	c.EnableIPSecXfrmStateCaching = vp.GetBool(EnableIPSecXfrmStateCaching)
	c.MonitorAggregation = vp.GetString(MonitorAggregationName)
	c.MonitorAggregationInterval = vp.GetDuration(MonitorAggregationInterval)
	c.MTU = vp.GetInt(MTUName)
	c.PreAllocateMaps = vp.GetBool(PreAllocateMapsName)
	c.ProcFs = vp.GetString(ProcFs)
	c.RestoreState = vp.GetBool(Restore)
	c.RouteMetric = vp.GetInt(RouteMetric)
	c.RunDir = vp.GetString(StateDir)
	c.ExternalEnvoyProxy = vp.GetBool(ExternalEnvoyProxy)
	c.SocketPath = vp.GetString(SocketPath)
	c.TracePayloadlen = vp.GetInt(TracePayloadlen)
	c.TracePayloadlenOverlay = vp.GetInt(TracePayloadlenOverlay)
	c.Version = vp.GetString(Version)
	c.PolicyTriggerInterval = vp.GetDuration(PolicyTriggerInterval)
	c.CTMapEntriesTimeoutTCP = vp.GetDuration(CTMapEntriesTimeoutTCPName)
	c.CTMapEntriesTimeoutAny = vp.GetDuration(CTMapEntriesTimeoutAnyName)
	c.CTMapEntriesTimeoutSVCTCP = vp.GetDuration(CTMapEntriesTimeoutSVCTCPName)
	c.CTMapEntriesTimeoutSVCTCPGrace = vp.GetDuration(CTMapEntriesTimeoutSVCTCPGraceName)
	c.CTMapEntriesTimeoutSVCAny = vp.GetDuration(CTMapEntriesTimeoutSVCAnyName)
	c.CTMapEntriesTimeoutSYN = vp.GetDuration(CTMapEntriesTimeoutSYNName)
	c.CTMapEntriesTimeoutFIN = vp.GetDuration(CTMapEntriesTimeoutFINName)
	c.PolicyAuditMode = vp.GetBool(PolicyAuditModeArg)
	c.PolicyAccounting = vp.GetBool(PolicyAccountingArg)
	c.EnableIPv4FragmentsTracking = vp.GetBool(EnableIPv4FragmentsTrackingName)
	c.EnableIPv6FragmentsTracking = vp.GetBool(EnableIPv6FragmentsTrackingName)
	c.FragmentsMapEntries = vp.GetInt(FragmentsMapEntriesName)
	c.LoadBalancerRSSv4CIDR = vp.GetString(LoadBalancerRSSv4CIDR)
	c.LoadBalancerRSSv6CIDR = vp.GetString(LoadBalancerRSSv6CIDR)
	c.LoadBalancerIPIPSockMark = vp.GetBool(LoadBalancerIPIPSockMark)
	c.InstallNoConntrackIptRules = vp.GetBool(InstallNoConntrackIptRules)
	c.ContainerIPLocalReservedPorts = vp.GetString(ContainerIPLocalReservedPorts)
	c.EnableCustomCalls = vp.GetBool(EnableCustomCallsName)
	c.BGPSecretsNamespace = vp.GetString(BGPSecretsNamespace)
	c.EnableNat46X64Gateway = vp.GetBool(EnableNat46X64Gateway)
	c.EnableRemoteNodeMasquerade = vp.GetBool(EnableRemoteNodeMasquerade)
	// Masquerading per family is only effective if that family is enabled.
	c.EnableIPv4Masquerade = vp.GetBool(EnableIPv4Masquerade) && c.EnableIPv4
	c.EnableIPv6Masquerade = vp.GetBool(EnableIPv6Masquerade) && c.EnableIPv6
	c.EnableBPFMasquerade = vp.GetBool(EnableBPFMasquerade)
	c.EnableMasqueradeRouteSource = vp.GetBool(EnableMasqueradeRouteSource)
	c.EnablePMTUDiscovery = vp.GetBool(EnablePMTUDiscovery)
	c.IPv6NAT46x64CIDR = defaults.IPv6NAT46x64CIDR
	c.IPAMCiliumNodeUpdateRate = vp.GetDuration(IPAMCiliumNodeUpdateRate)
	c.BPFDistributedLRU = vp.GetBool(BPFDistributedLRU)
	c.BPFEventsDropEnabled = vp.GetBool(BPFEventsDropEnabled)
	c.BPFEventsPolicyVerdictEnabled = vp.GetBool(BPFEventsPolicyVerdictEnabled)
	c.BPFEventsTraceEnabled = vp.GetBool(BPFEventsTraceEnabled)
	c.BPFConntrackAccounting = vp.GetBool(BPFConntrackAccounting)
	c.EnableIPSecEncryptedOverlay = vp.GetBool(EnableIPSecEncryptedOverlay)
	c.BootIDFile = vp.GetString(BootIDFilename)
	c.EnableExtendedIPProtocols = vp.GetBool(EnableExtendedIPProtocols)

	// Validate the no-backend response mode; empty means "use the default".
	c.ServiceNoBackendResponse = vp.GetString(ServiceNoBackendResponse)
	switch c.ServiceNoBackendResponse {
	case ServiceNoBackendResponseReject, ServiceNoBackendResponseDrop:
	case "":
		c.ServiceNoBackendResponse = defaults.ServiceNoBackendResponse
	default:
		logging.Fatal(logger, "Invalid value for --%s: %s (must be 'reject' or 'drop')", ServiceNoBackendResponse, c.ServiceNoBackendResponse)
	}

	c.populateLoadBalancerSettings(logger, vp)
	c.EgressMultiHomeIPRuleCompat = vp.GetBool(EgressMultiHomeIPRuleCompat)
	c.InstallUplinkRoutesForDelegatedIPAM = vp.GetBool(InstallUplinkRoutesForDelegatedIPAM)

	// VLAN IDs must be integers; reject anything else early.
	vlanBPFBypassIDs := vp.GetStringSlice(VLANBPFBypass)
	c.VLANBPFBypass = make([]int, 0, len(vlanBPFBypassIDs))
	for _, vlanIDStr := range vlanBPFBypassIDs {
		vlanID, err := strconv.Atoi(vlanIDStr)
		if err != nil {
			logging.Fatal(logger, fmt.Sprintf("Cannot parse vlan ID integer from --%s option", VLANBPFBypass), logfields.Error, err)
		}
		c.VLANBPFBypass = append(c.VLANBPFBypass, vlanID)
	}
	c.DisableExternalIPMitigation = vp.GetBool(DisableExternalIPMitigation)

	// TC filter priority is stored as uint16; reject out-of-range values.
	tcFilterPrio := vp.GetUint32(TCFilterPriority)
	if tcFilterPrio > math.MaxUint16 {
		logging.Fatal(logger, fmt.Sprintf("%s cannot be higher than %d", TCFilterPriority, math.MaxUint16))
	}
	c.TCFilterPriority = uint16(tcFilterPrio)

	c.RoutingMode = vp.GetString(RoutingMode)

	if vp.IsSet(AddressScopeMax) {
		c.AddressScopeMax, err = ip.ParseScope(vp.GetString(AddressScopeMax))
		if err != nil {
			logging.Fatal(logger, fmt.Sprintf("Cannot parse scope integer from --%s option", AddressScopeMax), logfields.Error, err)
		}
	} else {
		c.AddressScopeMax = defaults.AddressScopeMax
	}

	// NAT46/64 requires a dual-stack configuration.
	if c.EnableNat46X64Gateway || c.NodePortNat46X64 {
		if !c.EnableIPv4 || !c.EnableIPv6 {
			logging.Fatal(logger, fmt.Sprintf("%s requires both --%s and --%s enabled", EnableNat46X64Gateway, EnableIPv4Name, EnableIPv6Name))
		}
	}

	encryptionStrictModeEnabled := vp.GetBool(EnableEncryptionStrictMode)
	if encryptionStrictModeEnabled {
		if c.EnableIPv6 {
			logger.Info("WireGuard encryption strict mode only supports IPv4. IPv6 traffic is not protected and can be leaked.")
		}

		strictCIDR := vp.GetString(EncryptionStrictModeCIDR)
		c.EncryptionStrictModeCIDR, err = netip.ParsePrefix(strictCIDR)
		if err != nil {
			logging.Fatal(logger, fmt.Sprintf("Cannot parse CIDR %s from --%s option", strictCIDR, EncryptionStrictModeCIDR), logfields.Error, err)
		}

		if !c.EncryptionStrictModeCIDR.Addr().Is4() {
			logging.Fatal(logger, fmt.Sprintf("%s must be an IPv4 CIDR", EncryptionStrictModeCIDR))
		}

		c.EncryptionStrictModeAllowRemoteNodeIdentities = vp.GetBool(EncryptionStrictModeAllowRemoteNodeIdentities)
		c.EnableEncryptionStrictMode = encryptionStrictModeEnabled
	}

	ipv4NativeRoutingCIDR := vp.GetString(IPv4NativeRoutingCIDR)
	if ipv4NativeRoutingCIDR != "" {
		c.IPv4NativeRoutingCIDR, err = cidr.ParseCIDR(ipv4NativeRoutingCIDR)
		if err != nil {
			logging.Fatal(logger, fmt.Sprintf("Unable to parse CIDR '%s'", ipv4NativeRoutingCIDR), logfields.Error, err)
		}
		if len(c.IPv4NativeRoutingCIDR.IP) != net.IPv4len {
			logging.Fatal(logger, fmt.Sprintf("%s must be an IPv4 CIDR", IPv4NativeRoutingCIDR))
		}
	}

	ipv6NativeRoutingCIDR := vp.GetString(IPv6NativeRoutingCIDR)
	if ipv6NativeRoutingCIDR != "" {
		c.IPv6NativeRoutingCIDR, err = cidr.ParseCIDR(ipv6NativeRoutingCIDR)
		if err != nil {
			logging.Fatal(logger, fmt.Sprintf("Unable to parse CIDR '%s'", ipv6NativeRoutingCIDR), logfields.Error, err)
		}
		if len(c.IPv6NativeRoutingCIDR.IP) != net.IPv6len {
			logging.Fatal(logger, fmt.Sprintf("%s must be an IPv6 CIDR", IPv6NativeRoutingCIDR))
		}
	}

	if c.DirectRoutingSkipUnreachable && !c.EnableAutoDirectRouting {
		logging.Fatal(logger, fmt.Sprintf("Flag %s cannot be enabled when %s is not enabled. As if %s is then enabled, it may lead to unexpected behaviour causing network connectivity issues.", DirectRoutingSkipUnreachableName, EnableAutoDirectRoutingName, EnableAutoDirectRoutingName))
	}

	if err := c.calculateBPFMapSizes(logger, vp); err != nil {
		logging.Fatal(logger, err.Error())
	}

	c.ClockSource = ClockSourceKtime
	c.EnableIdentityMark = vp.GetBool(EnableIdentityMark)

	// toFQDNs options
	c.DNSPolicyUnloadOnShutdown = vp.GetBool(DNSPolicyUnloadOnShutdown)
	c.FQDNRegexCompileLRUSize = vp.GetUint(FQDNRegexCompileLRUSize)
	c.ToFQDNsMaxIPsPerHost = vp.GetInt(ToFQDNsMaxIPsPerHost)
	if maxZombies := vp.GetInt(ToFQDNsMaxDeferredConnectionDeletes); maxZombies >= 0 {
		c.ToFQDNsMaxDeferredConnectionDeletes = vp.GetInt(ToFQDNsMaxDeferredConnectionDeletes)
	} else {
		logging.Fatal(logger, fmt.Sprintf("%s must be positive, or 0 to disable deferred connection deletion",
			ToFQDNsMaxDeferredConnectionDeletes))
	}
	switch {
	case vp.IsSet(ToFQDNsMinTTL): // set by user
		c.ToFQDNsMinTTL = vp.GetInt(ToFQDNsMinTTL)
	default:
		c.ToFQDNsMinTTL = defaults.ToFQDNsMinTTL
	}
	c.ToFQDNsProxyPort = vp.GetInt(ToFQDNsProxyPort)
	c.ToFQDNsPreCache = vp.GetString(ToFQDNsPreCache)
	c.ToFQDNsIdleConnectionGracePeriod = vp.GetDuration(ToFQDNsIdleConnectionGracePeriod)
	c.FQDNProxyResponseMaxDelay = vp.GetDuration(FQDNProxyResponseMaxDelay)
	c.DNSProxyConcurrencyLimit = vp.GetInt(DNSProxyConcurrencyLimit)
	c.DNSProxyEnableTransparentMode = vp.GetBool(DNSProxyEnableTransparentMode)
	c.DNSProxyInsecureSkipTransparentModeCheck = vp.GetBool(DNSProxyInsecureSkipTransparentModeCheck)
	c.DNSProxyLockCount = vp.GetInt(DNSProxyLockCount)
	c.DNSProxyLockTimeout = vp.GetDuration(DNSProxyLockTimeout)
	c.DNSProxySocketLingerTimeout = vp.GetInt(DNSProxySocketLingerTimeout)
	c.FQDNRejectResponse = vp.GetString(FQDNRejectResponseCode)

	// Convert IP strings into net.IPNet types
	subnets, invalid := ip.ParseCIDRs(vp.GetStringSlice(IPv4PodSubnets))
	if len(invalid) > 0 {
		logger.Warn("IPv4PodSubnets parameter can not be parsed.",
			logfields.Subnets, invalid,
		)
	}
	c.IPv4PodSubnets = subnets

	subnets, invalid = ip.ParseCIDRs(vp.GetStringSlice(IPv6PodSubnets))
	if len(invalid) > 0 {
		logger.Warn("IPv6PodSubnets parameter can not be parsed.",
			logfields.Subnets, invalid,
		)
	}
	c.IPv6PodSubnets = subnets

	// OR together the requested TCP flags for monitor aggregation.
	monitorAggregationFlags := vp.GetStringSlice(MonitorAggregationFlags)
	var ctMonitorReportFlags uint16
	for i := range monitorAggregationFlags {
		value := strings.ToLower(monitorAggregationFlags[i])
		flag, exists := TCPFlags[value]
		if !exists {
			logging.Fatal(logger, fmt.Sprintf("Unable to parse TCP flag %q for %s!", value, MonitorAggregationFlags))
		}
		ctMonitorReportFlags |= flag
	}
	c.MonitorAggregationFlags = ctMonitorReportFlags

	// Map options
	// Fix: use the error-returning variant and check its own error. The
	// previous code called GetStringMapString (no error return) and tested
	// the stale function-level `err`, so parse failures were silently
	// ignored and the Fatal branch was unreachable.
	if m, err := command.GetStringMapStringE(vp, FixedIdentityMapping); err != nil {
		logging.Fatal(logger, fmt.Sprintf("unable to parse %s: %s", FixedIdentityMapping, err))
	} else if len(m) != 0 {
		c.FixedIdentityMapping = m
	}

	// Same fix as above for the zone mapping.
	if m, err := command.GetStringMapStringE(vp, FixedZoneMapping); err != nil {
		logging.Fatal(logger, fmt.Sprintf("unable to parse %s: %s", FixedZoneMapping, err))
	} else if len(m) != 0 {
		forward := make(map[string]uint8, len(m))
		reverse := make(map[uint8]string, len(m))
		for k, v := range m {
			bigN, _ := strconv.Atoi(v)
			n := uint8(bigN)
			if oldKey, ok := reverse[n]; ok && oldKey != k {
				logging.Fatal(logger, fmt.Sprintf("duplicate numeric ID entry for %s: %q and %q map to the same value %d", FixedZoneMapping, oldKey, k, n))
			}
			if oldN, ok := forward[k]; ok && oldN != n {
				logging.Fatal(logger, fmt.Sprintf("duplicate zone name entry for %s: %d and %d map to different values %s", FixedZoneMapping, oldN, n, k))
			}
			forward[k] = n
			reverse[n] = k
		}
		c.FixedZoneMapping = forward
		c.ReverseFixedZoneMapping = reverse
	}

	c.ConntrackGCInterval = vp.GetDuration(ConntrackGCInterval)
	c.ConntrackGCMaxInterval = vp.GetDuration(ConntrackGCMaxInterval)

	// Rate and burst limits for BPF events must be set together.
	bpfEventsDefaultRateLimit := vp.GetUint32(BPFEventsDefaultRateLimit)
	bpfEventsDefaultBurstLimit := vp.GetUint32(BPFEventsDefaultBurstLimit)
	switch {
	case bpfEventsDefaultRateLimit > 0 && bpfEventsDefaultBurstLimit == 0:
		logging.Fatal(logger, "invalid BPF events default config: burst limit must also be specified when rate limit is provided")
	case bpfEventsDefaultRateLimit == 0 && bpfEventsDefaultBurstLimit > 0:
		logging.Fatal(logger, "invalid BPF events default config: rate limit must also be specified when burst limit is provided")
	default:
		// Reuse the already-read values instead of querying viper again.
		c.BPFEventsDefaultRateLimit = bpfEventsDefaultRateLimit
		c.BPFEventsDefaultBurstLimit = bpfEventsDefaultBurstLimit
	}

	// Defaults first, then user-provided buffer configs override them.
	c.bpfMapEventConfigs = make(BPFEventBufferConfigs)
	parseBPFMapEventConfigs(c.bpfMapEventConfigs, defaults.BPFEventBufferConfigs)
	if m, err := command.GetStringMapStringE(vp, BPFMapEventBuffers); err != nil {
		logging.Fatal(logger, fmt.Sprintf("unable to parse %s: %s", BPFMapEventBuffers, err))
	} else {
		parseBPFMapEventConfigs(c.bpfMapEventConfigs, m)
	}

	if err := c.parseExcludedLocalAddresses(vp.GetStringSlice(ExcludeLocalAddress)); err != nil {
		logging.Fatal(logger, "Unable to parse excluded local addresses", logfields.Error, err)
	}

	// Ensure CiliumEndpointSlice is enabled only if CiliumEndpointCRD is enabled too.
	c.EnableCiliumEndpointSlice = vp.GetBool(EnableCiliumEndpointSlice)
	if c.EnableCiliumEndpointSlice && c.DisableCiliumEndpointCRD {
		logging.Fatal(logger, fmt.Sprintf("Running Cilium with %s=%t requires %s set to false to enable CiliumEndpoint CRDs.",
			EnableCiliumEndpointSlice, c.EnableCiliumEndpointSlice, DisableCiliumEndpointCRDName))
	}

	// To support K8s NetworkPolicy
	c.EnableK8sNetworkPolicy = vp.GetBool(EnableK8sNetworkPolicy)
	c.PolicyCIDRMatchMode = vp.GetStringSlice(PolicyCIDRMatchMode)
	c.EnableNodeSelectorLabels = vp.GetBool(EnableNodeSelectorLabels)
	c.NodeLabels = vp.GetStringSlice(NodeLabels)
	c.EnableCiliumNetworkPolicy = vp.GetBool(EnableCiliumNetworkPolicy)
	c.EnableCiliumClusterwideNetworkPolicy = vp.GetBool(EnableCiliumClusterwideNetworkPolicy)

	c.IdentityAllocationMode = vp.GetString(IdentityAllocationMode)
	switch c.IdentityAllocationMode {
	// This is here for tests. Some call Populate without the normal init
	case "":
		c.IdentityAllocationMode = IdentityAllocationModeKVstore
	case IdentityAllocationModeKVstore, IdentityAllocationModeCRD, IdentityAllocationModeDoubleWriteReadKVstore, IdentityAllocationModeDoubleWriteReadCRD:
		// c.IdentityAllocationMode is set above
	default:
		logging.Fatal(logger, fmt.Sprintf("Invalid identity allocation mode %q. It must be one of %s, %s or %s / %s", c.IdentityAllocationMode, IdentityAllocationModeKVstore, IdentityAllocationModeCRD, IdentityAllocationModeDoubleWriteReadKVstore, IdentityAllocationModeDoubleWriteReadCRD))
	}

	// Without a kvstore, identities and endpoints must come from CRDs.
	theKVStore := vp.GetString(KVStore)
	if theKVStore == "" {
		if c.IdentityAllocationMode != IdentityAllocationModeCRD {
			logger.Warn(fmt.Sprintf("Running Cilium with %q=%q requires identity allocation via CRDs. Changing %s to %q", KVStore, theKVStore, IdentityAllocationMode, IdentityAllocationModeCRD))
			c.IdentityAllocationMode = IdentityAllocationModeCRD
		}
		if c.DisableCiliumEndpointCRD && NetworkPolicyEnabled(c) {
			logger.Warn(fmt.Sprintf("Running Cilium with %q=%q requires endpoint CRDs when network policy enforcement system is enabled. Changing %s to %t", KVStore, theKVStore, DisableCiliumEndpointCRDName, false))
			c.DisableCiliumEndpointCRD = false
		}
	}

	switch c.IPAM {
	case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool:
		if c.EnableIPv4 {
			c.K8sRequireIPv4PodCIDR = true
		}
		if c.EnableIPv6 {
			c.K8sRequireIPv6PodCIDR = true
		}
	}

	if m, err := command.GetStringMapStringE(vp, IPAMMultiPoolPreAllocation); err != nil {
		logging.Fatal(logger, fmt.Sprintf("unable to parse %s: %s", IPAMMultiPoolPreAllocation, err))
	} else {
		c.IPAMMultiPoolPreAllocation = m
	}
	if len(c.IPAMMultiPoolPreAllocation) == 0 {
		// Default to the same value as IPAMDefaultIPPool
		c.IPAMMultiPoolPreAllocation = map[string]string{c.IPAMDefaultIPPool: "8"}
	}

	// Hidden options
	c.ConfigFile = vp.GetString(ConfigFile)
	c.HTTP403Message = vp.GetString(HTTP403Message)
	c.K8sNamespace = vp.GetString(K8sNamespaceName)
	c.AgentNotReadyNodeTaintKey = vp.GetString(AgentNotReadyNodeTaintKeyName)
	c.MaxControllerInterval = vp.GetUint(MaxCtrlIntervalName)
	c.EndpointQueueSize = sanitizeIntParam(logger, vp, EndpointQueueSize, defaults.EndpointQueueSize)
	c.EnableICMPRules = vp.GetBool(EnableICMPRules)
	c.UseCiliumInternalIPForIPsec = vp.GetBool(UseCiliumInternalIPForIPsec)
	c.BypassIPAvailabilityUponRestore = vp.GetBool(BypassIPAvailabilityUponRestore)

	// VTEP integration enable option
	c.EnableVTEP = vp.GetBool(EnableVTEP)

	// Enable BGP control plane features
	c.EnableBGPControlPlane = vp.GetBool(EnableBGPControlPlane)

	// Enable BGP control plane status reporting
	c.EnableBGPControlPlaneStatusReport = vp.GetBool(EnableBGPControlPlaneStatusReport)

	// BGP router-id allocation mode
	c.BGPRouterIDAllocationMode = vp.GetString(BGPRouterIDAllocationMode)
	c.BGPRouterIDAllocationIPPool = vp.GetString(BGPRouterIDAllocationIPPool)

	// Support failure-mode for policy map overflow
	c.EnableEndpointLockdownOnPolicyOverflow = vp.GetBool(EnableEndpointLockdownOnPolicyOverflow)

	// Parse node label patterns; invalid patterns are logged and skipped
	// rather than being fatal.
	nodeLabelPatterns := vp.GetStringSlice(ExcludeNodeLabelPatterns)
	for _, pattern := range nodeLabelPatterns {
		r, err := regexp.Compile(pattern)
		if err != nil {
			logger.Error(fmt.Sprintf("Unable to compile exclude node label regex pattern %s", pattern), logfields.Error, err)
			continue
		}
		c.ExcludeNodeLabelPatterns = append(c.ExcludeNodeLabelPatterns, r)
	}

	if theKVStore != "" {
		c.IdentityRestoreGracePeriod = defaults.IdentityRestoreGracePeriodKvstore
	}

	c.EnableSourceIPVerification = vp.GetBool(EnableSourceIPVerification)

	// Allow the range [0.0, 1.0].
	connectivityFreqRatio := vp.GetFloat64(ConnectivityProbeFrequencyRatio)
	if 0.0 <= connectivityFreqRatio && connectivityFreqRatio <= 1.0 {
		c.ConnectivityProbeFrequencyRatio = connectivityFreqRatio
	} else {
		logger.Warn(
			"specified connectivity probe frequency ratio must be in the range [0.0, 1.0], using default",
			logfields.Ratio, connectivityFreqRatio,
		)
		c.ConnectivityProbeFrequencyRatio = defaults.ConnectivityProbeFrequencyRatio
	}
}
// populateLoadBalancerSettings reads the load-balancer acceleration options.
// The deprecated NodePort flag, when explicitly set, overrides the new
// LoadBalancer flag so existing setups keep working; setting both flags to
// conflicting values is fatal.
func (c *DaemonConfig) populateLoadBalancerSettings(logger *slog.Logger, vp *viper.Viper) {
	c.NodePortAcceleration = vp.GetString(LoadBalancerAcceleration)
	if !vp.IsSet(NodePortAcceleration) {
		return
	}
	// Old setting explicitly provided by the user: let it win over the new
	// one in order to not break existing setups.
	previous := c.NodePortAcceleration
	c.NodePortAcceleration = vp.GetString(NodePortAcceleration)
	if vp.IsSet(LoadBalancerAcceleration) && previous != c.NodePortAcceleration {
		logging.Fatal(logger, fmt.Sprintf("Both --%s and --%s were set. Only use --%s instead.",
			LoadBalancerAcceleration, NodePortAcceleration, LoadBalancerAcceleration))
	}
}
// checkMapSizeLimits validates the configured BPF map sizes against their
// allowed minimums and maximums. As a special case, a NAT table left at its
// default size is shrunk to fit the configured CT table sizes rather than
// being rejected.
func (c *DaemonConfig) checkMapSizeLimits() error {
	switch {
	case c.AuthMapEntries < AuthMapEntriesMin:
		return fmt.Errorf("specified AuthMap max entries %d must be greater or equal to %d", c.AuthMapEntries, AuthMapEntriesMin)
	case c.AuthMapEntries > AuthMapEntriesMax:
		return fmt.Errorf("specified AuthMap max entries %d must not exceed maximum %d", c.AuthMapEntries, AuthMapEntriesMax)
	case c.CTMapEntriesGlobalTCP < LimitTableMin || c.CTMapEntriesGlobalAny < LimitTableMin:
		return fmt.Errorf("specified CT tables values %d/%d must be greater or equal to %d",
			c.CTMapEntriesGlobalTCP, c.CTMapEntriesGlobalAny, LimitTableMin)
	case c.CTMapEntriesGlobalTCP > LimitTableMax || c.CTMapEntriesGlobalAny > LimitTableMax:
		return fmt.Errorf("specified CT tables values %d/%d must not exceed maximum %d",
			c.CTMapEntriesGlobalTCP, c.CTMapEntriesGlobalAny, LimitTableMax)
	case c.NATMapEntriesGlobal < LimitTableMin:
		return fmt.Errorf("specified NAT table size %d must be greater or equal to %d",
			c.NATMapEntriesGlobal, LimitTableMin)
	case c.NATMapEntriesGlobal > LimitTableMax:
		return fmt.Errorf("specified NAT tables size %d must not exceed maximum %d",
			c.NATMapEntriesGlobal, LimitTableMax)
	}

	if ctTotal := c.CTMapEntriesGlobalTCP + c.CTMapEntriesGlobalAny; c.NATMapEntriesGlobal > ctTotal {
		if c.NATMapEntriesGlobal != NATMapEntriesGlobalDefault {
			return fmt.Errorf("specified NAT tables size %d must not exceed maximum CT table size %d",
				c.NATMapEntriesGlobal, ctTotal)
		}
		// Auto-size for the case where CT table size was adapted but NAT still on default
		c.NATMapEntriesGlobal = ctTotal * 2 / 3
	}

	switch {
	case c.FragmentsMapEntries < FragmentsMapMin:
		return fmt.Errorf("specified max entries %d for fragment-tracking map must be greater or equal to %d",
			c.FragmentsMapEntries, FragmentsMapMin)
	case c.FragmentsMapEntries > FragmentsMapMax:
		return fmt.Errorf("specified max entries %d for fragment-tracking map must not exceed maximum %d",
			c.FragmentsMapEntries, FragmentsMapMax)
	}
	return nil
}
// checkIPv4NativeRoutingCIDR returns an error when IPv4 native routing with
// masquerading is configured but no native routing CIDR was provided and no
// other mechanism (ip-masq-agent, tunneling, ENI/AlibabaCloud IPAM) makes one
// unnecessary.
func (c *DaemonConfig) checkIPv4NativeRoutingCIDR() error {
	notRequired := c.IPv4NativeRoutingCIDR != nil ||
		!c.EnableIPv4 || !c.EnableIPv4Masquerade ||
		c.EnableIPMasqAgent ||
		c.TunnelingEnabled() ||
		c.IPAMMode() == ipamOption.IPAMENI ||
		c.IPAMMode() == ipamOption.IPAMAlibabaCloud
	if notRequired {
		return nil
	}
	return fmt.Errorf(
		"native routing cidr must be configured with option --%s "+
			"in combination with --%s=true --%s=true --%s=false --%s=%s --%s=%s",
		IPv4NativeRoutingCIDR,
		EnableIPv4Name, EnableIPv4Masquerade,
		EnableIPMasqAgent,
		RoutingMode, RoutingModeNative,
		IPAM, c.IPAMMode())
}
// checkIPv6NativeRoutingCIDR returns an error when IPv6 native routing with
// masquerading is configured but no native routing CIDR was provided and no
// other mechanism (ip-masq-agent, tunneling) makes one unnecessary.
func (c *DaemonConfig) checkIPv6NativeRoutingCIDR() error {
	notRequired := c.IPv6NativeRoutingCIDR != nil ||
		!c.EnableIPv6 || !c.EnableIPv6Masquerade ||
		c.EnableIPMasqAgent ||
		c.TunnelingEnabled()
	if notRequired {
		return nil
	}
	return fmt.Errorf(
		"native routing cidr must be configured with option --%s "+
			"in combination with --%s=true --%s=true --%s=false --%s=%s",
		IPv6NativeRoutingCIDR,
		EnableIPv6Name, EnableIPv6Masquerade,
		EnableIPMasqAgent,
		RoutingMode, RoutingModeNative)
}
// checkIPAMDelegatedPlugin verifies that the configuration is compatible with
// delegated IPAM, where IP addresses are allocated by the CNI binary rather
// than the daemon. Features that require the daemon to allocate IPs for
// itself must then be disabled or explicitly configured.
func (c *DaemonConfig) checkIPAMDelegatedPlugin() error {
	if c.IPAM != ipamOption.IPAMDelegatedPlugin {
		return nil
	}
	if c.EnableIPv4 && c.LocalRouterIPv4 == "" {
		return fmt.Errorf("--%s must be provided when IPv4 is enabled with --%s=%s", LocalRouterIPv4, IPAM, ipamOption.IPAMDelegatedPlugin)
	}
	if c.EnableIPv6 && c.LocalRouterIPv6 == "" {
		return fmt.Errorf("--%s must be provided when IPv6 is enabled with --%s=%s", LocalRouterIPv6, IPAM, ipamOption.IPAMDelegatedPlugin)
	}
	if c.EnableEndpointHealthChecking {
		return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEndpointHealthChecking, IPAM, ipamOption.IPAMDelegatedPlugin)
	}
	// envoy config (Ingress, Gateway API, ...) requires cilium-agent to
	// create an IP address specifically for differentiating envoy traffic,
	// which is not possible with delegated IPAM.
	if c.EnableEnvoyConfig {
		return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEnvoyConfig, IPAM, ipamOption.IPAMDelegatedPlugin)
	}
	return nil
}
// calculateBPFMapSizes reads the BPF map size options from viper and, when a
// dynamic size ratio is configured and all element sizes are known, derives
// map sizes from the total system memory. Explicitly set sizes always
// override dynamic sizing.
func (c *DaemonConfig) calculateBPFMapSizes(logger *slog.Logger, vp *viper.Viper) error {
	// Any map size explicitly set via option will override the dynamic
	// sizing.
	c.AuthMapEntries = vp.GetInt(AuthMapEntriesName)
	c.CTMapEntriesGlobalTCP = vp.GetInt(CTMapEntriesGlobalTCPName)
	c.CTMapEntriesGlobalAny = vp.GetInt(CTMapEntriesGlobalAnyName)
	c.NATMapEntriesGlobal = vp.GetInt(NATMapEntriesGlobalName)
	c.NeighMapEntriesGlobal = vp.GetInt(NeighMapEntriesGlobalName)
	c.PolicyMapFullReconciliationInterval = vp.GetDuration(PolicyMapFullReconciliationIntervalName)

	// Skip dynamic sizing unless every sizeof member was populated by the
	// daemon (or any other caller).
	haveElementSizes := c.SizeofCTElement != 0 &&
		c.SizeofNATElement != 0 &&
		c.SizeofNeighElement != 0 &&
		c.SizeofSockRevElement != 0
	if !haveElementSizes {
		return nil
	}

	// Allow the range (0.0, 1.0] because the dynamic size will anyway be
	// clamped to the table limits. Thus, a ratio of e.g. 0.98 will not lead
	// to 98% of the total memory being allocated for BPF maps.
	ratio := vp.GetFloat64(MapEntriesGlobalDynamicSizeRatioName)
	switch {
	case 0.0 < ratio && ratio <= 1.0:
		vms, err := memory.Get()
		if err != nil || vms == nil {
			logging.Fatal(logger, "Failed to get system memory", logfields.Error, err)
		}
		c.BPFMapsDynamicSizeRatio = ratio
		c.calculateDynamicBPFMapSizes(logger, vp, vms.Total, ratio)
	case c.BPFDistributedLRU:
		return fmt.Errorf("distributed LRU is only valid with a specified dynamic map size ratio")
	case ratio < 0.0:
		return fmt.Errorf("specified dynamic map size ratio %f must be > 0.0", ratio)
	case ratio > 1.0:
		return fmt.Errorf("specified dynamic map size ratio %f must be ≤ 1.0", ratio)
	}
	return nil
}
// SetMapElementSizes sets the BPF map element sizes (key + value) used for
// dynamic BPF map size calculations in calculateDynamicBPFMapSizes.
func (c *DaemonConfig) SetMapElementSizes(sizeofCTElement, sizeofNATElement, sizeofNeighElement, sizeofSockRevElement int) {
	c.SizeofCTElement = sizeofCTElement
	c.SizeofNATElement = sizeofNATElement
	c.SizeofNeighElement = sizeofNeighElement
	c.SizeofSockRevElement = sizeofSockRevElement
}
// GetDynamicSizeCalculator returns a map-size calculator based on the
// configured dynamic size ratio and the total system memory. Failure to read
// the system memory is fatal.
func (c *DaemonConfig) GetDynamicSizeCalculator(logger *slog.Logger) func(def int, min int, max int) int {
	vms, err := memory.Get()
	if vms == nil || err != nil {
		logging.Fatal(logger, "Failed to get system memory", logfields.Error, err)
	}
	return c.getDynamicSizeCalculator(logger, c.BPFMapsDynamicSizeRatio, vms.Total)
}
// getDynamicSizeCalculator returns a function that scales a map's default
// entry count by the share of total memory reserved for BPF maps, rounds it
// to the number of possible CPUs when the distributed LRU is enabled, and
// clamps the result to [min, max]. If dynamicSizeRatio is outside (0.0, 1.0],
// the returned function simply echoes the default.
func (c *DaemonConfig) getDynamicSizeCalculator(logger *slog.Logger, dynamicSizeRatio float64, totalMemory uint64) func(def int, min int, max int) int {
	if 0.0 >= dynamicSizeRatio || dynamicSizeRatio > 1.0 {
		return func(def int, min int, max int) int { return def }
	}

	possibleCPUs := 1

	// Heuristic:
	// Distribute relative to map default entries among the different maps.
	// Cap each map size by the maximum. Map size provided by the user will
	// override the calculated value and also the max. There will be a check
	// for maximum size later on in DaemonConfig.Validate()
	//
	// Calculation examples:
	//
	// Memory CT TCP CT Any NAT
	//
	// 512MB 33140 16570 33140
	// 1GB 66280 33140 66280
	// 4GB 265121 132560 265121
	// 16GB 1060485 530242 1060485
	memoryAvailableForMaps := int(float64(totalMemory) * dynamicSizeRatio)
	logger.Info(fmt.Sprintf("Memory available for map entries (%.3f%% of %dB): %dB", dynamicSizeRatio*100, totalMemory, memoryAvailableForMaps))
	totalMapMemoryDefault := CTMapEntriesGlobalTCPDefault*c.SizeofCTElement +
		CTMapEntriesGlobalAnyDefault*c.SizeofCTElement +
		NATMapEntriesGlobalDefault*c.SizeofNATElement +
		// Neigh table has the same number of entries as NAT Map has.
		NATMapEntriesGlobalDefault*c.SizeofNeighElement +
		SockRevNATMapEntriesDefault*c.SizeofSockRevElement
	// "entdries" typo fixed in the message below.
	logger.Debug(fmt.Sprintf("Total memory for default map entries: %d", totalMapMemoryDefault))

	// In case of distributed LRU, we need to round up to the number of possible CPUs
	// since this is also what the kernel does internally, see htab_map_alloc()'s:
	//
	//   htab->map.max_entries = roundup(attr->max_entries,
	//                                   num_possible_cpus());
	//
	// Thus, if we would not round up from agent side, then Cilium would constantly
	// try to replace maps due to property mismatch!
	if c.BPFDistributedLRU {
		cpus, err := ebpf.PossibleCPU()
		if err != nil {
			logging.Fatal(logger, "Failed to get number of possible CPUs needed for the distributed LRU")
		}
		possibleCPUs = cpus
	}
	return func(entriesDefault, min, max int) int {
		entries := (entriesDefault * memoryAvailableForMaps) / totalMapMemoryDefault
		entries = util.RoundUp(entries, possibleCPUs)
		if entries < min {
			entries = util.RoundUp(min, possibleCPUs)
		} else if entries > max {
			entries = util.RoundDown(max, possibleCPUs)
		}
		return entries
	}
}
// calculateDynamicBPFMapSizes sizes the CT, NAT and neighbor BPF maps from
// the memory made available for maps (totalMemory * dynamicSizeRatio).
// A map whose size flag was explicitly set by the user keeps the user-provided
// value and is excluded from dynamic sizing.
func (c *DaemonConfig) calculateDynamicBPFMapSizes(logger *slog.Logger, vp *viper.Viper, totalMemory uint64, dynamicSizeRatio float64) {
	// getEntries scales a map's default entry count by available memory,
	// clamped to the provided [min, max] bounds.
	getEntries := c.getDynamicSizeCalculator(logger, dynamicSizeRatio, totalMemory)

	// If value for a particular map was explicitly set by an
	// option, disable dynamic sizing for this map and use the
	// provided size.
	if !vp.IsSet(CTMapEntriesGlobalTCPName) {
		c.CTMapEntriesGlobalTCP =
			getEntries(CTMapEntriesGlobalTCPDefault, LimitTableAutoGlobalTCPMin, LimitTableMax)
		logger.Info(fmt.Sprintf("option %s set by dynamic sizing to %v",
			CTMapEntriesGlobalTCPName, c.CTMapEntriesGlobalTCP))
	} else {
		logger.Debug(fmt.Sprintf("option %s set by user to %v", CTMapEntriesGlobalTCPName, c.CTMapEntriesGlobalTCP))
	}
	if !vp.IsSet(CTMapEntriesGlobalAnyName) {
		c.CTMapEntriesGlobalAny =
			getEntries(CTMapEntriesGlobalAnyDefault, LimitTableAutoGlobalAnyMin, LimitTableMax)
		logger.Info(fmt.Sprintf("option %s set by dynamic sizing to %v",
			CTMapEntriesGlobalAnyName, c.CTMapEntriesGlobalAny))
	} else {
		logger.Debug(fmt.Sprintf("option %s set by user to %v", CTMapEntriesGlobalAnyName, c.CTMapEntriesGlobalAny))
	}
	if !vp.IsSet(NATMapEntriesGlobalName) {
		c.NATMapEntriesGlobal =
			getEntries(NATMapEntriesGlobalDefault, LimitTableAutoNatGlobalMin, LimitTableMax)
		logger.Info(fmt.Sprintf("option %s set by dynamic sizing to %v",
			NATMapEntriesGlobalName, c.NATMapEntriesGlobal))
		if c.NATMapEntriesGlobal > c.CTMapEntriesGlobalTCP+c.CTMapEntriesGlobalAny {
			// CT table size was specified manually, make sure that the NAT table size
			// does not exceed maximum CT table size. See
			// (*DaemonConfig).checkMapSizeLimits.
			c.NATMapEntriesGlobal = (c.CTMapEntriesGlobalTCP + c.CTMapEntriesGlobalAny) * 2 / 3
			logger.Warn(fmt.Sprintf("option %s would exceed maximum determined by CT table sizes, capping to %v",
				NATMapEntriesGlobalName, c.NATMapEntriesGlobal))
		}
	} else {
		logger.Debug(fmt.Sprintf("option %s set by user to %v", NATMapEntriesGlobalName, c.NATMapEntriesGlobal))
	}
	if !vp.IsSet(NeighMapEntriesGlobalName) {
		// By default we auto-size it to the same value as the NAT map since we
		// need to keep at least as many neigh entries.
		c.NeighMapEntriesGlobal = c.NATMapEntriesGlobal
		logger.Info(fmt.Sprintf("option %s set by dynamic sizing to %v",
			NeighMapEntriesGlobalName, c.NeighMapEntriesGlobal))
	} else {
		logger.Debug(fmt.Sprintf("option %s set by user to %v", NeighMapEntriesGlobalName, c.NeighMapEntriesGlobal))
	}
}
// validateVTEP validates the VTEP integration configuration and, on success,
// populates c.VtepEndpoints, c.VtepCIDRs, c.VtepCidrMask and c.VtepMACs.
// Returns an error on the first invalid or inconsistent entry.
func (c *DaemonConfig) validateVTEP(vp *viper.Viper) error {
	vtepEndpoints := vp.GetStringSlice(VtepEndpoint)
	vtepCIDRs := vp.GetStringSlice(VtepCIDR)
	vtepCidrMask := vp.GetString(VtepMask)
	vtepMACs := vp.GetStringSlice(VtepMAC)

	// The endpoint, CIDR and MAC lists are parallel: one entry per VTEP.
	if (len(vtepEndpoints) < 1) ||
		len(vtepEndpoints) != len(vtepCIDRs) ||
		len(vtepEndpoints) != len(vtepMACs) {
		return fmt.Errorf("VTEP configuration must have the same number of Endpoint, VTEP and MAC configurations (Found %d endpoints, %d MACs, %d CIDR ranges)", len(vtepEndpoints), len(vtepMACs), len(vtepCIDRs))
	}
	if len(vtepEndpoints) > defaults.MaxVTEPDevices {
		return fmt.Errorf("VTEP must not exceed %d VTEP devices (Found %d VTEPs)", defaults.MaxVTEPDevices, len(vtepEndpoints))
	}
	for _, ep := range vtepEndpoints {
		endpoint := net.ParseIP(ep)
		if endpoint == nil {
			return fmt.Errorf("Invalid VTEP IP: %v", ep)
		}
		// Only IPv4 endpoints are supported.
		ip4 := endpoint.To4()
		if ip4 == nil {
			// BUGFIX: report the offending input; ip4 is always nil in this
			// branch, so the previous message printed "<nil>".
			return fmt.Errorf("Invalid VTEP IPv4 address %v", ep)
		}
		c.VtepEndpoints = append(c.VtepEndpoints, endpoint)
	}
	for _, v := range vtepCIDRs {
		externalCIDR, err := cidr.ParseCIDR(v)
		if err != nil {
			return fmt.Errorf("Invalid VTEP CIDR: %v", v)
		}
		c.VtepCIDRs = append(c.VtepCIDRs, externalCIDR)
	}
	// The mask is shared by all VTEP CIDRs.
	mask := net.ParseIP(vtepCidrMask)
	if mask == nil {
		return fmt.Errorf("Invalid VTEP CIDR Mask: %v", vtepCidrMask)
	}
	c.VtepCidrMask = mask
	for _, m := range vtepMACs {
		externalMAC, err := mac.ParseMAC(m)
		if err != nil {
			return fmt.Errorf("Invalid VTEP MAC: %v", m)
		}
		c.VtepMACs = append(c.VtepMACs, externalMAC)
	}
	return nil
}
// backupFileNames lists the rotation slots for the serialized daemon
// configuration: index 0 is the current file, indices 1 and 2 hold the two
// most recent backups (rotated by backupFiles, written by StoreInFile).
var backupFileNames []string = []string{
	"agent-runtime-config.json",
	"agent-runtime-config-1.json",
	"agent-runtime-config-2.json",
}
// StoreInFile stores the configuration in the given directory under the file
// name 'agent-runtime-config.json'. If this file already exists, it is renamed
// to 'agent-runtime-config-1.json', if 'agent-runtime-config-1.json' also
// exists, 'agent-runtime-config-1.json' is renamed to
// 'agent-runtime-config-2.json'.
// On success the checksum of the stored configuration is remembered in
// c.shaSum for later comparison by ValidateUnchanged.
// Caller is responsible for blocking concurrent changes.
func (c *DaemonConfig) StoreInFile(logger *slog.Logger, dir string) error {
	backupFiles(logger, dir, backupFileNames)
	// BUGFIX: create the file inside the requested directory. Previously the
	// file was created relative to the working directory while the backups
	// were rotated inside dir, so the rotation never matched the new file.
	f, err := os.Create(filepath.Join(dir, backupFileNames[0]))
	if err != nil {
		return err
	}
	defer f.Close()
	e := json.NewEncoder(f)
	e.SetIndent("", " ")
	err = e.Encode(c)
	c.shaSum = c.checksum()
	return err
}
// checksum returns the SHA-256 of the JSON-serialized configuration with the
// mutable parts (Opts, EncryptInterface) masked out, so only the invariant
// portion contributes to the sum. A marshalling failure yields the zero sum.
func (c *DaemonConfig) checksum() [32]byte {
	// Work on a shallow copy so the live configuration is left untouched
	// while the variable fields are cleared.
	snapshot := *c
	snapshot.Opts = nil
	snapshot.EncryptInterface = nil

	data, err := json.Marshal(&snapshot)
	if err != nil {
		var zero [32]byte
		return zero
	}
	return sha256.Sum256(data)
}
// ValidateUnchanged checks that invariable parts of the config have not changed since init.
// Caller is responsible for blocking concurrent changes.
// Returns nil when the current checksum still matches the one recorded by
// StoreInFile; otherwise returns an error describing the difference.
func (c *DaemonConfig) ValidateUnchanged() error {
	if c.checksum() == c.shaSum {
		return nil
	}
	return c.diffFromFile()
}
// diffFromFile loads the previously stored configuration file and returns an
// error describing how the current configuration differs from it. Unexported
// fields and the known-mutable parts (IntOptions, OptionLibrary,
// EncryptInterface) are excluded from the comparison.
// NOTE(review): the path is resolved relative to the working directory —
// confirm it matches where StoreInFile wrote the file.
func (c *DaemonConfig) diffFromFile() error {
	// BUGFIX: use os.ReadFile instead of a single f.Read() call — Read is not
	// guaranteed to fill the buffer in one call, which could truncate the
	// stored config and produce a bogus diff.
	fileBytes, err := os.ReadFile(backupFileNames[0])
	if err != nil {
		return err
	}

	var config DaemonConfig
	err = json.Unmarshal(fileBytes, &config)

	var diff string
	if err != nil {
		diff = fmt.Errorf("unmarshal failed %q: %w", string(fileBytes), err).Error()
	} else {
		// Ignore all unexported fields during Diff.
		// from https://github.com/google/go-cmp/issues/313#issuecomment-1315651560
		opts := cmp.FilterPath(func(p cmp.Path) bool {
			sf, ok := p.Index(-1).(cmp.StructField)
			if !ok {
				return false
			}
			r, _ := utf8.DecodeRuneInString(sf.Name())
			return !unicode.IsUpper(r)
		}, cmp.Ignore())
		diff = cmp.Diff(&config, c, opts,
			cmpopts.IgnoreTypes(&IntOptions{}),
			cmpopts.IgnoreTypes(&OptionLibrary{}),
			cmpopts.IgnoreFields(DaemonConfig{}, "EncryptInterface"))
	}
	return fmt.Errorf("Config differs:\n%s", diff)
}
// BGPControlPlaneEnabled reports whether the BGP control plane is enabled.
func (c *DaemonConfig) BGPControlPlaneEnabled() bool {
	return c.EnableBGPControlPlane
}

// IsDualStack reports whether both IPv4 and IPv6 are enabled.
func (c *DaemonConfig) IsDualStack() bool {
	return c.EnableIPv4 && c.EnableIPv6
}

// IsLocalRouterIP checks if provided IP address matches either LocalRouterIPv4
// or LocalRouterIPv6. An empty string never matches.
func (c *DaemonConfig) IsLocalRouterIP(ip string) bool {
	return ip != "" && (c.LocalRouterIPv4 == ip || c.LocalRouterIPv6 == ip)
}
// StoreViperInFile stores viper's configuration in the given directory under
// the file name 'viper-agent-config.yaml'. If this file already exists, it is
// renamed to 'viper-agent-config-1.yaml', if 'viper-agent-config-1.yaml' also
// exists, 'viper-agent-config-1.yaml' is renamed to
// 'viper-agent-config-2.yaml'.
func StoreViperInFile(logger *slog.Logger, dir string) error {
	backupFileNames := []string{
		"viper-agent-config.yaml",
		"viper-agent-config-1.yaml",
		"viper-agent-config-2.yaml",
	}
	backupFiles(logger, dir, backupFileNames)
	// BUGFIX: write into the requested directory; previously the file was
	// created relative to the working directory while the backups were
	// rotated inside dir.
	return viper.WriteConfigAs(filepath.Join(dir, backupFileNames[0]))
}
// backupFiles rotates the files named in backupFilenames within dir:
// slot i-1 is renamed to slot i, newest first, so slot 0 becomes free for the
// caller to write. Missing files are skipped; rename failures are logged but
// do not abort the rotation.
func backupFiles(logger *slog.Logger, dir string, backupFilenames []string) {
	// Walk from the oldest slot backwards so each rename target is free.
	for i := len(backupFilenames) - 1; i > 0; i-- {
		newFileName := filepath.Join(dir, backupFilenames[i-1])
		oldestFilename := filepath.Join(dir, backupFilenames[i])
		if _, err := os.Stat(newFileName); os.IsNotExist(err) {
			continue
		}
		err := os.Rename(newFileName, oldestFilename)
		if err != nil {
			// BUGFIX: include the rename error itself in the log record;
			// it was previously computed but dropped.
			logger.Error(
				"Unable to rename configuration files",
				logfields.Error, err,
				logfields.OldName, oldestFilename,
				logfields.NewName, newFileName,
			)
		}
	}
}
// sanitizeIntParam returns the configured integer value for paramName, or
// paramDefault when the value is absent or not strictly positive. A warning
// is logged only when the user explicitly supplied an invalid value.
func sanitizeIntParam(logger *slog.Logger, vp *viper.Viper, paramName string, paramDefault int) int {
	value := vp.GetInt(paramName)
	if value > 0 {
		return value
	}
	// An unset parameter silently falls back to the default; an explicit
	// non-positive value is reported.
	if vp.IsSet(paramName) {
		logger.Warn(
			"user-provided parameter had value <= 0 , which is invalid ; setting to default",
			logfields.Param, paramName,
			logfields.Value, paramDefault,
		)
	}
	return paramDefault
}
// validateConfigMapFlag checks that a config-map value can be coerced to the
// declared type of the corresponding command-line flag. For the custom "map"
// type the value is applied through the flag's own Set (which validates it);
// for all other types only the conversion is attempted and the result
// discarded. Returns the conversion error, or an error for unknown types.
func validateConfigMapFlag(flag *pflag.Flag, key string, value any) error {
	var err error
	switch t := flag.Value.Type(); t {
	case "bool":
		_, err = cast.ToBoolE(value)
	case "duration":
		_, err = cast.ToDurationE(value)
	case "float32":
		_, err = cast.ToFloat32E(value)
	case "float64":
		_, err = cast.ToFloat64E(value)
	case "int":
		_, err = cast.ToIntE(value)
	case "int8":
		_, err = cast.ToInt8E(value)
	case "int16":
		_, err = cast.ToInt16E(value)
	case "int32":
		_, err = cast.ToInt32E(value)
	case "int64":
		_, err = cast.ToInt64E(value)
	case "map":
		// custom type, see pkg/option/map_options.go
		err = flag.Value.Set(fmt.Sprintf("%s", value))
	case "stringSlice":
		_, err = cast.ToStringSliceE(value)
	case "string":
		_, err = cast.ToStringE(value)
	case "uint":
		_, err = cast.ToUintE(value)
	case "uint8":
		_, err = cast.ToUint8E(value)
	case "uint16":
		_, err = cast.ToUint16E(value)
	case "uint32":
		_, err = cast.ToUint32E(value)
	case "uint64":
		_, err = cast.ToUint64E(value)
	case "stringToString":
		_, err = command.ToStringMapStringE(value)
	default:
		return fmt.Errorf("unable to validate option %s value of type %s", key, t)
	}
	return err
}
// validateConfigMap checks whether the flag exists and validate its value.
// Keys without a matching command-line flag are silently skipped; the first
// value that fails type validation aborts with a wrapped error.
func validateConfigMap(cmd *cobra.Command, m map[string]any) error {
	flags := cmd.Flags()
	for key, value := range m {
		f := flags.Lookup(key)
		if f == nil {
			// No corresponding flag registered; nothing to validate.
			continue
		}
		if err := validateConfigMapFlag(f, key, value); err != nil {
			return fmt.Errorf("option %s: %w", key, err)
		}
	}
	return nil
}
// InitConfig reads in config file and ENV variables if set.
// The returned closure performs the actual initialization: it handles
// --version, loads the config directory (if any) into viper, then reads the
// config file, toggling debug logging both before and after the file load.
func InitConfig(logger *slog.Logger, cmd *cobra.Command, programName, configName string, vp *viper.Viper) func() {
	return func() {
		// --version short-circuits everything else.
		if vp.GetBool("version") {
			fmt.Printf("%s %s\n", programName, version.Version)
			os.Exit(0)
		}

		// When generating command reference docs, skip configuration loading.
		if vp.GetString(CMDRef) != "" {
			return
		}

		Config.ConfigFile = vp.GetString(ConfigFile) // enable ability to specify config file via flag
		Config.ConfigDir = vp.GetString(ConfigDir)
		vp.SetEnvPrefix("cilium")

		if Config.ConfigDir != "" {
			if _, err := os.Stat(Config.ConfigDir); os.IsNotExist(err) {
				logging.Fatal(logger, fmt.Sprintf("Non-existent configuration directory %s", Config.ConfigDir))
			}

			if m, err := ReadDirConfig(logger, Config.ConfigDir); err != nil {
				logging.Fatal(logger, fmt.Sprintf("Unable to read configuration directory %s", Config.ConfigDir), logfields.Error, err)
			} else {
				// replace deprecated fields with new fields
				ReplaceDeprecatedFields(m)

				// validate the config-map
				if err := validateConfigMap(cmd, m); err != nil {
					logging.Fatal(logger, "Incorrect config-map flag value", logfields.Error, err)
				}

				if err := MergeConfig(vp, m); err != nil {
					logging.Fatal(logger, "Unable to merge configuration", logfields.Error, err)
				}
			}
		}

		if Config.ConfigFile != "" {
			vp.SetConfigFile(Config.ConfigFile)
		} else {
			vp.SetConfigName(configName) // name of config file (without extension)
			vp.AddConfigPath("$HOME")    // adding home directory as first search path
		}

		// We need to check for the debug environment variable or CLI flag before
		// loading the configuration file since on configuration file read failure
		// we will emit a debug log entry.
		if vp.GetBool(DebugArg) {
			logging.SetLogLevelToDebug()
		}

		// If a config file is found, read it in.
		// A read failure is fatal only when a config file was explicitly
		// requested; otherwise it is logged at debug level and ignored.
		if err := vp.ReadInConfig(); err == nil {
			logger.Info("Using config from file", logfields.Path, vp.ConfigFileUsed())
		} else if Config.ConfigFile != "" {
			logging.Fatal(logger,
				"Error reading config file",
				logfields.Path, vp.ConfigFileUsed(),
				logfields.Error, err,
			)
		} else {
			logger.Debug("Skipped reading configuration file", logfields.Error, err)
		}

		// Check for the debug flag again now that the configuration file may
		// have been loaded, as it might have changed.
		if vp.GetBool(DebugArg) {
			logging.SetLogLevelToDebug()
		}
	}
}
// BPFEventBufferConfig contains parsed configuration for a bpf map event buffer.
type BPFEventBufferConfig struct {
	// Enabled toggles event buffering for the map.
	Enabled bool
	// MaxSize is the maximum number of buffered events.
	MaxSize int
	// TTL is the retention duration for buffered events.
	TTL time.Duration
}

// BPFEventBufferConfigs contains parsed bpf event buffer configs, indexed by map name.
type BPFEventBufferConfigs map[string]BPFEventBufferConfig

// GetEventBufferConfig returns either the relevant config for a map name, or a default
// one with enabled=false otherwise.
func (d *DaemonConfig) GetEventBufferConfig(name string) BPFEventBufferConfig {
	return d.bpfMapEventConfigs.get(name)
}

// get returns the config for name; a missing entry yields the zero value,
// whose Enabled field is false.
func (cs BPFEventBufferConfigs) get(name string) BPFEventBufferConfig {
	return cs[name]
}
// ParseEventBufferTupleString parses a event buffer configuration tuple string.
// For example: enabled_100_24h
// Which refers to enabled=true, maxSize=100, ttl=24hours.
// A size of 0 disables buffering even when "enabled" was requested.
func ParseEventBufferTupleString(optsStr string) (BPFEventBufferConfig, error) {
	opts := strings.Split(optsStr, "_")
	enabled := false
	conf := BPFEventBufferConfig{}
	if len(opts) != 3 {
		return conf, fmt.Errorf("unexpected event buffer config value format, should be in format 'mapname=enabled_100_24h'")
	}
	if opts[0] != "enabled" && opts[0] != "disabled" {
		return conf, fmt.Errorf("could not parse event buffer enabled: must be either 'enabled' or 'disabled'")
	}
	if opts[0] == "enabled" {
		enabled = true
	}
	size, err := strconv.Atoi(opts[1])
	if err != nil {
		return conf, fmt.Errorf("could not parse event buffer maxSize int: %w", err)
	}
	ttl, err := time.ParseDuration(opts[2])
	if err != nil {
		return conf, fmt.Errorf("could not parse event buffer ttl duration: %w", err)
	}
	if size < 0 {
		// BUGFIX: report the parsed size; conf.MaxSize was still zero at
		// this point, so the message always printed (0).
		return conf, fmt.Errorf("event buffer max size cannot be less than zero (%d)", size)
	}
	conf.TTL = ttl
	conf.Enabled = enabled && size != 0
	conf.MaxSize = size
	return conf, nil
}
// parseBPFMapEventConfigs parses each map-name -> tuple entry of confMap and
// stores the result into confs, stopping at the first malformed tuple.
func parseBPFMapEventConfigs(confs BPFEventBufferConfigs, confMap map[string]string) error {
	for name, tuple := range confMap {
		parsed, err := ParseEventBufferTupleString(tuple)
		if err != nil {
			return fmt.Errorf("unable to parse %s: %w", BPFMapEventBuffers, err)
		}
		confs[name] = parsed
	}
	return nil
}
// EnforceLXCFibLookup reports whether a FIB lookup should be enforced for
// endpoint (LXC) devices; currently tied to the EndpointRoutes setting.
func (d *DaemonConfig) EnforceLXCFibLookup() bool {
	// See https://github.com/cilium/cilium/issues/27343 for the symptoms.
	//
	// We want to enforce FIB lookup if EndpointRoutes are enabled, because
	// this was a config dependency change which caused different behaviour
	// since v1.14.0-snapshot.2. We will remove this hack later, once we
	// have auto-device detection on by default.
	return d.EnableEndpointRoutes
}

// GetZone maps a numeric zone ID back to its zone name; unknown IDs yield "".
func (d *DaemonConfig) GetZone(id uint8) string {
	return d.ReverseFixedZoneMapping[id]
}

// GetZoneID maps a zone name to its numeric ID; unknown names yield 0.
func (d *DaemonConfig) GetZoneID(zone string) uint8 {
	return d.FixedZoneMapping[zone]
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import "maps"
var (
	// specPolicyTracing is the specification of the debug option that
	// enables tracing while resolving policy.
	specPolicyTracing = Option{
		Description: "Enable tracing when resolving policy (Debug)",
	}

	// DaemonOptionLibrary is the daemon's option library that should be
	// used for read-only.
	DaemonOptionLibrary = OptionLibrary{
		PolicyTracing: &specPolicyTracing,
	}

	// DaemonMutableOptionLibrary holds the options that may be changed at
	// runtime; merged into DaemonOptionLibrary by init() below.
	DaemonMutableOptionLibrary = OptionLibrary{
		ConntrackAccounting:  &specConntrackAccounting,
		PolicyAccounting:     &specPolicyAccounting,
		Debug:                &specDebug,
		DebugLB:              &specDebugLB,
		DebugPolicy:          &specDebugPolicy,
		DropNotify:           &specDropNotify,
		TraceNotify:          &specTraceNotify,
		PolicyVerdictNotify:  &specPolicyVerdictNotify,
		PolicyAuditMode:      &specPolicyAuditMode,
		MonitorAggregation:   &specMonitorAggregation,
		SourceIPVerification: &specSourceIPVerification,
	}
)

// init folds the mutable options into the full daemon option library so
// DaemonOptionLibrary contains every known option.
func init() {
	maps.Copy(DaemonOptionLibrary, DaemonMutableOptionLibrary)
}

// ParseDaemonOption parses a string as daemon option
func ParseDaemonOption(opt string) (string, OptionSetting, bool, error) {
	return DaemonOptionLibrary.ParseOption(opt)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import "maps"
var (
	// endpointMutableOptionLibrary is the set of per-endpoint options that
	// may be changed at runtime; exposed to callers only as a clone via
	// GetEndpointMutableOptionLibrary.
	endpointMutableOptionLibrary = OptionLibrary{
		ConntrackAccounting:  &specConntrackAccounting,
		PolicyAccounting:     &specPolicyAccounting,
		Debug:                &specDebug,
		DebugLB:              &specDebugLB,
		DebugPolicy:          &specDebugPolicy,
		DropNotify:           &specDropNotify,
		TraceNotify:          &specTraceNotify,
		PolicyVerdictNotify:  &specPolicyVerdictNotify,
		PolicyAuditMode:      &specPolicyAuditMode,
		MonitorAggregation:   &specMonitorAggregation,
		SourceIPVerification: &specSourceIPVerification,
	}
)

// GetEndpointMutableOptionLibrary returns a copy of the endpoint-mutable
// option library, so callers cannot modify the package-level map.
func GetEndpointMutableOptionLibrary() OptionLibrary {
	return maps.Clone(endpointMutableOptionLibrary)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import "cmp"
// NetworkPolicyEnabled returns true if the network policy enforcement
// system is enabled for K8s, Cilium and Cilium Clusterwide network policies.
//
// cmp.Or yields the first non-zero (i.e. true) value, so enforcement is
// considered enabled unless every condition below is false.
func NetworkPolicyEnabled(cfg *DaemonConfig) bool {
	return cmp.Or(
		cfg.EnablePolicy != NeverEnforce,
		cfg.EnableK8sNetworkPolicy,
		cfg.EnableCiliumNetworkPolicy,
		cfg.EnableCiliumClusterwideNetworkPolicy,
		!cfg.DisableCiliumEndpointCRD,
		cfg.IdentityAllocationMode != IdentityAllocationModeCRD,
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"fmt"
"strings"
)
// Validator validates the option string.
type Validator func(val string) error

// MapOptions holds a map of values and a validation function.
type MapOptions struct {
	vals map[string]string

	// Validators must validate individual "key=value" entries
	// within the map.
	validators []Validator
}

// NewMapOptions creates a reference to a new MapOptions struct.
// The caller's map is used as the backing store; a nil map is allocated
// in place so subsequent Set calls never write to a nil map.
func NewMapOptions(values *map[string]string, validators ...Validator) *MapOptions {
	if *values == nil {
		*values = map[string]string{}
	}
	opts := MapOptions{vals: *values}
	opts.validators = append(opts.validators, validators...)
	return &opts
}

// String renders the map as comma-separated "key=value" pairs.
// Note: iteration order over the map is not deterministic.
func (opts *MapOptions) String() string {
	pairs := make([]string, 0, len(opts.vals))
	for k, v := range opts.vals {
		pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
	}
	return strings.Join(pairs, ",")
}

// Type returns a string name for this Option type
func (opts *MapOptions) Type() string {
	return "map"
}

// Set validates, if needed, the input value and adds it to the internal map.
// It splits the input string by ',' and then by '=' to create key-value pairs.
// An entry without '=' is stored with an empty value.
func (opts *MapOptions) Set(value string) error {
	for _, entry := range strings.Split(value, ",") {
		for _, validate := range opts.validators {
			if err := validate(entry); err != nil {
				return err
			}
		}
		key, val, _ := strings.Cut(entry, "=")
		opts.vals[key] = val
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"fmt"
"strconv"
"strings"
)
// MonitorAggregationLevel represents a level of aggregation for monitor events
// from the datapath. Low values represent no aggregation, that is, to increase
// the number of events emitted from the datapath; Higher values represent more
// aggregation, to minimize the number of events emitted from the datapath.
//
// The MonitorAggregationLevel does not affect the Debug option in the daemon
// or endpoint, so debug notifications will continue uninhibited by this
// setting.
type MonitorAggregationLevel OptionSetting

const (
	// MonitorAggregationLevelNone represents no aggregation in the
	// datapath; all packets will be monitored.
	MonitorAggregationLevelNone OptionSetting = 0

	// MonitorAggregationLevelLowest represents aggregation of monitor events
	// to emit a maximum of one trace event per packet. Trace events when
	// packets are received are disabled.
	MonitorAggregationLevelLowest OptionSetting = 1

	// MonitorAggregationLevelLow is the same as
	// MonitorAggregationLevelLowest, but may aggregate additional traffic
	// in future.
	MonitorAggregationLevelLow OptionSetting = 2

	// MonitorAggregationLevelMedium represents aggregation of monitor
	// events to only emit notifications periodically for each connection
	// unless there is new information (eg, a TCP connection is closed).
	MonitorAggregationLevelMedium OptionSetting = 3

	// MonitorAggregationLevelMax is the maximum level of aggregation
	// currently supported.
	MonitorAggregationLevelMax OptionSetting = 4
)

// monitorAggregationOption maps a user-specified string to a monitor
// aggregation level. The empty string defaults to "none".
var monitorAggregationOption = map[string]OptionSetting{
	"":         MonitorAggregationLevelNone,
	"none":     MonitorAggregationLevelNone,
	"disabled": MonitorAggregationLevelNone,
	"lowest":   MonitorAggregationLevelLowest,
	"low":      MonitorAggregationLevelLow,
	"medium":   MonitorAggregationLevelMedium,
	"max":      MonitorAggregationLevelMax,
	"maximum":  MonitorAggregationLevelMax,
}

// init additionally registers the numeric spellings "0".."4" as aliases for
// the corresponding aggregation levels.
func init() {
	for i := MonitorAggregationLevelNone; i <= MonitorAggregationLevelMax; i++ {
		number := strconv.Itoa(int(i))
		monitorAggregationOption[number] = OptionSetting(i)
	}
}

// monitorAggregationFormat maps an aggregation level to a formatted string.
var monitorAggregationFormat = map[OptionSetting]string{
	MonitorAggregationLevelNone:   "None",
	MonitorAggregationLevelLowest: "Lowest",
	MonitorAggregationLevelLow:    "Low",
	MonitorAggregationLevelMedium: "Medium",
	MonitorAggregationLevelMax:    "Max",
}

// VerifyMonitorAggregationLevel validates the specified key/value for a
// monitor aggregation level. The key is unused; only the value is checked
// (matches the VerifyFunc signature).
func VerifyMonitorAggregationLevel(key, value string) error {
	_, err := ParseMonitorAggregationLevel(value)
	return err
}
// ParseMonitorAggregationLevel turns a string into a monitor aggregation
// level. The string may contain an integer value or a string representation of
// a particular monitor aggregation level.
func ParseMonitorAggregationLevel(value string) (OptionSetting, error) {
	// Textual names ("low", "max", ...) take precedence.
	if level, ok := monitorAggregationOption[strings.ToLower(value)]; ok {
		return level, nil
	}

	// Otherwise the value must be a plain integer within range.
	n, err := strconv.Atoi(value)
	if err != nil {
		return MonitorAggregationLevelNone, fmt.Errorf("invalid monitor aggregation level %q", value)
	}

	level := OptionSetting(n)
	if level < MonitorAggregationLevelNone || level > MonitorAggregationLevelMax {
		return MonitorAggregationLevelNone, fmt.Errorf("monitor aggregation level must be between %d and %d",
			MonitorAggregationLevelNone, MonitorAggregationLevelMax)
	}
	return level, nil
}
// FormatMonitorAggregationLevel maps a MonitorAggregationLevel to a string.
// Unknown levels yield the empty string.
func FormatMonitorAggregationLevel(level OptionSetting) string {
	return monitorAggregationFormat[level]
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"encoding/json"
"fmt"
"maps"
"slices"
"strings"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/lock"
)
// VerifyFunc validates option key with value and may return an error if the
// option should not be applied
type VerifyFunc func(key string, value string) error

// ParseFunc parses the option value and may return an error if the option
// cannot be parsed or applied.
type ParseFunc func(value string) (OptionSetting, error)

// FormatFunc formats the specified value as textual representation option.
type FormatFunc func(value OptionSetting) string

// Option is the structure used to specify the semantics of a configurable
// boolean option
type Option struct {
	// Define is the name of the #define used for BPF programs
	Define string

	// Description is a short human readable description
	Description string

	// Immutable marks an option which is read-only
	Immutable bool

	// Requires is a list of required options, such options will be
	// automatically enabled as required.
	Requires []string

	// Parse is called to parse the option. If not specified, defaults to
	// NormalizeBool().
	Parse ParseFunc

	// FormatFunc is called to format the value for an option. If not
	// specified, defaults to formatting 0 as "Disabled" and other values
	// as "Enabled".
	Format FormatFunc

	// Verify is called prior to applying the option
	Verify VerifyFunc

	// Deprecated is true if this option is deprecated and a warning
	// should be printed.
	Deprecated bool
}

// OptionSetting specifies the different choices each Option has.
type OptionSetting int

const (
	// OptionDisabled (0) and OptionEnabled (1) are the two boolean
	// settings; parse functions may produce higher values for
	// multi-level options.
	OptionDisabled OptionSetting = iota
	OptionEnabled
)

// RequiresOption returns true if the option requires the specified option `name`.
func (o Option) RequiresOption(name string) bool {
	return slices.Contains(o.Requires, name)
}
// OptionLibrary maps canonical option names to their specifications.
type OptionLibrary map[string]*Option

// Lookup resolves name case-insensitively and returns the canonical key
// together with its specification, or ("", nil) when unknown.
func (l OptionLibrary) Lookup(name string) (string, *Option) {
	want := strings.ToLower(name)
	for key, opt := range l {
		if strings.ToLower(key) == want {
			return key, opt
		}
	}
	return "", nil
}
// Define returns the BPF #define name configured for the given option,
// falling back to the name itself when the option is not in the library.
func (l OptionLibrary) Define(name string) string {
	// Single map lookup instead of the previous check-then-index pair.
	if opt, ok := l[name]; ok {
		return opt.Define
	}
	return name
}
// NormalizeBool parses a boolean-ish option value ("true"/"on"/"enable"/
// "enabled"/"1" and their negative counterparts, case-insensitive) into an
// OptionSetting, or returns an error for anything else.
func NormalizeBool(value string) (OptionSetting, error) {
	v := strings.ToLower(value)
	if slices.Contains([]string{"true", "on", "enable", "enabled", "1"}, v) {
		return OptionEnabled, nil
	}
	if slices.Contains([]string{"false", "off", "disable", "disabled", "0"}, v) {
		return OptionDisabled, nil
	}
	return OptionDisabled, fmt.Errorf("invalid option value %s", value)
}
// ValidateConfigurationMap validates a given configuration map based on the
// option library
// Each entry is both parsed (type/format check) and verified (semantic
// check via the option's Verify hook); the first failure aborts.
func (l *OptionLibrary) ValidateConfigurationMap(n models.ConfigurationMap) (OptionMap, error) {
	o := make(OptionMap)
	for k, v := range n {
		_, newVal, _, err := l.parseKeyValue(k, v)
		if err != nil {
			return nil, err
		}
		if err := l.Validate(k, v); err != nil {
			return nil, err
		}
		o[k] = newVal
	}
	return o, nil
}

// Validate checks that name is a known, mutable option and, when the option
// defines a Verify hook, that the value passes it.
func (l OptionLibrary) Validate(name string, value string) error {
	key, spec := l.Lookup(name)
	if key == "" {
		return fmt.Errorf("unknown option %s", name)
	}
	if spec.Immutable {
		return fmt.Errorf("specified option is immutable (read-only)")
	}
	if spec.Verify != nil {
		return spec.Verify(key, value)
	}
	return nil
}

// OptionMap maps canonical option names to their current setting.
type OptionMap map[string]OptionSetting

// DeepCopy returns an independent copy of the option map.
func (om OptionMap) DeepCopy() OptionMap {
	return maps.Clone(om)
}

// IntOptions member functions with external access do not require
// locking by the caller, while functions with internal access presume
// the caller to have taken care of any locking needed.
type IntOptions struct {
	optsMU  lock.RWMutex // Protects all variables from this structure below this line
	opts    OptionMap
	library *OptionLibrary
}

// intOptions is only used for JSON
type intOptions struct {
	Opts OptionMap `json:"map"`
}
// ValidateConfigurationMap validates a given configuration map based on the
// option library
func (o *IntOptions) ValidateConfigurationMap(n models.ConfigurationMap) (OptionMap, error) {
	return o.library.ValidateConfigurationMap(n)
}

// Custom json marshal for unexported 'opts' while holding a read lock
func (o *IntOptions) MarshalJSON() ([]byte, error) {
	o.optsMU.RLock()
	defer o.optsMU.RUnlock()
	return json.Marshal(&intOptions{
		Opts: o.opts,
	})
}

// Custom json unmarshal for unexported 'opts' while holding a write lock
// NOTE(review): entries are decoded into the existing o.opts map in place;
// if o.opts is nil the decoded entries land in a fresh map that is then
// discarded — confirm callers always construct via NewIntOptions.
func (o *IntOptions) UnmarshalJSON(b []byte) error {
	o.optsMU.Lock()
	defer o.optsMU.Unlock()
	err := json.Unmarshal(b, &intOptions{
		Opts: o.opts,
	})
	if err != nil {
		return err
	}
	// Silently discard unsupported options
	for k := range o.opts {
		key, _ := o.library.Lookup(k)
		if key == "" {
			delete(o.opts, k)
		}
	}
	return nil
}

// GetImmutableModel returns the set of immutable options as a ConfigurationMap API model.
// Currently always empty: no immutable options are exported here.
func (o *IntOptions) GetImmutableModel() *models.ConfigurationMap {
	immutableCfg := make(models.ConfigurationMap)
	return &immutableCfg
}

// GetMutableModel returns the set of mutable options as a ConfigurationMap API model.
// Values are rendered via the option's Format hook when present, otherwise
// as "Enabled"/"Disabled".
func (o *IntOptions) GetMutableModel() *models.ConfigurationMap {
	mutableCfg := make(models.ConfigurationMap)
	o.optsMU.RLock()
	for k, v := range o.opts {
		_, config := o.library.Lookup(k)

		// It's possible that an option has since been removed and thus has
		// no corresponding configuration; need to check if configuration is
		// nil accordingly.
		if config != nil {
			if config.Format == nil {
				if v == OptionDisabled {
					mutableCfg[k] = "Disabled"
				} else {
					mutableCfg[k] = "Enabled"
				}
			} else {
				mutableCfg[k] = config.Format(v)
			}
		}
	}
	o.optsMU.RUnlock()

	return &mutableCfg
}
// DeepCopy returns a copy of the options sharing the same (read-only)
// library but with an independent settings map.
func (o *IntOptions) DeepCopy() *IntOptions {
	o.optsMU.RLock()
	cpy := &IntOptions{
		opts:    o.opts.DeepCopy(),
		library: o.library,
	}
	o.optsMU.RUnlock()
	return cpy
}

// NewIntOptions constructs an empty option set backed by the given library.
func NewIntOptions(lib *OptionLibrary) *IntOptions {
	return &IntOptions{
		opts:    OptionMap{},
		library: lib,
	}
}

// getValue reads a setting without locking; callers must hold optsMU.
// Missing keys read as OptionDisabled.
func (o *IntOptions) getValue(key string) OptionSetting {
	value, exists := o.opts[key]
	if !exists {
		return OptionDisabled
	}
	return value
}

// GetValue returns the current setting for key (OptionDisabled when unset).
func (o *IntOptions) GetValue(key string) OptionSetting {
	o.optsMU.RLock()
	v := o.getValue(key)
	o.optsMU.RUnlock()
	return v
}

// IsEnabled reports whether key is set to anything other than OptionDisabled.
func (o *IntOptions) IsEnabled(key string) bool {
	return o.GetValue(key) != OptionDisabled
}

// SetValidated sets the option `key` to the specified value. The caller is
// expected to have validated the input to this function.
func (o *IntOptions) SetValidated(key string, value OptionSetting) {
	o.optsMU.Lock()
	o.opts[key] = value
	o.optsMU.Unlock()
}

// SetBool sets the specified option to Enabled.
func (o *IntOptions) SetBool(key string, value bool) {
	intValue := OptionDisabled
	if value {
		intValue = OptionEnabled
	}
	o.optsMU.Lock()
	o.opts[key] = intValue
	o.optsMU.Unlock()
}

// Delete removes key from the option set.
func (o *IntOptions) Delete(key string) {
	o.optsMU.Lock()
	delete(o.opts, key)
	o.optsMU.Unlock()
}

// SetIfUnset stores value for key only when the key has no setting yet.
func (o *IntOptions) SetIfUnset(key string, value OptionSetting) {
	o.optsMU.Lock()
	if _, exists := o.opts[key]; !exists {
		o.opts[key] = value
	}
	o.optsMU.Unlock()
}
// InheritDefault copies the parent's current setting for `key` into this
// option set.
func (o *IntOptions) InheritDefault(parent *IntOptions, key string) {
	// Read the parent's value before taking our own lock to avoid nested
	// locking (and a self-deadlock when parent == o).
	value := parent.GetValue(key)
	// BUGFIX: a map write requires the write lock; the previous RLock made
	// this a data race against concurrent readers/writers.
	o.optsMU.Lock()
	o.opts[key] = value
	o.optsMU.Unlock()
}
// ParseOption parses an option of the form "name=value" (or "!name=value" to
// request disabling) against the library. It returns the canonical key, the
// parsed setting, the option's Deprecated flag, and an error for unknown or
// malformed input.
func (l *OptionLibrary) ParseOption(arg string) (string, OptionSetting, bool, error) {
	result := OptionEnabled

	// BUGFIX: guard against an empty argument; arg[0] below would panic.
	if arg == "" {
		return "", OptionDisabled, false, fmt.Errorf("invalid option format")
	}

	if arg[0] == '!' {
		result = OptionDisabled
		arg = arg[1:]
	}

	optionSplit := strings.SplitN(arg, "=", 2)
	arg = optionSplit[0]
	if len(optionSplit) > 1 {
		// "!name=value" is contradictory: the '!' prefix requests
		// disabling while an explicit value is also given.
		if result == OptionDisabled {
			return "", OptionDisabled, false, fmt.Errorf("invalid boolean format")
		}

		return l.parseKeyValue(arg, optionSplit[1])
	}
	return "", OptionDisabled, false, fmt.Errorf("invalid option format")
}
// parseKeyValue resolves arg in the library and parses value with the
// option's Parse hook (NormalizeBool by default). It returns the canonical
// key, the parsed setting, the option's Deprecated flag, and an error for
// unknown options, parse failures, or immutable options.
func (l *OptionLibrary) parseKeyValue(arg, value string) (string, OptionSetting, bool, error) {
	var result OptionSetting

	key, spec := l.Lookup(arg)
	if key == "" {
		return "", OptionDisabled, false, fmt.Errorf("unknown option %q", arg)
	}

	var err error
	if spec.Parse != nil {
		result, err = spec.Parse(value)
	} else {
		result, err = NormalizeBool(value)
	}
	if err != nil {
		return "", OptionDisabled, false, err
	}

	// Immutability is checked after parsing so a malformed value is
	// reported first.
	if spec.Immutable {
		return "", OptionDisabled, spec.Deprecated, fmt.Errorf("specified option is immutable (read-only)")
	}

	return key, result, spec.Deprecated, nil
}
// getFmtOpt returns #define name if option exists and is set to true in endpoint's Opts
// map or #undef name if option does not exist or exists but is set to false
func (o *IntOptions) getFmtOpt(name string) string {
	define := o.library.Define(name)
	if define == "" {
		return ""
	}

	// Reuse the already-computed define; the previous code re-ran the
	// library lookup twice for no reason.
	if value := o.getValue(name); value != OptionDisabled {
		return fmt.Sprintf("#define %s %d", define, value)
	}
	return "#undef " + define
}
// GetFmtList renders all options as newline-terminated "#define"/"#undef"
// lines, sorted by option name for deterministic output.
func (o *IntOptions) GetFmtList() string {
	// Use a strings.Builder instead of string += in a loop, which copies
	// the accumulated string on every iteration.
	var sb strings.Builder
	o.optsMU.RLock()
	for _, k := range slices.Sorted(maps.Keys(o.opts)) {
		if def := o.getFmtOpt(k); def != "" {
			sb.WriteString(def)
			sb.WriteByte('\n')
		}
	}
	o.optsMU.RUnlock()
	return sb.String()
}
// Dump prints every option and its formatted value to stdout, sorted by
// name. A nil receiver is a no-op.
func (o *IntOptions) Dump() {
	if o == nil {
		return
	}

	o.optsMU.RLock()
	for _, k := range slices.Sorted(maps.Keys(o.opts)) {
		var text string
		_, option := o.library.Lookup(k)
		// Fall back to Enabled/Disabled when the option is unknown or
		// has no Format hook.
		if option == nil || option.Format == nil {
			if o.opts[k] == OptionDisabled {
				text = "Disabled"
			} else {
				text = "Enabled"
			}
		} else {
			text = option.Format(o.opts[k])
		}

		fmt.Printf("%-24s %s\n", k, text)
	}
	o.optsMU.RUnlock()
}

// Validate validates a given configuration map based on the option library
// Entries whose parsed value equals the current setting skip semantic
// validation (no change would occur).
func (o *IntOptions) Validate(n models.ConfigurationMap) error {
	o.optsMU.RLock()
	defer o.optsMU.RUnlock()
	for k, v := range n {
		_, newVal, _, err := o.library.parseKeyValue(k, v)
		if err != nil {
			return err
		}

		// Ignore validation if value is identical
		if oldVal, ok := o.opts[k]; ok && oldVal == newVal {
			continue
		}

		if err := o.library.Validate(k, v); err != nil {
			return err
		}
	}

	return nil
}
// ChangedFunc is called by `Apply()` for each option changed
type ChangedFunc func(key string, value OptionSetting, data any)
// enable enables the option `name` with all its dependencies
func (o *IntOptions) enable(name string) {
if o.library != nil {
if _, opt := o.library.Lookup(name); opt != nil {
for _, dependency := range opt.Requires {
o.enable(dependency)
}
}
}
o.opts[name] = OptionEnabled
}
// set enables the option `name` with all its dependencies, and sets the
// integer level of the option to `value`.
// Caller must hold the options lock.
func (o *IntOptions) set(name string, value OptionSetting) {
	// enable first so dependencies are satisfied, then override the level.
	o.enable(name)
	o.opts[name] = value
}
// disable disables the option `name`. All options which depend on the option
// to be disabled will be disabled. Options which have previously been enabled
// as a dependency will not be automatically disabled.
// Caller must hold the options lock.
func (o *IntOptions) disable(name string) {
	o.opts[name] = OptionDisabled
	if o.library == nil {
		return
	}
	// Cascade: anything that requires `name` cannot remain enabled.
	for key, opt := range *o.library {
		if opt.RequiresOption(name) && o.opts[key] != OptionDisabled {
			o.disable(key)
		}
	}
}
// changedOptions records a single applied option change so that the
// ChangedFunc callbacks can be invoked after the options lock is released.
type changedOptions struct {
	key   string        // option name that changed
	value OptionSetting // setting that was applied
}
// ApplyValidated takes a configuration map and applies the changes. For an
// option which is changed, the `ChangedFunc` function is called with the
// `data` argument passed in as well. Returns the number of options changed if
// any.
//
// The caller is expected to have validated the configuration options prior to
// calling this function.
func (o *IntOptions) ApplyValidated(n OptionMap, changed ChangedFunc, data any) int {
	pending := make([]changedOptions, 0, len(n))

	o.optsMU.Lock()
	for key, want := range n {
		cur, exists := o.opts[key]
		switch {
		case want == OptionDisabled:
			// Only disable when currently enabled.
			if exists && cur != OptionDisabled {
				o.disable(key)
				pending = append(pending, changedOptions{key: key, value: want})
			}
		default:
			// Only (re-)enable when missing or currently disabled.
			if !exists || cur == OptionDisabled {
				o.set(key, want)
				pending = append(pending, changedOptions{key: key, value: want})
			}
		}
	}
	o.optsMU.Unlock()

	// Run callbacks outside the lock so they may safely re-enter.
	for _, c := range pending {
		changed(c.key, c.value, data)
	}
	return len(pending)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"net/netip"
"strings"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
)
// CIDR specifies a block of IP addresses.
// Example: 192.0.2.1/32
//
// +kubebuilder:validation:Format=cidr
type CIDR string

var (
	// ipv4All and ipv6All are the zero-length ("match everything") prefixes
	// used to detect rules that select the entire address space.
	ipv4All = CIDR("0.0.0.0/0")
	ipv6All = CIDR("::/0")
	// World labels appended for zero-prefix CIDR rules; the non-dual-stack
	// variant is used when only a single IP family is enabled.
	worldLabelNonDualStack = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorld}
	worldLabelV4           = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorldIPv4}
	worldLabelV6           = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorldIPv6}
)
// CIDRRule is a rule that specifies a CIDR prefix to/from which outside
// communication is allowed, along with an optional list of subnets within that
// CIDR prefix to/from which outside communication is not allowed.
type CIDRRule struct {
	// CIDR is a CIDR prefix / IP Block.
	//
	// +kubebuilder:validation:OneOf
	Cidr CIDR `json:"cidr,omitempty"`

	// CIDRGroupRef is a reference to a CiliumCIDRGroup object.
	// A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to
	// the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive
	// connections from.
	//
	// +kubebuilder:validation:OneOf
	CIDRGroupRef CIDRGroupRef `json:"cidrGroupRef,omitempty"`

	// CIDRGroupSelector selects CiliumCIDRGroups by their labels,
	// rather than by name.
	//
	// +kubebuilder:validation:OneOf
	CIDRGroupSelector *slim_metav1.LabelSelector `json:"cidrGroupSelector,omitempty"`

	// ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule
	// is not allowed to initiate connections to. These CIDR prefixes should be
	// contained within Cidr; using ExceptCIDRs together with CIDRGroupRef is not
	// supported yet.
	// These exceptions are only applied to the Cidr in this CIDRRule, and do not
	// apply to any other CIDR prefixes in any other CIDRRules.
	//
	// +kubebuilder:validation:Optional
	ExceptCIDRs []CIDR `json:"except,omitempty"`

	// Generated indicates whether the rule was generated based on other rules
	// or provided by user. It is not serialized.
	Generated bool `json:"-"`
}
// String converts the CIDRRule into a human-readable string, rendering any
// exceptions as a "-[...]" suffix after the main prefix.
func (r CIDRRule) String() string {
	out := string(r.Cidr)
	if len(r.ExceptCIDRs) > 0 {
		out += "-" + CIDRSlice(r.ExceptCIDRs).String()
	}
	return out
}
// CIDRSlice is a slice of CIDRs. It allows receiver methods to be defined for
// transforming the slice into other convenient forms such as
// EndpointSelectorSlice.
type CIDRSlice []CIDR
// GetAsEndpointSelectors returns the provided CIDR slice as a slice of
// endpoint selectors. When the all-IPv4 and/or all-IPv6 prefixes are present,
// the matching reserved:world selectors are appended exactly once.
func (s CIDRSlice) GetAsEndpointSelectors() EndpointSelectorSlice {
	// Track whether the zero-length prefixes appear so the reserved:world
	// selectors are added at most once even if the prefixes repeat.
	var sawV4All, sawV6All bool
	selectors := EndpointSelectorSlice{}
	for _, c := range s {
		switch c {
		case ipv4All:
			sawV4All = true
		case ipv6All:
			sawV6All = true
		}
		// Prefixes that fail to parse are silently skipped, preserving
		// previous behavior. TODO: Log the error?
		if lbl, err := labels.IPStringToLabel(string(c)); err == nil {
			selectors = append(selectors, NewESFromLabels(lbl))
		}
	}
	switch {
	case option.Config.IsDualStack():
		// In dual-stack mode world-ipv4 and world-ipv6 must be
		// distinguished from one another.
		if sawV4All && sawV6All {
			selectors = append(selectors, ReservedEndpointSelectors[labels.IDNameWorld])
		}
		if sawV4All {
			selectors = append(selectors, ReservedEndpointSelectors[labels.IDNameWorldIPv4])
		}
		if sawV6All {
			selectors = append(selectors, ReservedEndpointSelectors[labels.IDNameWorldIPv6])
		}
	case option.Config.EnableIPv4 && sawV4All,
		option.Config.EnableIPv6 && sawV6All:
		selectors = append(selectors, ReservedEndpointSelectors[labels.IDNameWorld])
	}
	return selectors
}
// StringSlice returns the CIDR slice as a slice of strings.
func (s CIDRSlice) StringSlice() []string {
	out := make([]string, len(s))
	for i, c := range s {
		out[i] = string(c)
	}
	return out
}
// String converts the CIDRSlice into a human-readable string of the form
// "[cidr1,cidr2,...]"; an empty slice yields an empty string.
func (s CIDRSlice) String() string {
	if len(s) == 0 {
		return ""
	}
	joined := strings.Join(s.StringSlice(), ",")
	return "[" + joined + "]"
}
// CIDRRuleSlice is a slice of CIDRRules. It allows receiver methods to be
// defined for transforming the slice into other convenient forms such as
// EndpointSelectorSlice.
type CIDRRuleSlice []CIDRRule
// GetAsEndpointSelectors returns the provided CIDRRule slice as a slice of
// endpoint selectors
//
// The ExceptCIDRs block is inserted as a negative match. Specifically, the
// DoesNotExist qualifier. For example, the CIDRRule
//
//	cidr: 1.1.1.0/24
//	exceptCIDRs: ["1.1.1.1/32"]
//
// results in the selector equivalent to "cidr:1.1.1.0/24 !cidr:1.1.1.1/32".
//
// This works because the label selectors will select numeric identities belonging only
// to the shorter prefixes. However, longer prefixes will have a different numeric
// identity, as the bpf ipcache is an LPM lookup. This essentially acts as a
// "carve-out", using the LPM mechanism to exclude subsets of a larger prefix.
func (s CIDRRuleSlice) GetAsEndpointSelectors() EndpointSelectorSlice {
	ces := make(EndpointSelectorSlice, 0, len(s))
	for _, r := range s {
		// One requirement for the main match plus one per exception.
		ls := slim_metav1.LabelSelector{
			MatchExpressions: make([]slim_metav1.LabelSelectorRequirement, 0, 1+len(r.ExceptCIDRs)),
		}
		// If we see the zero-prefix label, then
		// we need to "duplicate" the generated selector, selecting also
		// the `reserved:world` label.
		var addWorldLabel labels.Label
		// add the "main" label:
		// either a CIDR, CIDRGroupRef, or CIDRGroupSelector
		if r.Cidr != "" {
			// Check to see if this is a zero-length prefix.
			// If so, determine the extra label to add
			if strings.HasSuffix(string(r.Cidr), "/0") {
				switch {
				case !option.Config.IsDualStack():
					addWorldLabel = worldLabelNonDualStack
				case strings.Contains(string(r.Cidr), ":"):
					addWorldLabel = worldLabelV6
				default:
					addWorldLabel = worldLabelV4
				}
			}
			lbl, err := labels.IPStringToLabel(string(r.Cidr))
			if err != nil {
				// should not happen, IP already parsed.
				continue
			}
			ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{
				Key:      lbl.GetExtendedKey(),
				Operator: slim_metav1.LabelSelectorOpExists,
			})
		} else if r.CIDRGroupRef != "" {
			lbl := LabelForCIDRGroupRef(string(r.CIDRGroupRef))
			ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{
				Key:      lbl.GetExtendedKey(),
				Operator: slim_metav1.LabelSelectorOpExists,
			})
		} else if r.CIDRGroupSelector != nil {
			ls = *NewESFromK8sLabelSelector(labels.LabelSourceCIDRGroupKeyPrefix, r.CIDRGroupSelector).LabelSelector
		} else {
			// should never be hit, but paranoia
			continue
		}
		// exclude any excepted CIDRs.
		// Do so by inserting a "DoesNotExist" requirement for the given prefix key
		for _, exceptCIDR := range r.ExceptCIDRs {
			// NOTE(review): the parse error is discarded here; presumably
			// ExceptCIDRs were validated upstream — confirm.
			lbl, _ := labels.IPStringToLabel(string(exceptCIDR))
			ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{
				Key:      lbl.GetExtendedKey(),
				Operator: slim_metav1.LabelSelectorOpDoesNotExist,
			})
		}
		ces = append(ces, NewESFromK8sLabelSelector("", &ls))
		// Duplicate ls with world label
		if addWorldLabel.Key != "" {
			worldLS := ls.DeepCopy()
			// Replace the main (first) requirement with the world label;
			// exception requirements are kept as-is.
			worldLS.MatchExpressions[0] = slim_metav1.LabelSelectorRequirement{
				Key:      addWorldLabel.GetExtendedKey(),
				Operator: slim_metav1.LabelSelectorOpExists,
			}
			ces = append(ces, NewESFromK8sLabelSelector("", worldLS))
		}
	}
	return ces
}
// addrsToCIDRRules generates CIDRRules for the IPs passed in, wrapping each
// address as a full-length /32 (IPv4) or /128 (IPv6) prefix.
// Every produced rule is marked Generated.
func addrsToCIDRRules(addrs []netip.Addr) []CIDRRule {
	rules := make([]CIDRRule, 0, len(addrs))
	for _, a := range addrs {
		suffix := "/128"
		if a.Is4() {
			suffix = "/32"
		}
		rules = append(rules, CIDRRule{
			Cidr:        CIDR(a.String() + suffix),
			ExceptCIDRs: make([]CIDR, 0),
			Generated:   true,
		})
	}
	return rules
}
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
//
// CIDRGroupRef is a reference to a CIDR Group.
// A CIDR Group is a list of CIDRs whose IP addresses should be considered as a
// same entity when applying fromCIDRGroupRefs policies on incoming network traffic.
// The pattern above restricts references to valid DNS subdomain names.
type CIDRGroupRef string
// LabelPrefixGroupName is the label-key prefix under which CIDR group
// references are encoded.
const LabelPrefixGroupName = "io.cilium.policy.cidrgroupname"

// LabelForCIDRGroupRef returns the label selecting the CIDR group named ref,
// keyed as "<LabelPrefixGroupName>/<ref>" in the cidrgroup label source.
func LabelForCIDRGroupRef(ref string) labels.Label {
	// Fixed number of pieces: plain concatenation compiles to a single
	// allocation, equivalent to the previous strings.Builder construction.
	return labels.NewLabel(
		LabelPrefixGroupName+"/"+ref,
		"",
		labels.LabelSourceCIDRGroup,
	)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"fmt"
)
// Decision is a reachability policy decision
type Decision byte

const (
	// Undecided means that we have not come to a decision yet
	Undecided Decision = iota
	// Allowed means that reachability is allowed
	Allowed
	// Denied means that reachability is denied
	Denied
)

var (
	// decisionToString maps each Decision to its serialized form.
	decisionToString = map[Decision]string{
		Undecided: "undecided",
		Allowed:   "allowed",
		Denied:    "denied",
	}
	// stringToDecision is the inverse of decisionToString.
	stringToDecision = map[string]Decision{
		"undecided": Undecided,
		"allowed":   Allowed,
		"denied":    Denied,
	}
)

// String returns the decision in human readable format.
// Unknown values render as the empty string.
func (d Decision) String() string {
	if v, exists := decisionToString[d]; exists {
		return v
	}
	return ""
}

// UnmarshalJSON parses a JSON formatted buffer and returns a decision.
// The buffer must be a quoted JSON string such as `"allowed"`; previously
// any payload with matching length was accepted with its first and last
// bytes blindly stripped.
func (d *Decision) UnmarshalJSON(b []byte) error {
	if d == nil {
		// NOTE(review): this branch writes into a throwaway Decision, so a
		// nil receiver silently discards the parsed value. encoding/json
		// never calls with a nil receiver; preserved for compatibility.
		d = new(Decision)
	}
	// Require surrounding quotes in addition to a plausible length.
	if len(b) <= len(`""`) || b[0] != '"' || b[len(b)-1] != '"' {
		return fmt.Errorf("invalid decision '%s'", string(b))
	}
	if v, exists := stringToDecision[string(b[1:len(b)-1])]; exists {
		*d = v
		return nil
	}
	return fmt.Errorf("unknown '%s' decision", string(b))
}

// MarshalJSON returns the decision as JSON formatted buffer,
// i.e. the String() form surrounded by double quotes.
func (d Decision) MarshalJSON() ([]byte, error) {
	return []byte(`"` + d.String() + `"`), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"context"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/slices"
)
// EgressCommonRule is a rule that shares some of its fields across the
// EgressRule and EgressDenyRule. It's publicly exported so the code generators
// can generate code for this structure.
//
// +deepequal-gen:private-method=true
type EgressCommonRule struct {
	// ToEndpoints is a list of endpoints identified by an EndpointSelector to
	// which the endpoints subject to the rule are allowed to communicate.
	//
	// Example:
	// Any endpoint with the label "role=frontend" can communicate with any
	// endpoint carrying the label "role=backend".
	//
	// +kubebuilder:validation:Optional
	ToEndpoints []EndpointSelector `json:"toEndpoints,omitempty"`

	// ToRequires is a list of additional constraints which must be met
	// in order for the selected endpoints to be able to connect to other
	// endpoints. These additional constraints do not by themselves grant access
	// privileges and must always be accompanied with at least one matching
	// ToEndpoints.
	//
	// Example:
	// Any Endpoint with the label "team=A" requires any endpoint to which it
	// communicates to also carry the label "team=A".
	//
	// +kubebuilder:validation:Optional
	ToRequires []EndpointSelector `json:"toRequires,omitempty"`

	// ToCIDR is a list of IP blocks which the endpoint subject to the rule
	// is allowed to initiate connections. Only connections destined for
	// outside of the cluster and not targeting the host will be subject
	// to CIDR rules. This will match on the destination IP address of
	// outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet
	// with no ExcludeCIDRs is equivalent. Overlaps are allowed between
	// ToCIDR and ToCIDRSet.
	//
	// Example:
	// Any endpoint with the label "app=database-proxy" is allowed to
	// initiate connections to 10.2.3.0/24
	//
	// +kubebuilder:validation:Optional
	ToCIDR CIDRSlice `json:"toCIDR,omitempty"`

	// ToCIDRSet is a list of IP blocks which the endpoint subject to the rule
	// is allowed to initiate connections to in addition to connections
	// which are allowed via ToEndpoints, along with a list of subnets contained
	// within their corresponding IP block to which traffic should not be
	// allowed. This will match on the destination IP address of outgoing
	// connections. Adding a prefix into ToCIDR or into ToCIDRSet with no
	// ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and
	// ToCIDRSet.
	//
	// Example:
	// Any endpoint with the label "app=database-proxy" is allowed to
	// initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28.
	//
	// +kubebuilder:validation:Optional
	ToCIDRSet CIDRRuleSlice `json:"toCIDRSet,omitempty"`

	// ToEntities is a list of special entities to which the endpoint subject
	// to the rule is allowed to initiate connections. Supported entities are
	// `world`, `cluster`, `host`, `remote-node`, `kube-apiserver`, `ingress`, `init`,
	// `health`, `unmanaged`, `none` and `all`.
	//
	// +kubebuilder:validation:Optional
	ToEntities EntitySlice `json:"toEntities,omitempty"`

	// ToServices is a list of services to which the endpoint subject
	// to the rule is allowed to initiate connections.
	// Currently Cilium only supports toServices for K8s services.
	//
	// +kubebuilder:validation:Optional
	ToServices []Service `json:"toServices,omitempty"`

	// ToGroups is a directive that allows the integration with multiple outside
	// providers. Currently, only AWS is supported, and the rule can select by
	// multiple sub directives:
	//
	// Example:
	// toGroups:
	// - aws:
	//     securityGroupsIds:
	//     - 'sg-XXXXXXXXXXXXX'
	//
	// +kubebuilder:validation:Optional
	ToGroups []Groups `json:"toGroups,omitempty"`

	// ToNodes is a list of nodes identified by an
	// EndpointSelector to which endpoints subject to the rule is allowed to communicate.
	//
	// +kubebuilder:validation:Optional
	ToNodes []EndpointSelector `json:"toNodes,omitempty"`

	// aggregatedSelectors is a cache of ToEntities/ToCIDR/ToCIDRSet converted
	// to endpoint selectors; populated lazily by SetAggregatedSelectors.
	// TODO: Move this to the policy package
	// (https://github.com/cilium/cilium/issues/8353)
	aggregatedSelectors EndpointSelectorSlice `json:"-"`
}
// DeepEqual returns true if both EgressCommonRule are deep equal.
// The semantic of a nil slice in one of its fields is different from the semantic
// of an empty non-nil slice, so pairs where exactly one side is nil are
// rejected before delegating to the autogenerated comparison.
func (in *EgressCommonRule) DeepEqual(other *EgressCommonRule) bool {
	switch {
	case slices.XorNil(in.ToEndpoints, other.ToEndpoints),
		slices.XorNil(in.ToCIDR, other.ToCIDR),
		slices.XorNil(in.ToCIDRSet, other.ToCIDRSet),
		slices.XorNil(in.ToEntities, other.ToEntities):
		return false
	}
	return in.deepEqual(other)
}
// EgressRule contains all rule types which can be applied at egress, i.e.
// network traffic that originates inside the endpoint and exits the endpoint
// selected by the endpointSelector.
//
//   - All members of this structure are optional. If omitted or empty, the
//     member will have no effect on the rule.
//
//   - If multiple members of the structure are specified, then all members
//     must match in order for the rule to take effect. The exception to this
//     rule is the ToRequires member; the effects of any Requires field in any
//     rule will apply to all other rules as well.
//
//   - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are
//     mutually exclusive. Only one of these members may be present within an
//     individual rule.
type EgressRule struct {
	EgressCommonRule `json:",inline"`

	// ToPorts is a list of destination ports identified by port number and
	// protocol which the endpoint subject to the rule is allowed to
	// connect to.
	//
	// Example:
	// Any endpoint with the label "role=frontend" is allowed to initiate
	// connections to destination port 8080/tcp
	//
	// +kubebuilder:validation:Optional
	ToPorts PortRules `json:"toPorts,omitempty"`

	// ToFQDN allows whitelisting DNS names in place of IPs. The IPs that result
	// from DNS resolution of `ToFQDN.MatchName`s are added to the same
	// EgressRule object as ToCIDRSet entries, and behave accordingly. Any L4 and
	// L7 rules within this EgressRule will also apply to these IPs.
	// The DNS -> IP mapping is re-resolved periodically from within the
	// cilium-agent, and the IPs in the DNS response are effected in the policy
	// for selected pods as-is (i.e. the list of IPs is not modified in any way).
	// Note: An explicit rule to allow for DNS traffic is needed for the pods, as
	// ToFQDN counts as an egress rule and will enforce egress policy when
	// PolicyEnforcement=default.
	// Note: If the resolved IPs are IPs within the kubernetes cluster, the
	// ToFQDN rule will not apply to that IP.
	// Note: ToFQDN cannot occur in the same policy as other To* rules.
	//
	// +kubebuilder:validation:Optional
	ToFQDNs FQDNSelectorSlice `json:"toFQDNs,omitempty"`

	// ICMPs is a list of ICMP rule identified by type number
	// which the endpoint subject to the rule is allowed to connect to.
	//
	// Example:
	// Any endpoint with the label "app=httpd" is allowed to initiate
	// type 8 ICMP connections.
	//
	// +kubebuilder:validation:Optional
	ICMPs ICMPRules `json:"icmps,omitempty"`

	// Authentication is the required authentication type for the allowed traffic, if any.
	//
	// +kubebuilder:validation:Optional
	Authentication *Authentication `json:"authentication,omitempty"`
}
// EgressDenyRule contains all rule types which can be applied at egress, i.e.
// network traffic that originates inside the endpoint and exits the endpoint
// selected by the endpointSelector.
//
//   - All members of this structure are optional. If omitted or empty, the
//     member will have no effect on the rule.
//
//   - If multiple members of the structure are specified, then all members
//     must match in order for the rule to take effect. The exception to this
//     rule is the ToRequires member; the effects of any Requires field in any
//     rule will apply to all other rules as well.
//
//   - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are
//     mutually exclusive. Only one of these members may be present within an
//     individual rule.
type EgressDenyRule struct {
	EgressCommonRule `json:",inline"`

	// ToPorts is a list of destination ports identified by port number and
	// protocol which the endpoint subject to the rule is not allowed to connect
	// to.
	//
	// Example:
	// Any endpoint with the label "role=frontend" is not allowed to initiate
	// connections to destination port 8080/tcp
	//
	// +kubebuilder:validation:Optional
	ToPorts PortDenyRules `json:"toPorts,omitempty"`

	// ICMPs is a list of ICMP rule identified by type number
	// which the endpoint subject to the rule is not allowed to connect to.
	//
	// Example:
	// Any endpoint with the label "app=httpd" is not allowed to initiate
	// type 8 ICMP connections.
	//
	// +kubebuilder:validation:Optional
	ICMPs ICMPRules `json:"icmps,omitempty"`
}
// getAggregatedSelectors returns a single slice containing all of the
// following fields within the EgressCommonRule, converted to
// EndpointSelector, for easy lookup while performing policy evaluation
// for the rule:
// * ToEntities
// * ToCIDR
// * ToCIDRSet
//
// ToEndpoints is not aggregated due to requirement folding in
// GetDestinationEndpointSelectorsWithRequirements()
func (e *EgressCommonRule) getAggregatedSelectors() EndpointSelectorSlice {
	// explicitly check for empty non-nil slices, it should not result in any identity being selected.
	if (e.ToEntities != nil && len(e.ToEntities) == 0) ||
		(e.ToCIDR != nil && len(e.ToCIDR) == 0) ||
		(e.ToCIDRSet != nil && len(e.ToCIDRSet) == 0) {
		return nil
	}
	res := make(EndpointSelectorSlice, 0, len(e.ToEntities)+len(e.ToCIDR)+len(e.ToCIDRSet))
	res = append(res, e.ToEntities.GetAsEndpointSelectors()...)
	res = append(res, e.ToCIDR.GetAsEndpointSelectors()...)
	res = append(res, e.ToCIDRSet.GetAsEndpointSelectors()...)
	return res
}
// SetAggregatedSelectors creates a single slice containing all of the following
// fields within the EgressRule, converted to EndpointSelector, to be stored
// within the EgressRule for easy lookup while performing policy evaluation
// for the rule:
// * ToEntities
// * ToCIDR
// * ToCIDRSet
// * ToFQDNs
//
// ToEndpoints is not aggregated due to requirement folding in
// GetDestinationEndpointSelectorsWithRequirements()
func (e *EgressRule) SetAggregatedSelectors() {
	// Start from the common aggregation, then add the FQDN-derived selectors.
	selectors := e.getAggregatedSelectors()
	e.aggregatedSelectors = append(selectors, e.ToFQDNs.GetAsEndpointSelectors()...)
}
// SetAggregatedSelectors creates a single slice containing all of the following
// fields within the EgressCommonRule, converted to EndpointSelector, to be
// stored within the EgressCommonRule for easy lookup while performing policy
// evaluation for the rule:
// * ToEntities
// * ToCIDR
// * ToCIDRSet
//
// ToEndpoints is not aggregated due to requirement folding in
// GetDestinationEndpointSelectorsWithRequirements()
func (e *EgressCommonRule) SetAggregatedSelectors() {
	e.aggregatedSelectors = e.getAggregatedSelectors()
}
// GetDestinationEndpointSelectorsWithRequirements returns a slice of endpoints selectors covering
// all L3 dst selectors of the egress rule, lazily (re)building the
// aggregated selector cache when it is unset.
func (e *EgressRule) GetDestinationEndpointSelectorsWithRequirements(requirements []slim_metav1.LabelSelectorRequirement) EndpointSelectorSlice {
	if e.aggregatedSelectors == nil {
		e.SetAggregatedSelectors()
	}
	return e.EgressCommonRule.getDestinationEndpointSelectorsWithRequirements(requirements)
}
// GetDestinationEndpointSelectorsWithRequirements returns a slice of endpoints selectors covering
// all L3 destination selectors of the egress deny rule, lazily (re)building
// the aggregated selector cache when it is unset.
func (e *EgressDenyRule) GetDestinationEndpointSelectorsWithRequirements(requirements []slim_metav1.LabelSelectorRequirement) EndpointSelectorSlice {
	if e.aggregatedSelectors == nil {
		e.SetAggregatedSelectors()
	}
	return e.EgressCommonRule.getDestinationEndpointSelectorsWithRequirements(requirements)
}
// getDestinationEndpointSelectorsWithRequirements returns a slice of endpoints
// selectors covering all L3 destination selectors of the egress rule, folding
// the given label requirements into each ToEndpoints selector.
func (e *EgressCommonRule) getDestinationEndpointSelectorsWithRequirements(
	requirements []slim_metav1.LabelSelectorRequirement,
) EndpointSelectorSlice {
	// explicitly check for empty non-nil slices, it should not result in any identity being selected.
	if e.aggregatedSelectors == nil || (e.ToEndpoints != nil && len(e.ToEndpoints) == 0) ||
		(e.ToNodes != nil && len(e.ToNodes) == 0) {
		return nil
	}
	res := make(EndpointSelectorSlice, 0, len(e.ToEndpoints)+len(e.aggregatedSelectors)+len(e.ToNodes))
	if len(requirements) > 0 && len(e.ToEndpoints) > 0 {
		// NOTE(review): in this branch ToNodes selectors are not appended,
		// unlike the else branch — confirm this asymmetry is intentional.
		for idx := range e.ToEndpoints {
			sel := *e.ToEndpoints[idx].DeepCopy()
			sel.MatchExpressions = append(sel.MatchExpressions, requirements...)
			sel.SyncRequirementsWithLabelSelector()
			// Even though this string is deep copied, we need to override it
			// because we are updating the contents of the MatchExpressions.
			sel.cachedLabelSelectorString = sel.LabelSelector.String()
			res = append(res, sel)
		}
	} else {
		res = append(res, e.ToEndpoints...)
		res = append(res, e.ToNodes...)
	}
	return append(res, e.aggregatedSelectors...)
}
// AllowsWildcarding returns true if wildcarding should be performed upon
// policy evaluation for the given rule. FQDN-based rules never allow it.
func (e *EgressRule) AllowsWildcarding() bool {
	if len(e.ToFQDNs) > 0 {
		return false
	}
	return e.EgressCommonRule.AllowsWildcarding()
}
// AllowsWildcarding returns true if wildcarding should be performed upon
// policy evaluation for the given rule, i.e. when neither ToRequires nor
// ToServices constraints are present.
func (e *EgressCommonRule) AllowsWildcarding() bool {
	return len(e.ToRequires) == 0 && len(e.ToServices) == 0
}
// RequiresDerivative returns true when the EgressCommonRule contains sections
// that need a derivative policy created in order to be enforced
// (e.g. ToGroups).
func (e *EgressCommonRule) RequiresDerivative() bool {
	return len(e.ToGroups) != 0
}
// IsL3 reports whether the rule carries any L3-level constraint.
// A nil receiver reports false.
func (e *EgressCommonRule) IsL3() bool {
	if e == nil {
		return false
	}
	for _, n := range []int{
		len(e.ToEndpoints),
		len(e.ToRequires),
		len(e.ToCIDR),
		len(e.ToCIDRSet),
		len(e.ToEntities),
		len(e.ToGroups),
		len(e.ToNodes),
	} {
		if n > 0 {
			return true
		}
	}
	return false
}
// CreateDerivative will return a new rule based on the data gathered by the
// rules that creates a new derivative policy.
// In the case of ToGroups will call outside using the groups callback and this
// function can take a bit of time.
func (e *EgressRule) CreateDerivative(ctx context.Context) (*EgressRule, error) {
	newRule := e.DeepCopy()
	if !e.RequiresDerivative() {
		return newRule, nil
	}
	cidrSet, err := ExtractCidrSet(ctx, e.ToGroups)
	if err != nil {
		return &EgressRule{}, err
	}
	// Build the merged CIDR set into a fresh slice. The previous code
	// allocated a slice it never used and then appended directly to
	// e.ToCIDRSet, which could write into the source rule's backing array
	// when it had spare capacity.
	merged := make(CIDRRuleSlice, 0, len(e.ToCIDRSet)+len(cidrSet))
	merged = append(merged, e.ToCIDRSet...)
	merged = append(merged, cidrSet...)
	newRule.ToCIDRSet = merged
	newRule.ToGroups = nil
	// NOTE(review): selectors are refreshed on the source rule e, not on
	// newRule — preserved as-is; confirm this is intentional.
	e.SetAggregatedSelectors()
	return newRule, nil
}
// CreateDerivative will return a new rule based on the data gathered by the
// rules that creates a new derivative policy.
// In the case of ToGroups will call outside using the groups callback and this
// function can take a bit of time.
func (e *EgressDenyRule) CreateDerivative(ctx context.Context) (*EgressDenyRule, error) {
	newRule := e.DeepCopy()
	if !e.RequiresDerivative() {
		return newRule, nil
	}
	cidrSet, err := ExtractCidrSet(ctx, e.ToGroups)
	if err != nil {
		return &EgressDenyRule{}, err
	}
	// Build the merged CIDR set into a fresh slice. The previous code
	// allocated a slice it never used and then appended directly to
	// e.ToCIDRSet, which could write into the source rule's backing array
	// when it had spare capacity.
	merged := make(CIDRRuleSlice, 0, len(e.ToCIDRSet)+len(cidrSet))
	merged = append(merged, e.ToCIDRSet...)
	merged = append(merged, cidrSet...)
	newRule.ToCIDRSet = merged
	newRule.ToGroups = nil
	// NOTE(review): selectors are refreshed on the source rule e, not on
	// newRule — preserved as-is; confirm this is intentional.
	e.SetAggregatedSelectors()
	return newRule, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
k8sapi "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
)
// Entity specifies the class of receiver/sender endpoints that do not have
// individual identities. Entities are used to describe "outside of cluster",
// "host", etc.
//
// +kubebuilder:validation:Enum=all;world;cluster;host;init;ingress;unmanaged;remote-node;health;none;kube-apiserver
type Entity string

const (
	// EntityAll is an entity that represents all traffic
	EntityAll Entity = "all"

	// EntityWorld is an entity that represents traffic external to
	// endpoint's cluster
	EntityWorld Entity = "world"

	// EntityWorldIPv4 is an entity that represents traffic external to
	// endpoint's cluster, specifically an IPv4 endpoint, to distinguish
	// it from IPv6 in dual-stack mode.
	EntityWorldIPv4 Entity = "world-ipv4"

	// EntityWorldIPv6 is an entity that represents traffic external to
	// endpoint's cluster, specifically an IPv6 endpoint, to distinguish
	// it from IPv4 in dual-stack mode.
	EntityWorldIPv6 Entity = "world-ipv6"

	// EntityCluster is an entity that represents traffic within the
	// endpoint's cluster, to endpoints not managed by cilium
	EntityCluster Entity = "cluster"

	// EntityHost is an entity that represents traffic within endpoint host
	EntityHost Entity = "host"

	// EntityInit is an entity that represents an initializing endpoint
	EntityInit Entity = "init"

	// EntityIngress is an entity that represents envoy proxy
	EntityIngress Entity = "ingress"

	// EntityUnmanaged is an entity that represents unmanaged endpoints.
	EntityUnmanaged Entity = "unmanaged"

	// EntityRemoteNode is an entity that represents all remote nodes
	EntityRemoteNode Entity = "remote-node"

	// EntityHealth is an entity that represents all health endpoints.
	EntityHealth Entity = "health"

	// EntityNone is an entity that can be selected but never exist
	EntityNone Entity = "none"

	// EntityKubeAPIServer is an entity that represents the kube-apiserver.
	EntityKubeAPIServer Entity = "kube-apiserver"
)
var (
	// Reserved-label endpoint selectors backing the entity mapping below.
	endpointSelectorWorld         = NewESFromLabels(labels.NewLabel(labels.IDNameWorld, "", labels.LabelSourceReserved))
	endpointSelectorWorldIPv4     = NewESFromLabels(labels.NewLabel(labels.IDNameWorldIPv4, "", labels.LabelSourceReserved))
	endpointSelectorWorldIPv6     = NewESFromLabels(labels.NewLabel(labels.IDNameWorldIPv6, "", labels.LabelSourceReserved))
	endpointSelectorHost          = NewESFromLabels(labels.NewLabel(labels.IDNameHost, "", labels.LabelSourceReserved))
	endpointSelectorInit          = NewESFromLabels(labels.NewLabel(labels.IDNameInit, "", labels.LabelSourceReserved))
	endpointSelectorIngress       = NewESFromLabels(labels.NewLabel(labels.IDNameIngress, "", labels.LabelSourceReserved))
	endpointSelectorRemoteNode    = NewESFromLabels(labels.NewLabel(labels.IDNameRemoteNode, "", labels.LabelSourceReserved))
	endpointSelectorHealth        = NewESFromLabels(labels.NewLabel(labels.IDNameHealth, "", labels.LabelSourceReserved))
	// EndpointSelectorNone selects the reserved "none" identity; exported
	// for use by other packages.
	EndpointSelectorNone          = NewESFromLabels(labels.NewLabel(labels.IDNameNone, "", labels.LabelSourceReserved))
	endpointSelectorUnmanaged     = NewESFromLabels(labels.NewLabel(labels.IDNameUnmanaged, "", labels.LabelSourceReserved))
	endpointSelectorKubeAPIServer = NewESFromLabels(labels.LabelKubeAPIServer[labels.IDNameKubeAPIServer])

	// EntitySelectorMapping maps special entity names that come in
	// policies to selectors
	// If you add an entry here, you must also update the CRD
	// validation above.
	EntitySelectorMapping = map[Entity]EndpointSelectorSlice{
		EntityAll:           {WildcardEndpointSelector},
		EntityWorld:         {endpointSelectorWorld, endpointSelectorWorldIPv4, endpointSelectorWorldIPv6},
		EntityWorldIPv4:     {endpointSelectorWorldIPv4},
		EntityWorldIPv6:     {endpointSelectorWorldIPv6},
		EntityHost:          {endpointSelectorHost},
		EntityInit:          {endpointSelectorInit},
		EntityIngress:       {endpointSelectorIngress},
		EntityRemoteNode:    {endpointSelectorRemoteNode},
		EntityHealth:        {endpointSelectorHealth},
		EntityUnmanaged:     {endpointSelectorUnmanaged},
		EntityNone:          {EndpointSelectorNone},
		EntityKubeAPIServer: {endpointSelectorKubeAPIServer},

		// EntityCluster is populated with an empty entry to allow the
		// cilium client importing this package to perform basic rule
		// validation. The basic rule validation only enforces
		// awareness of individual entity names and does not require
		// understanding of the individual endpoint selectors. The
		// endpoint selector for the cluster entity can only be
		// initialized at runtime as it depends on user configuration
		// such as the cluster name. See InitEntities() below.
		EntityCluster: {},
	}
)
// EntitySlice is a slice of entities. Entities are special named peers
// (e.g. "world", "host") that policies may reference instead of label
// selectors; see EntitySelectorMapping for the translation.
type EntitySlice []Entity
// GetAsEndpointSelectors returns the provided entity slice as a slice of
// endpoint selectors. Entities without an entry in EntitySelectorMapping
// are silently skipped.
func (s EntitySlice) GetAsEndpointSelectors() EndpointSelectorSlice {
	out := EndpointSelectorSlice{}
	for _, entity := range s {
		selectors, known := EntitySelectorMapping[entity]
		if !known {
			continue
		}
		out = append(out, selectors...)
	}
	return out
}
// InitEntities is called to initialize the policy API layer
func InitEntities(clusterName string) {
EntitySelectorMapping[EntityCluster] = EndpointSelectorSlice{
endpointSelectorHost,
endpointSelectorRemoteNode,
endpointSelectorInit,
endpointSelectorIngress,
endpointSelectorHealth,
endpointSelectorUnmanaged,
endpointSelectorKubeAPIServer,
NewESFromLabels(labels.NewLabel(k8sapi.PolicyLabelCluster, clusterName, labels.LabelSourceK8s)),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"fmt"
"regexp"
"strings"
"github.com/cilium/cilium/pkg/fqdn/dns"
"github.com/cilium/cilium/pkg/fqdn/matchpattern"
"github.com/cilium/cilium/pkg/labels"
)
var (
	// allowedMatchNameChars tests that MatchName contains only valid DNS
	// characters (alphanumerics, '-', '_' and '.').
	allowedMatchNameChars = regexp.MustCompile("^[-a-zA-Z0-9_.]+$")
	// allowedPatternChars tests that the MatchPattern field contains only the
	// characters we want in our wildcard scheme.
	allowedPatternChars = regexp.MustCompile("^[-a-zA-Z0-9_.*]+$") // the * inside the [] is a literal *
	// FQDNMatchNameRegexString is a regex string which matches what's expected
	// in the MatchName field in the FQDNSelector. This should be kept in-sync
	// with the marker comment for validation. There's no way to use a Golang
	// variable in the marker comment, so it's left up to the developer.
	FQDNMatchNameRegexString = `^([-a-zA-Z0-9_]+[.]?)+$`
	// FQDNMatchPatternRegexString is a regex string which matches what's expected
	// in the MatchPattern field in the FQDNSelector. This should be kept in-sync
	// with the marker comment for validation. There's no way to use a Golang
	// variable in the marker comment, so it's left up to the developer.
	FQDNMatchPatternRegexString = `^([-a-zA-Z0-9_*]+[.]?)+$`
)
// FQDNSelector selects DNS names, either by exact name (MatchName) or by
// wildcard pattern (MatchPattern). Only one of the two fields may be set
// (enforced by sanitize()).
type FQDNSelector struct {
	// MatchName matches literal DNS names. A trailing "." is automatically added
	// when missing.
	//
	// +kubebuilder:validation:MaxLength=255
	// +kubebuilder:validation:Pattern=`^([-a-zA-Z0-9_]+[.]?)+$`
	// +kubebuilder:validation:OneOf
	MatchName string `json:"matchName,omitempty"`
	// MatchPattern allows using wildcards to match DNS names. All wildcards are
	// case insensitive. The wildcards are:
	// - "*" matches 0 or more DNS valid characters, and may occur anywhere in
	// the pattern. As a special case a "*" as the leftmost character, without a
	// following "." matches all subdomains as well as the name to the right.
	// A trailing "." is automatically added when missing.
	//
	// Examples:
	// `*.cilium.io` matches subdomains of cilium at that level
	// www.cilium.io and blog.cilium.io match, cilium.io and google.com do not
	// `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io"
	// except those containing "." separator, subcilium.io and sub-cilium.io match,
	// www.cilium.io and blog.cilium.io does not
	// sub*.cilium.io matches subdomains of cilium where the subdomain component
	// begins with "sub"
	// sub.cilium.io and subdomain.cilium.io match, www.cilium.io,
	// blog.cilium.io, cilium.io and google.com do not
	//
	// +kubebuilder:validation:MaxLength=255
	// +kubebuilder:validation:Pattern=`^([-a-zA-Z0-9_*]+[.]?)+$`
	// +kubebuilder:validation:OneOf
	MatchPattern string `json:"matchPattern,omitempty"`
}
// String returns a human-readable "MatchName: <x>, MatchPattern: <y>"
// representation of the selector.
func (s *FQDNSelector) String() string {
	const namePrefix = "MatchName: "
	const patternPrefix = ", MatchPattern: "
	// Pre-size the builder so the concatenation performs a single allocation.
	var b strings.Builder
	b.Grow(len(namePrefix) + len(patternPrefix) + len(s.MatchName) + len(s.MatchPattern))
	b.WriteString(namePrefix)
	b.WriteString(s.MatchName)
	b.WriteString(patternPrefix)
	b.WriteString(s.MatchPattern)
	return b.String()
}
// IdentityLabel returns the label which needs to be added to each identity
// selected by this selector. The identity label is based on the MatchName
// if set, otherwise on the MatchPattern. This matches the behavior of the
// ToRegex function
func (s *FQDNSelector) IdentityLabel() labels.Label {
	key := s.MatchName
	if key == "" {
		key = s.MatchPattern
	}
	return labels.NewLabel(key, "", labels.LabelSourceFQDN)
}
// sanitize validates the FQDNSelector: exactly one of MatchName/MatchPattern
// may be set, each must use only allowed DNS characters, and the pattern must
// compile via matchpattern.Validate.
func (s *FQDNSelector) sanitize() error {
	switch {
	case len(s.MatchName) > 0 && len(s.MatchPattern) > 0:
		return fmt.Errorf("only one of MatchName or MatchPattern is allowed in an FQDNSelector")
	case len(s.MatchName) > 0 && !allowedMatchNameChars.MatchString(s.MatchName):
		return fmt.Errorf("Invalid characters in MatchName: \"%s\". Only 0-9, a-z, A-Z and . and - characters are allowed", s.MatchName)
	case len(s.MatchPattern) > 0 && !allowedPatternChars.MatchString(s.MatchPattern):
		return fmt.Errorf("Invalid characters in MatchPattern: \"%s\". Only 0-9, a-z, A-Z and ., - and * characters are allowed", s.MatchPattern)
	}
	_, err := matchpattern.Validate(s.MatchPattern)
	return err
}
// ToRegex converts the given FQDNSelector to its corresponding regular
// expression. If the MatchName field is set in the selector, it performs all
// needed formatting to ensure that the field is a valid regular expression.
func (s *FQDNSelector) ToRegex() (*regexp.Regexp, error) {
	if s.MatchName != "" {
		// Literal names are normalized to fully-qualified form first.
		return matchpattern.Validate(dns.FQDN(s.MatchName))
	}
	return matchpattern.Validate(matchpattern.Sanitize(s.MatchPattern))
}
// PortRuleDNS is a list of allowed DNS lookups. It shares the field layout of
// FQDNSelector but is validated separately via Sanitize.
type PortRuleDNS FQDNSelector
// Sanitize checks that the matchName in the portRule can be compiled as a
// regex. It does not check that a DNS name is a valid DNS name.
func (r *PortRuleDNS) Sanitize() error {
	switch {
	case len(r.MatchName) > 0 && !allowedMatchNameChars.MatchString(r.MatchName):
		return fmt.Errorf("Invalid characters in MatchName: \"%s\". Only 0-9, a-z, A-Z and . and - characters are allowed", r.MatchName)
	case len(r.MatchPattern) > 0 && !allowedPatternChars.MatchString(r.MatchPattern):
		return fmt.Errorf("Invalid characters in MatchPattern: \"%s\". Only 0-9, a-z, A-Z and ., - and * characters are allowed", r.MatchPattern)
	}
	_, err := matchpattern.Validate(r.MatchPattern)
	return err
}
// GetAsEndpointSelectors returns a FQDNSelector as a single EntityNone
// EndpointSelector slice.
// Note that toFQDNs behaves differently than most other rules. The presence of
// any toFQDNs rules means the endpoint must enforce policy, but the IPs are later
// added as toCIDRSet entries and processed as such.
func (s *FQDNSelector) GetAsEndpointSelectors() EndpointSelectorSlice {
	return EndpointSelectorSlice{EndpointSelectorNone}
}
// FQDNSelectorSlice is a wrapper type for []FQDNSelector to make it simpler to
// bind methods.
type FQDNSelectorSlice []FQDNSelector
// GetAsEndpointSelectors will return a single EntityNone if any
// toFQDNs rules exist, and a nil slice otherwise.
func (s FQDNSelectorSlice) GetAsEndpointSelectors() EndpointSelectorSlice {
	if len(s) == 0 {
		return nil
	}
	// Every element yields the same EntityNone selector, so the first one
	// is representative of the whole slice.
	return s[0].GetAsEndpointSelectors()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"context"
"fmt"
"net/netip"
"github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/lock"
)
const (
	// AWSProvider is the key under which the AWS group provider callback is
	// registered in the providers map.
	AWSProvider = "AWS" // AWS provider key
)
var (
	// providers holds the registered group provider callbacks, keyed by
	// provider name (e.g. AWSProvider).
	providers lock.Map[string, GroupProviderFunc] // map with the list of providers to callback to retrieve info from.
)
// GroupProviderFunc is a func that needs to be registered in order to
// support a new group provider in the platform. It resolves the given
// Groups spec to a list of IP addresses.
type GroupProviderFunc func(context.Context, *Groups) ([]netip.Addr, error)
// Groups structure to store all kinds of new integrations that needs a new
// derivative policy.
type Groups struct {
AWS *AWSGroup `json:"aws,omitempty"`
}
// AWSGroup is a structure that can be used to whitelist information from AWS
// integration. Any combination of the selector fields may be set; Region
// scopes the lookup.
type AWSGroup struct {
	Labels              map[string]string `json:"labels,omitempty"`
	SecurityGroupsIds   []string          `json:"securityGroupsIds,omitempty"`
	SecurityGroupsNames []string          `json:"securityGroupsNames,omitempty"`
	Region              string            `json:"region,omitempty"`
}
// RegisterToGroupsProvider registers a new callback that will be used
// when a new ToGroups rule is added. Registering the same providerName
// twice overwrites the previous callback.
func RegisterToGroupsProvider(providerName string, callback GroupProviderFunc) {
	providers.Store(providerName, callback)
}
// GetCidrSet will return the CIDRRule for the rule using the callbacks that
// are registered in the platform. Addresses returned by multiple providers
// are de-duplicated before being converted to CIDR rules.
func (group *Groups) GetCidrSet(ctx context.Context) ([]CIDRRule, error) {
	var collected []netip.Addr
	// Gather addresses from each configured provider.
	if group.AWS != nil {
		callback, registered := providers.Load(AWSProvider)
		if !registered {
			return nil, fmt.Errorf("Provider %s is not registered", AWSProvider)
		}
		found, err := callback(ctx, group)
		if err != nil {
			return nil, fmt.Errorf(
				"Cannot retrieve data from %s provider: %w",
				AWSProvider, err)
		}
		collected = append(collected, found...)
	}
	return addrsToCIDRRules(ip.KeepUniqueAddrs(collected)), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"fmt"
"regexp"
)
// MismatchAction specifies what to do when there is no header match.
// Empty string is the default for making the rule fail the match.
// Otherwise the rule is still considered as matching, but the mismatches
// are logged in the access log.
type MismatchAction string

const (
	MismatchActionLog     MismatchAction = "LOG"     // Keep checking other matches
	MismatchActionAdd     MismatchAction = "ADD"     // Add the missing value to a possibly multi-valued header
	MismatchActionDelete  MismatchAction = "DELETE"  // Remove the whole mismatching header
	MismatchActionReplace MismatchAction = "REPLACE" // Replace (or add if missing) the header
)
// HeaderMatch extends the HeaderValue for matching requirement of a
// named header field against an immediate string or a secret value.
// If none of the optional fields is present, then the
// header value is not matched, only presence of the header is enough.
type HeaderMatch struct {
	// Mismatch identifies what to do in case there is no match. The default is
	// to drop the request. Otherwise the overall rule is still considered as
	// matching, but the mismatches are logged in the access log.
	//
	// +kubebuilder:validation:Enum=LOG;ADD;DELETE;REPLACE
	// +kubebuilder:validation:Optional
	Mismatch MismatchAction `json:"mismatch,omitempty"`
	// Name identifies the header.
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:MinLength=1
	Name string `json:"name"`
	// Secret refers to a secret that contains the value to be matched against.
	// The secret must only contain one entry. If the referred secret does not
	// exist, and there is no "Value" specified, the match will fail.
	//
	// +kubebuilder:validation:Optional
	Secret *Secret `json:"secret,omitempty"`
	// Value matches the exact value of the header. Can be specified either
	// alone or together with "Secret"; will be used as the header value if the
	// secret can not be found in the latter case.
	//
	// +kubebuilder:validation:Optional
	Value string `json:"value,omitempty"`
}
// PortRuleHTTP is a list of HTTP protocol constraints. All fields are
// optional, if all fields are empty or missing, the rule does not have any
// effect.
//
// All fields of this type are extended POSIX regex as defined by IEEE Std
// 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax)
// matched against the path of an incoming request. Currently it can contain
// characters disallowed from the conventional "path" part of a URL as defined
// by RFC 3986.
type PortRuleHTTP struct {
	// Path is an extended POSIX regex matched against the path of a
	// request. Currently it can contain characters disallowed from the
	// conventional "path" part of a URL as defined by RFC 3986.
	//
	// If omitted or empty, all paths are allowed.
	//
	// +kubebuilder:validation:Optional
	Path string `json:"path,omitempty"`
	// Method is an extended POSIX regex matched against the method of a
	// request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ...
	//
	// If omitted or empty, all methods are allowed.
	//
	// +kubebuilder:validation:Optional
	Method string `json:"method,omitempty"`
	// Host is an extended POSIX regex matched against the host header of a
	// request. Examples:
	//
	// - foo.bar.com will match the host fooXbar.com or foo-bar.com
	// - foo\.bar\.com will only match the host foo.bar.com
	//
	// If omitted or empty, the value of the host header is ignored.
	//
	// +kubebuilder:validation:Format=idn-hostname
	// +kubebuilder:validation:Optional
	Host string `json:"host,omitempty"`
	// Headers is a list of HTTP headers which must be present in the
	// request. If omitted or empty, requests are allowed regardless of
	// headers present.
	//
	// +kubebuilder:validation:Optional
	Headers []string `json:"headers,omitempty"`
	// HeaderMatches is a list of HTTP headers which must be
	// present and match against the given values. Mismatch field can be used
	// to specify what to do when there is no match.
	//
	// +kubebuilder:validation:Optional
	HeaderMatches []*HeaderMatch `json:"headerMatches,omitempty"`
}
// Sanitize sanitizes HTTP rules. It ensures that the path and method fields
// are valid regular expressions. Note that the proxy may support a wider-range
// of regular expressions (e.g. that specified by ECMAScript), so this function
// may return some false positives. If the rule is invalid, returns an error.
func (h *PortRuleHTTP) Sanitize() error {
	if h.Path != "" {
		if _, err := regexp.Compile(h.Path); err != nil {
			return err
		}
	}
	if h.Method != "" {
		if _, err := regexp.Compile(h.Method); err != nil {
			return err
		}
	}
	// Plain Headers entries are not validated, but HeaderMatches are.
	for _, match := range h.HeaderMatches {
		if match.Name == "" {
			return fmt.Errorf("Header name missing")
		}
		switch match.Mismatch {
		case "", MismatchActionLog, MismatchActionAdd,
			MismatchActionDelete, MismatchActionReplace:
			// valid mismatch action (empty means drop the request)
		default:
			return fmt.Errorf("Invalid header action: %s", match.Mismatch)
		}
		if match.Secret != nil && match.Secret.Name == "" {
			return fmt.Errorf("Secret name missing")
		}
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
	// IPv4Family and IPv6Family are the accepted values of ICMPField.Family.
	IPv4Family = "IPv4"
	IPv6Family = "IPv6"
)
// icmpIpv4TypeNameToCode maps IANA ICMPv4 message-type names to their
// numeric type codes (as decimal strings).
var icmpIpv4TypeNameToCode = map[string]string{
	"EchoReply":              "0",
	"DestinationUnreachable": "3",
	"Redirect":               "5",
	"Echo":                   "8",
	"EchoRequest":            "8",
	"RouterAdvertisement":    "9",
	"RouterSelection":        "10",
	"TimeExceeded":           "11",
	"ParameterProblem":       "12",
	"Timestamp":              "13",
	"TimestampReply":         "14",
	"Photuris":               "40",
	"ExtendedEchoRequest":    "42",
	"ExtendedEchoReply":      "43",
}
// icmpIpv6TypeNameToCode maps IANA ICMPv6 message-type names to their
// numeric type codes (as decimal strings).
var icmpIpv6TypeNameToCode = map[string]string{
	"DestinationUnreachable":                 "1",
	"PacketTooBig":                           "2",
	"TimeExceeded":                           "3",
	"ParameterProblem":                       "4",
	"EchoRequest":                            "128",
	"EchoReply":                              "129",
	"MulticastListenerQuery":                 "130",
	"MulticastListenerReport":                "131",
	"MulticastListenerDone":                  "132",
	"RouterSolicitation":                     "133",
	"RouterAdvertisement":                    "134",
	"NeighborSolicitation":                   "135",
	"NeighborAdvertisement":                  "136",
	"RedirectMessage":                        "137",
	"RouterRenumbering":                      "138",
	"ICMPNodeInformationQuery":               "139",
	"ICMPNodeInformationResponse":            "140",
	"InverseNeighborDiscoverySolicitation":   "141",
	"InverseNeighborDiscoveryAdvertisement":  "142",
	"HomeAgentAddressDiscoveryRequest":       "144",
	"HomeAgentAddressDiscoveryReply":         "145",
	"MobilePrefixSolicitation":               "146",
	"MobilePrefixAdvertisement":              "147",
	"DuplicateAddressRequestCodeSuffix":      "157",
	"DuplicateAddressConfirmationCodeSuffix": "158",
	"ExtendedEchoRequest":                    "160",
	"ExtendedEchoReply":                      "161",
}
// ICMPRules is a slice of ICMPRule.
type ICMPRules []ICMPRule

// ICMPRule is a list of ICMP fields.
type ICMPRule struct {
	// Fields is a list of ICMP fields.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:MaxItems=40
	Fields []ICMPField `json:"fields,omitempty"`
}
// ICMPField is a ICMP field.
//
// +deepequal-gen=true
// +deepequal-gen:private-method=true
type ICMPField struct {
	// Family is a IP address version.
	// Currently, we support `IPv4` and `IPv6`.
	// `IPv4` is set as default.
	//
	// +kubebuilder:default=IPv4
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Enum=IPv4;IPv6
	Family string `json:"family,omitempty"`
	// Type is a ICMP-type.
	// It should be an 8bit code (0-255), or it's CamelCase name (for example, "EchoReply").
	// Allowed ICMP types are:
	// Ipv4: EchoReply | DestinationUnreachable | Redirect | Echo | EchoRequest |
	// RouterAdvertisement | RouterSelection | TimeExceeded | ParameterProblem |
	// Timestamp | TimestampReply | Photuris | ExtendedEcho Request | ExtendedEcho Reply
	// Ipv6: DestinationUnreachable | PacketTooBig | TimeExceeded | ParameterProblem |
	// EchoRequest | EchoReply | MulticastListenerQuery| MulticastListenerReport |
	// MulticastListenerDone | RouterSolicitation | RouterAdvertisement | NeighborSolicitation |
	// NeighborAdvertisement | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery |
	// ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement |
	// HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply | MobilePrefixSolicitation |
	// MobilePrefixAdvertisement | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix |
	// ExtendedEchoRequest | ExtendedEchoReply
	//
	// Type is excluded from generated deep-equality and compared manually in
	// DeepEqual via its string form.
	//
	// +deepequal-gen=false
	// +kubebuilder:validation:XIntOrString
	// +kubebuilder:validation:Pattern="^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$"
	Type *intstr.IntOrString `json:"type"`
}
// DeepEqual reports whether i and o are semantically equal. The Type field is
// excluded from the generated deepEqual (+deepequal-gen=false) and compared
// here via its string form so that numeric and named representations of the
// same ICMP type compare consistently.
func (i *ICMPField) DeepEqual(o *ICMPField) bool {
	if i == nil {
		return o == nil
	}
	// Guard against a nil argument: the original code dereferenced o.Type
	// before the generated deepEqual could perform its own nil check,
	// panicking when i != nil && o == nil.
	if o == nil {
		return false
	}
	if i.Type.String() != o.Type.String() {
		return false
	}
	return i.deepEqual(o)
}
// UnmarshalJSON unmarshals the ICMPField from the byte array and checks that a
// named Type belongs to the given IP family. Numeric types are accepted as-is.
func (i *ICMPField) UnmarshalJSON(value []byte) error {
	var t struct {
		Family string              `json:"family,omitempty"`
		Type   *intstr.IntOrString `json:"type"`
	}

	if err := json.Unmarshal(value, &t); err != nil {
		return err
	}

	// If i.Type is an ICMP type name (a string that does not parse as an
	// integer), the value must exist in the name-to-code map for the given
	// family. A nil Type (absent "type" key) is passed through untouched;
	// the original code would dereference it in IntValue() and panic.
	if t.Type != nil && t.Type.String() != "0" && t.Type.IntValue() == 0 {
		name := t.Type.String()
		var nameToCode map[string]string
		switch t.Family {
		case IPv6Family:
			nameToCode = icmpIpv6TypeNameToCode
		default:
			// IPv4 is the documented default family.
			nameToCode = icmpIpv4TypeNameToCode
		}
		if _, ok := nameToCode[name]; !ok {
			return fmt.Errorf("ICMP type %s not found in %s", name, t.Family)
		}
	}

	i.Family = t.Family
	i.Type = t.Type

	return nil
}
// Iterate iterates over all elements of ICMPRules, invoking f on a pointer to
// each rule. Iteration stops at the first error, which is returned.
func (ir ICMPRules) Iterate(f func(pr Ports) error) error {
	for idx := range ir {
		err := f(&ir[idx])
		if err != nil {
			return err
		}
	}
	return nil
}
// Len returns the number of rules in ICMPRules.
func (ir ICMPRules) Len() int {
	return len(ir)
}
// GetPortProtocols generates a PortProtocol slice from the rule's ICMP fields
// and returns it. A rule with no fields yields a nil slice.
func (ir ICMPRule) GetPortProtocols() []PortProtocol {
	var pps []PortProtocol
	for idx := range ir.Fields {
		pps = append(pps, *ir.Fields[idx].PortProtocol())
	}
	return pps
}
// GetPortRule generates a PortRule from the ICMPRule and returns it. The
// rule's ports are exactly those produced by GetPortProtocols.
func (ir ICMPRule) GetPortRule() *PortRule {
	return &PortRule{
		Ports: ir.GetPortProtocols(),
	}
}
// PortProtocol translates the ICMP field to a PortProtocol: the ICMP type
// (resolved from a name to its numeric code when applicable) becomes the
// port, and the family selects ICMP vs ICMPv6 as the protocol.
func (i ICMPField) PortProtocol() *PortProtocol {
	proto := ProtoICMP
	nameToCode := icmpIpv4TypeNameToCode
	if i.Family == IPv6Family {
		proto = ProtoICMPv6
		nameToCode = icmpIpv6TypeNameToCode
	}

	port := i.Type.String()
	// Named types are translated to their numeric code; numeric types pass
	// through unchanged.
	if code, ok := nameToCode[port]; ok {
		port = code
	}

	return &PortProtocol{
		Port:     port,
		Protocol: proto,
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"context"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/slices"
)
// IngressCommonRule is a rule that shares some of its fields across the
// IngressRule and IngressDenyRule. It's publicly exported so the code
// generators can generate code for this structure.
//
// +deepequal-gen:private-method=true
type IngressCommonRule struct {
	// FromEndpoints is a list of endpoints identified by an
	// EndpointSelector which are allowed to communicate with the endpoint
	// subject to the rule.
	//
	// Example:
	// Any endpoint with the label "role=backend" can be consumed by any
	// endpoint carrying the label "role=frontend".
	//
	// +kubebuilder:validation:Optional
	FromEndpoints []EndpointSelector `json:"fromEndpoints,omitempty"`
	// FromRequires is a list of additional constraints which must be met
	// in order for the selected endpoints to be reachable. These
	// additional constraints do not by themselves grant access privileges and
	// must always be accompanied with at least one matching FromEndpoints.
	//
	// Example:
	// Any Endpoint with the label "team=A" requires consuming endpoint
	// to also carry the label "team=A".
	//
	// +kubebuilder:validation:Optional
	FromRequires []EndpointSelector `json:"fromRequires,omitempty"`
	// FromCIDR is a list of IP blocks which the endpoint subject to the
	// rule is allowed to receive connections from. Only connections which
	// do *not* originate from the cluster or from the local host are subject
	// to CIDR rules. In order to allow in-cluster connectivity, use the
	// FromEndpoints field. This will match on the source IP address of
	// incoming connections. Adding a prefix into FromCIDR or into
	// FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are
	// allowed between FromCIDR and FromCIDRSet.
	//
	// Example:
	// Any endpoint with the label "app=my-legacy-pet" is allowed to receive
	// connections from 10.3.9.1
	//
	// +kubebuilder:validation:Optional
	FromCIDR CIDRSlice `json:"fromCIDR,omitempty"`
	// FromCIDRSet is a list of IP blocks which the endpoint subject to the
	// rule is allowed to receive connections from in addition to FromEndpoints,
	// along with a list of subnets contained within their corresponding IP block
	// from which traffic should not be allowed.
	// This will match on the source IP address of incoming connections. Adding
	// a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is
	// equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet.
	//
	// Example:
	// Any endpoint with the label "app=my-legacy-pet" is allowed to receive
	// connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12.
	//
	// +kubebuilder:validation:Optional
	FromCIDRSet CIDRRuleSlice `json:"fromCIDRSet,omitempty"`
	// FromEntities is a list of special entities which the endpoint subject
	// to the rule is allowed to receive connections from. Supported entities are
	// `world`, `cluster`, `host`, `remote-node`, `kube-apiserver`, `ingress`, `init`,
	// `health`, `unmanaged`, `none` and `all`.
	//
	// +kubebuilder:validation:Optional
	FromEntities EntitySlice `json:"fromEntities,omitempty"`
	// FromGroups is a directive that allows the integration with multiple outside
	// providers. Currently, only AWS is supported, and the rule can select by
	// multiple sub directives:
	//
	// Example:
	// FromGroups:
	// - aws:
	//     securityGroupsIds:
	//     - 'sg-XXXXXXXXXXXXX'
	//
	// +kubebuilder:validation:Optional
	FromGroups []Groups `json:"fromGroups,omitempty"`
	// FromNodes is a list of nodes identified by an
	// EndpointSelector which are allowed to communicate with the endpoint
	// subject to the rule.
	//
	// +kubebuilder:validation:Optional
	FromNodes []EndpointSelector `json:"fromNodes,omitempty"`
	// aggregatedSelectors is the cached aggregation of FromEntities, FromCIDR
	// and FromCIDRSet as endpoint selectors; computed lazily by
	// SetAggregatedSelectors and never serialized.
	//
	// TODO: Move this to the policy package
	// (https://github.com/cilium/cilium/issues/8353)
	aggregatedSelectors EndpointSelectorSlice `json:"-"`
}
// DeepEqual returns true if both IngressCommonRule are deep equal.
// A nil slice and an empty non-nil slice carry different semantics in these
// fields, so each pair is first checked for mismatched nil-ness before
// deferring to the autogenerated deep-equality method.
func (in *IngressCommonRule) DeepEqual(other *IngressCommonRule) bool {
	if slices.XorNil(in.FromEndpoints, other.FromEndpoints) ||
		slices.XorNil(in.FromCIDR, other.FromCIDR) ||
		slices.XorNil(in.FromCIDRSet, other.FromCIDRSet) ||
		slices.XorNil(in.FromEntities, other.FromEntities) {
		return false
	}
	return in.deepEqual(other)
}
// IngressRule contains all rule types which can be applied at ingress,
// i.e. network traffic that originates outside of the endpoint and
// is entering the endpoint selected by the endpointSelector.
//
// - All members of this structure are optional. If omitted or empty, the
//   member will have no effect on the rule.
//
// - If multiple members are set, all of them need to match in order for
//   the rule to take effect. The exception to this rule is FromRequires field;
//   the effects of any Requires field in any rule will apply to all other
//   rules as well.
//
// - FromEndpoints, FromCIDR, FromCIDRSet and FromEntities are mutually
//   exclusive. Only one of these members may be present within an individual
//   rule.
type IngressRule struct {
	IngressCommonRule `json:",inline"`

	// ToPorts is a list of destination ports identified by port number and
	// protocol which the endpoint subject to the rule is allowed to
	// receive connections on.
	//
	// Example:
	// Any endpoint with the label "app=httpd" can only accept incoming
	// connections on port 80/tcp.
	//
	// +kubebuilder:validation:Optional
	ToPorts PortRules `json:"toPorts,omitempty"`

	// ICMPs is a list of ICMP rule identified by type number
	// which the endpoint subject to the rule is allowed to
	// receive connections on.
	//
	// Example:
	// Any endpoint with the label "app=httpd" can only accept incoming
	// type 8 ICMP connections.
	//
	// +kubebuilder:validation:Optional
	ICMPs ICMPRules `json:"icmps,omitempty"`

	// Authentication is the required authentication type for the allowed traffic, if any.
	//
	// +kubebuilder:validation:Optional
	Authentication *Authentication `json:"authentication,omitempty"`
}
// IngressDenyRule contains all rule types which can be applied at ingress,
// i.e. network traffic that originates outside of the endpoint and
// is entering the endpoint selected by the endpointSelector.
//
// - All members of this structure are optional. If omitted or empty, the
//   member will have no effect on the rule.
//
// - If multiple members are set, all of them need to match in order for
//   the rule to take effect. The exception to this rule is FromRequires field;
//   the effects of any Requires field in any rule will apply to all other
//   rules as well.
//
// - FromEndpoints, FromCIDR, FromCIDRSet, FromGroups and FromEntities are mutually
//   exclusive. Only one of these members may be present within an individual
//   rule.
type IngressDenyRule struct {
	IngressCommonRule `json:",inline"`

	// ToPorts is a list of destination ports identified by port number and
	// protocol which the endpoint subject to the rule is not allowed to
	// receive connections on.
	//
	// Example:
	// Any endpoint with the label "app=httpd" cannot accept incoming
	// connections on port 80/tcp.
	//
	// +kubebuilder:validation:Optional
	ToPorts PortDenyRules `json:"toPorts,omitempty"`

	// ICMPs is a list of ICMP rule identified by type number
	// which the endpoint subject to the rule is not allowed to
	// receive connections on.
	//
	// Example:
	// Any endpoint with the label "app=httpd" cannot accept incoming
	// type 8 ICMP connections.
	//
	// +kubebuilder:validation:Optional
	ICMPs ICMPRules `json:"icmps,omitempty"`
}
// SetAggregatedSelectors creates a single slice containing all of the following
// fields within the IngressRule, converted to EndpointSelector, to be stored
// within the IngressRule for easy lookup while performing policy evaluation
// for the rule:
// * FromEntities
// * FromCIDR
// * FromCIDRSet
//
// FromEndpoints is not aggregated due to requirement folding in
// GetSourceEndpointSelectorsWithRequirements()
func (i *IngressCommonRule) SetAggregatedSelectors() {
	// Goroutines can race setting i.aggregatedSelectors, but they will all
	// compute the same result, so it does not matter.
	// An explicitly empty (non-nil) slice must not select any identity, so
	// the aggregate stays nil in that case.
	hasEmptyNonNilField := (i.FromCIDR != nil && len(i.FromCIDR) == 0) ||
		(i.FromCIDRSet != nil && len(i.FromCIDRSet) == 0) ||
		(i.FromEntities != nil && len(i.FromEntities) == 0)
	if hasEmptyNonNilField {
		i.aggregatedSelectors = nil
		return
	}

	aggregated := make(EndpointSelectorSlice, 0,
		len(i.FromEntities)+len(i.FromCIDR)+len(i.FromCIDRSet))
	aggregated = append(aggregated, i.FromEntities.GetAsEndpointSelectors()...)
	aggregated = append(aggregated, i.FromCIDR.GetAsEndpointSelectors()...)
	aggregated = append(aggregated, i.FromCIDRSet.GetAsEndpointSelectors()...)
	i.aggregatedSelectors = aggregated
}
// GetSourceEndpointSelectorsWithRequirements returns a slice of endpoints selectors covering
// all L3 source selectors of the ingress rule. When requirements are supplied,
// they are folded into a deep copy of each FromEndpoints selector; FromNodes
// selectors are only included in the no-requirements path (NOTE(review):
// requirements are not folded into FromNodes — confirm this asymmetry is
// intentional).
func (i *IngressCommonRule) GetSourceEndpointSelectorsWithRequirements(requirements []slim_metav1.LabelSelectorRequirement) EndpointSelectorSlice {
	// Lazily compute the cached aggregation of FromEntities/FromCIDR/FromCIDRSet.
	if i.aggregatedSelectors == nil {
		i.SetAggregatedSelectors()
	}
	// explicitly check for empty non-nil slices, it should not result in any identity being selected.
	if i.aggregatedSelectors == nil || (i.FromEndpoints != nil && len(i.FromEndpoints) == 0) ||
		(i.FromNodes != nil && len(i.FromNodes) == 0) {
		return nil
	}
	res := make(EndpointSelectorSlice, 0, len(i.FromEndpoints)+len(i.aggregatedSelectors)+len(i.FromNodes))
	if len(requirements) > 0 && len(i.FromEndpoints) > 0 {
		for idx := range i.FromEndpoints {
			// Deep copy so the folded-in requirements do not mutate the rule.
			sel := *i.FromEndpoints[idx].DeepCopy()
			sel.MatchExpressions = append(sel.MatchExpressions, requirements...)
			sel.SyncRequirementsWithLabelSelector()
			// Even though this string is deep copied, we need to override it
			// because we are updating the contents of the MatchExpressions.
			sel.cachedLabelSelectorString = sel.LabelSelector.String()
			res = append(res, sel)
		}
	} else {
		res = append(res, i.FromEndpoints...)
		res = append(res, i.FromNodes...)
	}
	return append(res, i.aggregatedSelectors...)
}
// AllowsWildcarding returns true if wildcarding should be performed upon
// policy evaluation for the given rule: wildcarding is allowed only when
// the rule carries no FromRequires constraints.
func (i *IngressCommonRule) AllowsWildcarding() bool {
	return len(i.FromRequires) == 0
}
// RequiresDerivative returns true when the IngressCommonRule contains sections
// that need a derivative policy created in order to be enforced
// (e.g. FromGroups).
//
// Fixed: the comment previously referred to "EgressCommonRule" (copy-paste
// error) and the receiver was named "e", inconsistent with this type's other
// methods.
func (i *IngressCommonRule) RequiresDerivative() bool {
	return len(i.FromGroups) > 0
}
// IsL3 returns true if the IngressCommonRule contains at least a rule that
// affects L3 policy enforcement. A nil rule affects nothing.
func (in *IngressCommonRule) IsL3() bool {
	if in == nil {
		return false
	}
	l3FieldLengths := [...]int{
		len(in.FromEndpoints),
		len(in.FromRequires),
		len(in.FromCIDR),
		len(in.FromCIDRSet),
		len(in.FromEntities),
		len(in.FromGroups),
		len(in.FromNodes),
	}
	for _, n := range l3FieldLengths {
		if n > 0 {
			return true
		}
	}
	return false
}
// CreateDerivative will return a new rule based on the data gathered by the
// rules that creates a new derivative policy.
// In the case of FromGroups will call outside using the groups callback and this
// function can take a bit of time.
//
// Fixed: removed a dead `make(CIDRRuleSlice, ...)` assignment to
// newRule.FromCIDRSet that was unconditionally overwritten below before
// any use, wasting an allocation.
func (e *IngressRule) CreateDerivative(ctx context.Context) (*IngressRule, error) {
	newRule := e.DeepCopy()
	if !e.RequiresDerivative() {
		return newRule, nil
	}
	// Resolve FromGroups into concrete CIDRs; this may call out to an
	// external provider and can therefore be slow.
	cidrSet, err := ExtractCidrSet(ctx, e.FromGroups)
	if err != nil {
		return &IngressRule{}, err
	}
	newRule.FromCIDRSet = append(e.FromCIDRSet, cidrSet...)
	newRule.FromGroups = nil
	// NOTE(review): aggregated selectors are refreshed on the receiver, not
	// on newRule; newRule rebuilds them lazily on first use — confirm intended.
	e.SetAggregatedSelectors()
	return newRule, nil
}
// CreateDerivative will return a new rule based on the data gathered by the
// rules that creates a new derivative policy.
// In the case of FromGroups will call outside using the groups callback and this
// function can take a bit of time.
//
// Fixed: removed a dead `make(CIDRRuleSlice, ...)` assignment to
// newRule.FromCIDRSet that was unconditionally overwritten below before
// any use, wasting an allocation.
func (e *IngressDenyRule) CreateDerivative(ctx context.Context) (*IngressDenyRule, error) {
	newRule := e.DeepCopy()
	if !e.RequiresDerivative() {
		return newRule, nil
	}
	// Resolve FromGroups into concrete CIDRs; this may call out to an
	// external provider and can therefore be slow.
	cidrSet, err := ExtractCidrSet(ctx, e.FromGroups)
	if err != nil {
		return &IngressDenyRule{}, err
	}
	newRule.FromCIDRSet = append(e.FromCIDRSet, cidrSet...)
	newRule.FromGroups = nil
	// NOTE(review): aggregated selectors are refreshed on the receiver, not
	// on newRule; newRule rebuilds them lazily on first use — confirm intended.
	e.SetAggregatedSelectors()
	return newRule, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"github.com/cilium/proxy/pkg/policy/api/kafka"
)
// L4Proto is a layer 4 protocol name
type L4Proto string

const (
	// Keep pkg/u8proto up-to-date with any additions here
	ProtoTCP    L4Proto = "TCP"
	ProtoUDP    L4Proto = "UDP"
	ProtoSCTP   L4Proto = "SCTP"
	ProtoICMP   L4Proto = "ICMP"
	ProtoICMPv6 L4Proto = "ICMPV6"
	ProtoVRRP   L4Proto = "VRRP"
	ProtoIGMP   L4Proto = "IGMP"
	ProtoAny    L4Proto = "ANY"

	// PortProtocolAny is the wildcard port/protocol combination.
	PortProtocolAny = "0/ANY"
)

// IsAny returns true if an L4Proto represents ANY protocol
func (l4 L4Proto) IsAny() bool {
	switch l4 {
	case ProtoAny, "":
		return true
	default:
		return false
	}
}

// SupportedProtocols returns the currently supported protocols in the policy
// engine, excluding "ANY".
func SupportedProtocols() []L4Proto {
	return []L4Proto{ProtoTCP, ProtoUDP, ProtoSCTP}
}
// PortProtocol specifies an L4 port with an optional transport protocol
type PortProtocol struct {
	// Port can be an L4 port number, or a name in the form of "http"
	// or "http-8080".
	//
	// +kubebuilder:validation:Pattern=`^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$`
	Port string `json:"port,omitempty"`

	// EndPort can only be an L4 port number.
	// NOTE(review): presumably Port..EndPort forms an inclusive range when
	// EndPort is non-zero — the enforcing sanitizer is not visible in this
	// chunk; confirm there.
	//
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=65535
	// +kubebuilder:validation:Optional
	EndPort int32 `json:"endPort,omitempty"`

	// Protocol is the L4 protocol. If "ANY", omitted or empty, any protocols
	// with transport ports (TCP, UDP, SCTP) match.
	//
	// Accepted values: "TCP", "UDP", "SCTP", "VRRP", "IGMP", "ANY"
	//
	// Matching on ICMP is not supported.
	//
	// Named port specified for a container may narrow this down, but may not
	// contradict this.
	//
	// +kubebuilder:validation:Enum=TCP;UDP;SCTP;VRRP;IGMP;ANY
	// +kubebuilder:validation:Optional
	Protocol L4Proto `json:"protocol,omitempty"`
}
// Covers returns true if the ports and protocol specified in the received
// PortProtocol are equal to or a superset of the ports and protocol in 'other'.
// Named ports only cover other named ports with exactly the same name.
func (p PortProtocol) Covers(other PortProtocol) bool {
	// Ports (numeric or named) must match exactly.
	if p.Port != other.Port {
		return false
	}
	if p.Protocol == other.Protocol {
		return true
	}
	// Differing protocols are only covered when p wildcards the protocol.
	return p.Protocol.IsAny()
}
// Secret is a reference to a secret, backed by k8s or local file system.
type Secret struct {
	// Namespace is the namespace in which the secret exists. Context of use
	// determines the default value if left out (e.g., "default").
	//
	// +kubebuilder:validation:Optional
	Namespace string `json:"namespace,omitempty"`

	// Name is the name of the secret. It must be non-empty.
	//
	// +kubebuilder:validation:Required
	Name string `json:"name"`
}
// TLSContext provides TLS configuration via reference to either k8s secrets
// or via filepath. If both are set, directory is given priority over
// k8sSecrets.
// NOTE(review): "directory"/"k8sSecrets" are not fields of this struct —
// the sentence above may be stale; confirm against the consumers.
type TLSContext struct {
	// Secret is the secret that contains the certificates and private key for
	// the TLS context.
	// By default, Cilium will search in this secret for the following items:
	//  - 'ca.crt'  - Which represents the trusted CA to verify remote source.
	//  - 'tls.crt' - Which represents the public key certificate.
	//  - 'tls.key' - Which represents the private key matching the public key
	//                certificate.
	//
	// +kubebuilder:validation:Required
	Secret *Secret `json:"secret"`

	// TrustedCA is the file name or k8s secret item name for the trusted CA.
	// If omitted, 'ca.crt' is assumed, if it exists. If given, the item must
	// exist.
	//
	// +kubebuilder:validation:Optional
	TrustedCA string `json:"trustedCA,omitempty"`

	// Certificate is the file name or k8s secret item name for the certificate
	// chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the
	// item must exist.
	//
	// +kubebuilder:validation:Optional
	Certificate string `json:"certificate,omitempty"`

	// PrivateKey is the file name or k8s secret item name for the private key
	// matching the certificate chain. If omitted, 'tls.key' is assumed, if it
	// exists. If given, the item must exist.
	//
	// +kubebuilder:validation:Optional
	PrivateKey string `json:"privateKey,omitempty"`
}
// EnvoyConfig defines a reference to a CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig
type EnvoyConfig struct {
	// Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or
	// CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy,
	// respectively. The only case this is currently explicitly needed is when referring to a
	// CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener
	// from a cluster scoped policy is not allowed.
	//
	// +kubebuilder:validation:Enum=CiliumEnvoyConfig;CiliumClusterwideEnvoyConfig
	// +kubebuilder:validation:Optional
	Kind string `json:"kind"`

	// Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where
	// the listener is defined in. It must be non-empty.
	//
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:Required
	Name string `json:"name"`
}
// Listener defines a reference to an Envoy listener specified in a CEC or CCEC resource.
type Listener struct {
	// EnvoyConfig is a reference to the CEC or CCEC resource in which
	// the listener is defined.
	//
	// +kubebuilder:validation:Required
	EnvoyConfig *EnvoyConfig `json:"envoyConfig"`

	// Name is the name of the listener. It must be non-empty.
	//
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:Required
	Name string `json:"name"`

	// Priority for this Listener that is used when multiple rules would apply different
	// listeners to a policy map entry. Behavior of this is implementation dependent.
	//
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=100
	// +kubebuilder:validation:Optional
	Priority uint8 `json:"priority"`
}
// ServerName allows using prefix only wildcards to match DNS names.
//
//   - "*" matches 0 or more DNS valid characters, and may only occur at the
//     beginning of the pattern. As a special case a "*" as the leftmost character,
//     without a following "." matches all subdomains as well as the name to the right.
//
// Examples:
//   - `*.cilium.io` matches exactly one subdomain of cilium.io at that level:
//     www.cilium.io and blog.cilium.io match; cilium.io and google.com do not.
//   - `**.cilium.io` matches one or more subdomain levels of cilium.io, e.g.
//     sub1.sub2.cilium.io and sub.cilium.io match; cilium.io does not.
//
// +kubebuilder:validation:MaxLength=255
// +kubebuilder:validation:Pattern=`^(\*?\*\.)?([-a-zA-Z0-9_]+\.?)+$`
// +kubebuilder:validation:OneOf
type ServerName string
// PortRule is a list of ports/protocol combinations with optional Layer 7
// rules which must be met.
type PortRule struct {
	// Ports is a list of L4 port/protocol
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:MaxItems=40
	Ports []PortProtocol `json:"ports,omitempty"`

	// TerminatingTLS is the TLS context for the connection terminated by
	// the L7 proxy.  For egress policy this specifies the server-side TLS
	// parameters to be applied on the connections originated from the local
	// endpoint and terminated by the L7 proxy. For ingress policy this specifies
	// the server-side TLS parameters to be applied on the connections
	// originated from a remote source and terminated by the L7 proxy.
	//
	// +kubebuilder:validation:Optional
	TerminatingTLS *TLSContext `json:"terminatingTLS,omitempty"`

	// OriginatingTLS is the TLS context for the connections originated by
	// the L7 proxy.  For egress policy this specifies the client-side TLS
	// parameters for the upstream connection originating from the L7 proxy
	// to the remote destination. For ingress policy this specifies the
	// client-side TLS parameters for the connection from the L7 proxy to
	// the local endpoint.
	//
	// +kubebuilder:validation:Optional
	OriginatingTLS *TLSContext `json:"originatingTLS,omitempty"`

	// ServerNames is a list of allowed TLS SNI values. If not empty, then
	// TLS must be present and one of the provided SNIs must be indicated in the
	// TLS handshake.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:MinItems=1
	// +listType=set
	ServerNames []ServerName `json:"serverNames,omitempty"`

	// Listener specifies the name of a custom Envoy listener to which this
	// traffic should be redirected to.
	//
	// +kubebuilder:validation:Optional
	Listener *Listener `json:"listener,omitempty"`

	// Rules is a list of additional port level rules which must be met in
	// order for the PortRule to allow the traffic. If omitted or empty,
	// no layer 7 rules are enforced.
	//
	// +kubebuilder:validation:Optional
	Rules *L7Rules `json:"rules,omitempty"`
}
// GetPortProtocols returns the L4 port/protocol pairs configured on this
// rule; it implements the Ports interface.
func (pd PortRule) GetPortProtocols() []PortProtocol {
	return pd.Ports
}

// GetPortRule returns the rule itself; it implements the Ports interface.
func (pd *PortRule) GetPortRule() *PortRule {
	return pd
}
// GetServerNames returns the rule's allowed TLS SNI values as plain strings.
func (pd *PortRule) GetServerNames() []string {
	names := make([]string, len(pd.ServerNames))
	for idx, name := range pd.ServerNames {
		names[idx] = string(name)
	}
	return names
}
// PortDenyRule is a list of ports/protocol that should be used for deny
// policies. This structure lacks the L7Rules since it's not supported in deny
// policies.
type PortDenyRule struct {
	// Ports is a list of L4 port/protocol
	//
	// +kubebuilder:validation:Optional
	Ports []PortProtocol `json:"ports,omitempty"`
}

// GetPortProtocols returns the Ports field of the PortDenyRule;
// it implements the Ports interface.
func (pd PortDenyRule) GetPortProtocols() []PortProtocol {
	return pd.Ports
}

// GetPortRule returns nil, as a PortDenyRule is not a PortRule;
// it implements the Ports interface.
func (pd *PortDenyRule) GetPortRule() *PortRule {
	return nil
}
// L7Rules is a union of port level rule types. Mixing of different port
// level rule types is disallowed, so exactly one of the following must be set.
// If none are specified, then no additional port level rules are applied.
type L7Rules struct {
	// HTTP specific rules.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:OneOf
	HTTP []PortRuleHTTP `json:"http,omitempty"`

	// Kafka-specific rules.
	// Deprecated: This beta feature is deprecated and will be removed in a future release.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:OneOf
	Kafka []kafka.PortRule `json:"kafka,omitempty"`

	// DNS-specific rules.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:OneOf
	DNS []PortRuleDNS `json:"dns,omitempty"`

	// Name of the L7 protocol for which the Key-value pair rules apply.
	// Used together with the L7 field below.
	//
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:OneOf
	L7Proto string `json:"l7proto,omitempty"`

	// Key-value pair rules, interpreted by the protocol named in L7Proto.
	//
	// +kubebuilder:validation:Optional
	L7 []PortRuleL7 `json:"l7,omitempty"`
}
// Len returns the total number of rules inside `L7Rules`.
// Returns 0 if nil.
func (rules *L7Rules) Len() int {
	if rules == nil {
		return 0
	}
	total := len(rules.HTTP)
	total += len(rules.Kafka)
	total += len(rules.DNS)
	total += len(rules.L7)
	return total
}

// IsEmpty returns whether the `L7Rules` is nil or contains no rules.
func (rules *L7Rules) IsEmpty() bool {
	// Len is nil-safe, so a nil receiver reports empty.
	return rules.Len() == 0
}
// PortRules is a slice of PortRule.
type PortRules []PortRule

// Iterate applies f to each element of pr, stopping at the first error.
func (pr PortRules) Iterate(f func(pr Ports) error) error {
	for idx := range pr {
		if err := f(&pr[idx]); err != nil {
			return err
		}
	}
	return nil
}

// Len returns the number of PortRule elements.
func (pr PortRules) Len() int {
	return len(pr)
}
// PortDenyRules is a slice of PortDenyRule.
type PortDenyRules []PortDenyRule

// Iterate applies f to each element of pr, stopping at the first error.
func (pr PortDenyRules) Iterate(f func(pr Ports) error) error {
	for idx := range pr {
		if err := f(&pr[idx]); err != nil {
			return err
		}
	}
	return nil
}

// Len returns the number of PortDenyRule elements.
func (pr PortDenyRules) Len() int {
	return len(pr)
}
// Ports is an interface that should be used by all implementations of the
// PortProtocols.
type Ports interface {
	// GetPortProtocols returns the slice PortProtocol
	GetPortProtocols() []PortProtocol
	// GetPortRule returns a PortRule, if the implementation does not support
	// it, then returns nil.
	GetPortRule() *PortRule
}

// PortsIterator is an interface that should be implemented by structures that
// can iterate over a list of Ports interfaces.
type PortsIterator interface {
	// Iterate calls f for each Ports element, stopping at the first error.
	Iterate(f func(pr Ports) error) error
	// Len returns the number of elements that Iterate would visit.
	Len() int
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"fmt"
)
// PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as
// protocol constraints. All fields are optional, if all fields are empty or
// missing, the rule does not have any effect.
type PortRuleL7 map[string]string

// Sanitize sanitizes key-value pair rules. It makes sure keys are present.
func (rule *PortRuleL7) Sanitize() error {
	for key := range *rule {
		if len(key) == 0 {
			return fmt.Errorf("Empty key not allowed")
		}
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"context"
"github.com/cilium/cilium/pkg/labels"
)
// AuthenticationMode is a string identifying a supported authentication type
type AuthenticationMode string

const (
	// AuthenticationModeDisabled performs no authentication.
	AuthenticationModeDisabled AuthenticationMode = "disabled" // Always succeeds
	// AuthenticationModeRequired enforces authentication.
	AuthenticationModeRequired AuthenticationMode = "required" // Mutual TLS with SPIFFE as certificate provider by default
	// AuthenticationModeAlwaysFail always fails authentication — the
	// "test-" value prefix suggests it is meant for testing only.
	AuthenticationModeAlwaysFail AuthenticationMode = "test-always-fail"
)
// Authentication specifies the kind of cryptographic authentication required for the traffic to
// be allowed.
type Authentication struct {
	// Mode is the required authentication mode for the allowed traffic, if any.
	//
	// +kubebuilder:validation:Enum=disabled;required;test-always-fail
	// +kubebuilder:validation:Required
	Mode AuthenticationMode `json:"mode"`
}
// DefaultDenyConfig expresses a policy's desired default mode for the subject
// endpoints. Nil pointers mean "unset"; Rule.Sanitize fills in defaults.
type DefaultDenyConfig struct {
	// Whether or not the endpoint should have a default-deny rule applied
	// to ingress traffic.
	//
	// +kubebuilder:validation:Optional
	Ingress *bool `json:"ingress,omitempty"`

	// Whether or not the endpoint should have a default-deny rule applied
	// to egress traffic.
	//
	// +kubebuilder:validation:Optional
	Egress *bool `json:"egress,omitempty"`
}
// LogConfig specifies custom policy-specific Hubble logging configuration.
type LogConfig struct {
	// Value is a free-form string that is included in Hubble flows
	// that match this policy. The string is limited to 32 printable characters.
	//
	// +kubebuilder:validation:MaxLength=32
	// +kubebuilder:validation:Pattern=`^\PC*$`
	Value string `json:"value,omitempty"`
}
// Rule is a policy rule which must be applied to all endpoints which match the
// labels contained in the endpointSelector
//
// Each rule is split into an ingress section which contains all rules
// applicable at ingress, and an egress section applicable at egress. For rule
// types such as `L4Rule` and `CIDR` which can be applied at both ingress and
// egress, both ingress and egress side have to either specifically allow the
// connection or one side has to be omitted.
//
// Either ingress, egress, or both can be provided. If both ingress and egress
// are omitted, the rule has no effect.
//
// +deepequal-gen:private-method=true
type Rule struct {
	// EndpointSelector selects all endpoints which should be subject to
	// this rule. EndpointSelector and NodeSelector cannot be both empty and
	// are mutually exclusive.
	//
	// +kubebuilder:validation:OneOf
	EndpointSelector EndpointSelector `json:"endpointSelector,omitzero"`

	// NodeSelector selects all nodes which should be subject to this rule.
	// EndpointSelector and NodeSelector cannot be both empty and are mutually
	// exclusive. Can only be used in CiliumClusterwideNetworkPolicies.
	//
	// +kubebuilder:validation:OneOf
	NodeSelector EndpointSelector `json:"nodeSelector,omitzero"`

	// Ingress is a list of IngressRule which are enforced at ingress.
	// If omitted or empty, this rule does not apply at ingress.
	//
	// +kubebuilder:validation:AnyOf
	Ingress []IngressRule `json:"ingress,omitempty"`

	// IngressDeny is a list of IngressDenyRule which are enforced at ingress.
	// Any rule inserted here will be denied regardless of the allowed ingress
	// rules in the 'ingress' field.
	// If omitted or empty, this rule does not apply at ingress.
	//
	// +kubebuilder:validation:AnyOf
	IngressDeny []IngressDenyRule `json:"ingressDeny,omitempty"`

	// Egress is a list of EgressRule which are enforced at egress.
	// If omitted or empty, this rule does not apply at egress.
	//
	// +kubebuilder:validation:AnyOf
	Egress []EgressRule `json:"egress,omitempty"`

	// EgressDeny is a list of EgressDenyRule which are enforced at egress.
	// Any rule inserted here will be denied regardless of the allowed egress
	// rules in the 'egress' field.
	// If omitted or empty, this rule does not apply at egress.
	//
	// +kubebuilder:validation:AnyOf
	EgressDeny []EgressDenyRule `json:"egressDeny,omitempty"`

	// Labels is a list of optional strings which can be used to
	// re-identify the rule or to store metadata. It is possible to lookup
	// or delete strings based on labels. Labels are not required to be
	// unique, multiple rules can have overlapping or identical labels.
	//
	// +kubebuilder:validation:Optional
	Labels labels.LabelArray `json:"labels,omitempty"`

	// EnableDefaultDeny determines whether this policy configures the
	// subject endpoint(s) to have a default deny mode. If enabled,
	// this causes all traffic not explicitly allowed by a network policy
	// to be dropped.
	//
	// If not specified, the default is true for each traffic direction
	// that has rules, and false otherwise. For example, if a policy
	// only has Ingress or IngressDeny rules, then the default for
	// ingress is true and egress is false.
	//
	// If multiple policies apply to an endpoint, that endpoint's default deny
	// will be enabled if any policy requests it.
	//
	// This is useful for creating broad-based network policies that will not
	// cause endpoints to enter default-deny mode.
	// Unset directions are filled in by Rule.Sanitize.
	//
	// +kubebuilder:validation:Optional
	EnableDefaultDeny DefaultDenyConfig `json:"enableDefaultDeny,omitzero"`

	// Description is a free form string, it can be used by the creator of
	// the rule to store human readable explanation of the purpose of this
	// rule. Rules cannot be identified by comment.
	//
	// +kubebuilder:validation:Optional
	Description string `json:"description,omitempty"`

	// Log specifies custom policy-specific Hubble logging configuration.
	//
	// +kubebuilder:validation:Optional
	Log LogConfig `json:"log,omitzero"`
}
// DeepEqual reports whether r and o are deeply equal, treating two nil
// pointers as equal and a nil/non-nil pair as unequal.
func (r *Rule) DeepEqual(o *Rule) bool {
	if r == nil || o == nil {
		// Equal only when both are nil.
		return r == o
	}
	return r.deepEqual(o)
}
// NewRule builds a new rule with no selector and no policy.
func NewRule() *Rule {
	return &Rule{}
}

// WithEndpointSelector configures the Rule with the specified selector.
func (r *Rule) WithEndpointSelector(es EndpointSelector) *Rule {
	r.EndpointSelector = es
	return r
}

// WithIngressRules sets the allow-ingress section of the Rule.
func (r *Rule) WithIngressRules(rules []IngressRule) *Rule {
	r.Ingress = rules
	return r
}

// WithIngressDenyRules sets the deny-ingress section of the Rule.
func (r *Rule) WithIngressDenyRules(rules []IngressDenyRule) *Rule {
	r.IngressDeny = rules
	return r
}

// WithEgressRules sets the allow-egress section of the Rule.
func (r *Rule) WithEgressRules(rules []EgressRule) *Rule {
	r.Egress = rules
	return r
}

// WithEgressDenyRules sets the deny-egress section of the Rule.
func (r *Rule) WithEgressDenyRules(rules []EgressDenyRule) *Rule {
	r.EgressDeny = rules
	return r
}

// WithEnableDefaultDeny configures the Rule's default-deny posture for both
// directions.
func (r *Rule) WithEnableDefaultDeny(ingress, egress bool) *Rule {
	r.EnableDefaultDeny = DefaultDenyConfig{Ingress: &ingress, Egress: &egress}
	return r
}

// WithLabels attaches the specified labels metadata to the Rule.
func (r *Rule) WithLabels(labels labels.LabelArray) *Rule {
	r.Labels = labels
	return r
}

// WithDescription attaches a human-readable description to the Rule.
func (r *Rule) WithDescription(desc string) *Rule {
	r.Description = desc
	return r
}
// RequiresDerivative returns true if any of the rule's ingress/egress
// allow or deny sections needs a derivative rule to be generated.
func (r *Rule) RequiresDerivative() bool {
	sections := [][]interface{ RequiresDerivative() bool }{}
	_ = sections // (not used; kept simple below for clarity)
	for _, eg := range r.Egress {
		if eg.RequiresDerivative() {
			return true
		}
	}
	for _, egDeny := range r.EgressDeny {
		if egDeny.RequiresDerivative() {
			return true
		}
	}
	for _, ing := range r.Ingress {
		if ing.RequiresDerivative() {
			return true
		}
	}
	for _, ingDeny := range r.IngressDeny {
		if ingDeny.RequiresDerivative() {
			return true
		}
	}
	return false
}
// CreateDerivative returns a deep copy of r in which every section that
// requires a derivative rule (e.g. groups-based rules) has been replaced
// by its generated equivalent. On error the partially-built copy is
// returned alongside the error.
func (r *Rule) CreateDerivative(ctx context.Context) (*Rule, error) {
	newRule := r.DeepCopy()
	// Start from empty (non-nil) sections and re-append the derivatives.
	newRule.Egress = []EgressRule{}
	newRule.EgressDeny = []EgressDenyRule{}
	newRule.Ingress = []IngressRule{}
	newRule.IngressDeny = []IngressDenyRule{}

	for _, eg := range r.Egress {
		derived, err := eg.CreateDerivative(ctx)
		if err != nil {
			return newRule, err
		}
		newRule.Egress = append(newRule.Egress, *derived)
	}
	for _, egDeny := range r.EgressDeny {
		derived, err := egDeny.CreateDerivative(ctx)
		if err != nil {
			return newRule, err
		}
		newRule.EgressDeny = append(newRule.EgressDeny, *derived)
	}
	for _, ing := range r.Ingress {
		derived, err := ing.CreateDerivative(ctx)
		if err != nil {
			return newRule, err
		}
		newRule.Ingress = append(newRule.Ingress, *derived)
	}
	for _, ingDeny := range r.IngressDeny {
		derived, err := ingDeny.CreateDerivative(ctx)
		if err != nil {
			return newRule, err
		}
		newRule.IngressDeny = append(newRule.IngressDeny, *derived)
	}
	return newRule, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"errors"
"fmt"
"net/netip"
"slices"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/iana"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
)
const (
	// maxPorts and maxICMPFields bound per-rule list sizes — presumably
	// enforced by the ToPorts/ICMP sanitizers, which are not visible in
	// this chunk; confirm there.
	maxPorts      = 40
	maxICMPFields = 40
)

var (
	// ErrFromToNodesRequiresNodeSelectorOption is returned when
	// FromNodes/ToNodes rules are used without the node-selector-labels
	// option enabled.
	ErrFromToNodesRequiresNodeSelectorOption = fmt.Errorf("FromNodes/ToNodes rules can only be applied when the %q flag is set", option.EnableNodeSelectorLabels)

	// errUnsupportedICMPWithToPorts is returned when a rule combines ICMPs
	// with ToPorts, which is mutually exclusive.
	errUnsupportedICMPWithToPorts = errors.New("the ICMPs block may only be present without ToPorts. Define a separate rule to use ToPorts")

	// errEmptyServerName rejects an empty ServerName entry — usage site not
	// visible in this chunk.
	errEmptyServerName = errors.New("empty server name is not allowed")

	// enableDefaultDenyDefault backs the EnableDefaultDeny pointers when
	// non-default-deny policies are disabled (see Rule.Sanitize).
	enableDefaultDenyDefault = true
)
// Sanitize validates and sanitizes a policy rule. Minor edits such as capitalization
// of the protocol name are automatically fixed up.
// As part of `EndpointSelector` sanitization we also convert the label keys to internal
// representation prefixed with the source information. Check `EndpointSelector.sanitize()`
// method for more details.
// More fundamental violations will cause an error to be returned.
//
// Note: this function is called from both the operator and the agent;
// make sure any configuration flags are bound in **both** binaries.
func (r *Rule) Sanitize() error {
	// A rule with no direction sections at all would be a no-op; reject it.
	if len(r.Ingress) == 0 && len(r.IngressDeny) == 0 && len(r.Egress) == 0 && len(r.EgressDeny) == 0 {
		return fmt.Errorf("rule must have at least one of Ingress, IngressDeny, Egress, EgressDeny")
	}
	if option.Config.EnableNonDefaultDenyPolicies {
		// Fill in the default traffic posture of this Rule.
		// Default posture is per-direction (ingress or egress),
		// if there is a peer selector for that direction, the
		// default is deny, else allow.
		if r.EnableDefaultDeny.Egress == nil {
			x := len(r.Egress) > 0 || len(r.EgressDeny) > 0
			r.EnableDefaultDeny.Egress = &x
		}
		if r.EnableDefaultDeny.Ingress == nil {
			x := len(r.Ingress) > 0 || len(r.IngressDeny) > 0
			r.EnableDefaultDeny.Ingress = &x
		}
	} else {
		// Since Non Default Deny Policies is disabled by flag, set EnableDefaultDeny to true
		r.EnableDefaultDeny.Egress = &enableDefaultDenyDefault
		r.EnableDefaultDeny.Ingress = &enableDefaultDenyDefault
	}
	// Exactly one of EndpointSelector / NodeSelector must be present.
	if r.EndpointSelector.LabelSelector == nil && r.NodeSelector.LabelSelector == nil {
		return errors.New("rule must have one of EndpointSelector or NodeSelector")
	}
	if r.EndpointSelector.LabelSelector != nil && r.NodeSelector.LabelSelector != nil {
		return errors.New("rule cannot have both EndpointSelector and NodeSelector")
	}
	if r.EndpointSelector.LabelSelector != nil {
		if err := r.EndpointSelector.Sanitize(); err != nil {
			return err
		}
	}
	// A NodeSelector marks this as host policy, which restricts the L7
	// rules permitted by the per-direction sanitizers below.
	var hostPolicy bool
	if r.NodeSelector.LabelSelector != nil {
		if err := r.NodeSelector.Sanitize(); err != nil {
			return err
		}
		hostPolicy = true
	}
	for i := range r.Ingress {
		if err := r.Ingress[i].sanitize(hostPolicy); err != nil {
			return err
		}
	}
	for i := range r.IngressDeny {
		if err := r.IngressDeny[i].sanitize(); err != nil {
			return err
		}
	}
	for i := range r.Egress {
		if err := r.Egress[i].sanitize(hostPolicy); err != nil {
			return err
		}
	}
	for i := range r.EgressDeny {
		if err := r.EgressDeny[i].sanitize(); err != nil {
			return err
		}
	}
	return nil
}
// countL7Rules tallies the DNS/HTTP/Kafka L7 rules present across the given
// port rules. A key is present in the result (possibly with a zero count)
// whenever any port carries a non-empty L7Rules block.
func countL7Rules(ports []PortRule) map[string]int {
	counts := make(map[string]int)
	for idx := range ports {
		rules := ports[idx].Rules
		if rules.IsEmpty() {
			continue
		}
		counts["DNS"] += len(rules.DNS)
		counts["HTTP"] += len(rules.HTTP)
		counts["Kafka"] += len(rules.Kafka)
	}
	return counts
}
// sanitize validates a single ingress allow rule. hostPolicy is true when the
// enclosing Rule uses a NodeSelector (host policy), which disallows L7 rules.
func (i *IngressRule) sanitize(hostPolicy bool) error {
	l7Members := countL7Rules(i.ToPorts)
	// Per-protocol L7 support on ingress: DNS rules are egress-only.
	l7IngressSupport := map[string]bool{
		"DNS":   false,
		"Kafka": true,
		"HTTP":  true,
	}
	if err := i.IngressCommonRule.sanitize(); err != nil {
		return err
	}
	if hostPolicy && len(l7Members) > 0 {
		return errors.New("L7 policy is not supported on host ingress yet")
	}
	if len(l7Members) > 0 && !option.Config.EnableL7Proxy {
		return errors.New("L7 policy is not supported since L7 proxy is not enabled")
	}
	for member := range l7Members {
		if l7Members[member] > 0 && !l7IngressSupport[member] {
			return fmt.Errorf("L7 protocol %s is not supported on ingress yet", member)
		}
	}
	if len(i.ICMPs) > 0 && !option.Config.EnableICMPRules {
		return fmt.Errorf("ICMP rules can only be applied when the %q flag is set", option.EnableICMPRules)
	}
	// ICMPs and ToPorts are mutually exclusive within one rule.
	if len(i.ICMPs) > 0 && len(i.ToPorts) > 0 {
		return errUnsupportedICMPWithToPorts
	}
	for n := range i.ToPorts {
		if err := i.ToPorts[n].sanitize(true); err != nil {
			return err
		}
	}
	for n := range i.ICMPs {
		if err := i.ICMPs[n].verify(); err != nil {
			return err
		}
	}
	// Pre-compute the aggregated selectors now that the rule is valid.
	i.SetAggregatedSelectors()
	return nil
}
// sanitize validates a single ingress deny rule. Deny rules carry no L7
// rules, so only the common L3 checks, ICMP and port checks apply.
func (i *IngressDenyRule) sanitize() error {
	if err := i.IngressCommonRule.sanitize(); err != nil {
		return err
	}
	if len(i.ICMPs) > 0 && !option.Config.EnableICMPRules {
		return fmt.Errorf("ICMP rules can only be applied when the %q flag is set", option.EnableICMPRules)
	}
	// ICMPs and ToPorts are mutually exclusive within one rule.
	if len(i.ICMPs) > 0 && len(i.ToPorts) > 0 {
		return errUnsupportedICMPWithToPorts
	}
	for n := range i.ToPorts {
		if err := i.ToPorts[n].sanitize(); err != nil {
			return err
		}
	}
	for n := range i.ICMPs {
		if err := i.ICMPs[n].verify(); err != nil {
			return err
		}
	}
	// Pre-compute the aggregated selectors now that the rule is valid.
	i.SetAggregatedSelectors()
	return nil
}
// sanitize validates the L3 portions shared by ingress allow and deny rules:
// only one kind of peer selector may be used per rule, and each selector /
// CIDR / entity must itself be valid.
func (i *IngressCommonRule) sanitize() error {
	// Count each L3 peer-selector kind; mixing kinds is not supported.
	l3Members := map[string]int{
		"FromEndpoints": len(i.FromEndpoints),
		"FromCIDR":      len(i.FromCIDR),
		"FromCIDRSet":   len(i.FromCIDRSet),
		"FromEntities":  len(i.FromEntities),
		"FromNodes":     len(i.FromNodes),
		"FromGroups":    len(i.FromGroups),
	}
	for m1 := range l3Members {
		for m2 := range l3Members {
			if m2 != m1 && l3Members[m1] > 0 && l3Members[m2] > 0 {
				return fmt.Errorf("combining %s and %s is not supported yet", m1, m2)
			}
		}
	}
	// retErr is deferred rather than returned immediately: the selector
	// checks below still run, and any error they produce is joined with it.
	var retErr error
	if len(i.FromNodes) > 0 && !option.Config.EnableNodeSelectorLabels {
		retErr = ErrFromToNodesRequiresNodeSelectorOption
	}
	for n := range i.FromEndpoints {
		if err := i.FromEndpoints[n].Sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	for n := range i.FromRequires {
		if err := i.FromRequires[n].Sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	for n := range i.FromNodes {
		if err := i.FromNodes[n].Sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	for n := range i.FromCIDR {
		if err := i.FromCIDR[n].sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	for n := range i.FromCIDRSet {
		if err := i.FromCIDRSet[n].sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	// Entities must be members of the known entity-selector mapping.
	for _, fromEntity := range i.FromEntities {
		_, ok := EntitySelectorMapping[fromEntity]
		if !ok {
			return errors.Join(fmt.Errorf("unsupported entity: %s", fromEntity), retErr)
		}
	}
	return retErr
}
// countNonGeneratedCIDRRules counts the number of CIDRRule items which are not
// `Generated`, i.e. were directly provided by the user.
// The `Generated` field is currently only set by the `ToServices`
// implementation, which extracts service endpoints and translates them as
// ToCIDRSet rules before the CNP is passed to the policy repository.
// Therefore, we want to allow the combination of ToCIDRSet and ToServices
// rules, if (and only if) the ToCIDRSet only contains `Generated` entries.
func countNonGeneratedCIDRRules(s CIDRRuleSlice) int {
	count := 0
	for idx := range s {
		if !s[idx].Generated {
			count++
		}
	}
	return count
}
// countNonGeneratedEndpoints counts the number of EndpointSelector items which are not
// `Generated`, i.e. were directly provided by the user.
// The `Generated` field is currently only set by the `ToServices`
// implementation, which extracts service endpoints and translates them as
// ToEndpoints rules before the CNP is passed to the policy repository.
// Therefore, we want to allow the combination of ToEndpoints and ToServices
// rules, if (and only if) the ToEndpoints only contains `Generated` entries.
func countNonGeneratedEndpoints(s []EndpointSelector) int {
	count := 0
	for idx := range s {
		if !s[idx].Generated {
			count++
		}
	}
	return count
}
// sanitize validates a single egress allow rule. hostPolicy is true when the
// enclosing Rule uses a NodeSelector (host policy), which restricts which L7
// protocols are permitted.
func (e *EgressRule) sanitize(hostPolicy bool) error {
	l3Members := e.l3Members()
	l3DependentL4Support := e.l3DependentL4Support()
	l7Members := countL7Rules(e.ToPorts)
	// Per-protocol L7 support on egress: Kafka/HTTP are unsupported for
	// host policy; DNS is always allowed.
	l7EgressSupport := map[string]bool{
		"DNS":   true,
		"Kafka": !hostPolicy,
		"HTTP":  !hostPolicy,
	}
	if err := e.EgressCommonRule.sanitize(l3Members); err != nil {
		return err
	}
	// Some L3 member kinds cannot be combined with ToPorts.
	for member := range l3Members {
		if l3Members[member] > 0 && len(e.ToPorts) > 0 && !l3DependentL4Support[member] {
			return fmt.Errorf("combining %s and ToPorts is not supported yet", member)
		}
	}
	if len(l7Members) > 0 && !option.Config.EnableL7Proxy {
		return errors.New("L7 policy is not supported since L7 proxy is not enabled")
	}
	for member := range l7Members {
		if l7Members[member] > 0 && !l7EgressSupport[member] {
			where := ""
			if hostPolicy {
				where = "host "
			}
			return fmt.Errorf("L7 protocol %s is not supported on %segress yet", member, where)
		}
	}
	if len(e.ICMPs) > 0 && !option.Config.EnableICMPRules {
		return fmt.Errorf("ICMP rules can only be applied when the %q flag is set", option.EnableICMPRules)
	}
	// ICMPs and ToPorts are mutually exclusive within one rule.
	if len(e.ICMPs) > 0 && len(e.ToPorts) > 0 {
		return errUnsupportedICMPWithToPorts
	}
	for i := range e.ToPorts {
		if err := e.ToPorts[i].sanitize(false); err != nil {
			return err
		}
	}
	for n := range e.ICMPs {
		if err := e.ICMPs[n].verify(); err != nil {
			return err
		}
	}
	for i := range e.ToFQDNs {
		err := e.ToFQDNs[i].sanitize()
		if err != nil {
			return err
		}
	}
	// Pre-compute the aggregated selectors now that the rule is valid.
	e.SetAggregatedSelectors()
	return nil
}
// l3Members returns the count of entries per L3 selector member on this
// rule, extending the common egress members with the egress-only ToFQDNs.
func (e *EgressRule) l3Members() map[string]int {
	members := e.EgressCommonRule.l3Members()
	members["ToFQDNs"] = len(e.ToFQDNs)
	return members
}
// l3DependentL4Support reports, per L3 member, whether it may be combined
// with ToPorts; ToFQDNs supports L3-dependent L4 filtering.
func (e *EgressRule) l3DependentL4Support() map[string]bool {
	support := e.EgressCommonRule.l3DependentL4Support()
	support["ToFQDNs"] = true
	return support
}
// sanitize validates an EgressDenyRule, returning the first problem found.
// Deny rules carry no L7 configuration, so only the common egress checks,
// the L3/ToPorts combination check, the ICMP checks, and per-element
// sanitization of ToPorts/ICMPs apply.
func (e *EgressDenyRule) sanitize() error {
	l3Members := e.l3Members()
	l3DependentL4Support := e.l3DependentL4Support()
	if err := e.EgressCommonRule.sanitize(l3Members); err != nil {
		return err
	}
	// L3 members lacking L3-dependent L4 support cannot combine with ToPorts.
	for member := range l3Members {
		if l3Members[member] > 0 && len(e.ToPorts) > 0 && !l3DependentL4Support[member] {
			return fmt.Errorf("combining %s and ToPorts is not supported yet", member)
		}
	}
	if len(e.ICMPs) > 0 && !option.Config.EnableICMPRules {
		return fmt.Errorf("ICMP rules can only be applied when the %q flag is set", option.EnableICMPRules)
	}
	// ICMP and ToPorts are mutually exclusive within a single rule.
	if len(e.ICMPs) > 0 && len(e.ToPorts) > 0 {
		return errUnsupportedICMPWithToPorts
	}
	for i := range e.ToPorts {
		if err := e.ToPorts[i].sanitize(); err != nil {
			return err
		}
	}
	for n := range e.ICMPs {
		if err := e.ICMPs[n].verify(); err != nil {
			return err
		}
	}
	// Precompute aggregated selectors once the rule is known-valid.
	e.SetAggregatedSelectors()
	return nil
}
// l3Members returns the count of entries per L3 selector member on this
// deny rule. Deny rules have no ToFQDNs, so only the common members apply.
func (e *EgressDenyRule) l3Members() map[string]int {
	return e.EgressCommonRule.l3Members()
}
// l3DependentL4Support reports, per L3 member, whether it may be combined
// with ToPorts. Deny rules simply inherit the common egress support table.
func (e *EgressDenyRule) l3DependentL4Support() map[string]bool {
	return e.EgressCommonRule.l3DependentL4Support()
}
// sanitize validates the L3 fields shared by egress allow and deny rules.
// l3Members maps each L3 member name to the number of entries it holds; the
// members are mutually exclusive, so at most one may be non-zero.
//
// A ToNodes selector used without the EnableNodeSelectorLabels option is
// recorded in retErr rather than returned immediately: it is joined onto
// any later sanitization error, or returned alone if everything else is
// valid.
func (e *EgressCommonRule) sanitize(l3Members map[string]int) error {
	// Reject any combination of two different non-empty L3 members.
	for m1 := range l3Members {
		for m2 := range l3Members {
			if m2 != m1 && l3Members[m1] > 0 && l3Members[m2] > 0 {
				return fmt.Errorf("combining %s and %s is not supported yet", m1, m2)
			}
		}
	}
	var retErr error
	if len(e.ToNodes) > 0 && !option.Config.EnableNodeSelectorLabels {
		retErr = ErrFromToNodesRequiresNodeSelectorOption
	}
	for i := range e.ToEndpoints {
		if err := e.ToEndpoints[i].Sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	for i := range e.ToRequires {
		if err := e.ToRequires[i].Sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	for i := range e.ToNodes {
		if err := e.ToNodes[i].Sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	for i := range e.ToCIDR {
		if err := e.ToCIDR[i].sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	for i := range e.ToCIDRSet {
		if err := e.ToCIDRSet[i].sanitize(); err != nil {
			return errors.Join(err, retErr)
		}
	}
	// Every referenced entity must be a known one.
	for _, toEntity := range e.ToEntities {
		_, ok := EntitySelectorMapping[toEntity]
		if !ok {
			return errors.Join(fmt.Errorf("unsupported entity: %s", toEntity), retErr)
		}
	}
	return retErr
}
// l3Members returns, per L3 member name, the number of user-provided
// entries. Generated ToCIDRSet/ToEndpoints entries (created by the
// ToServices translation) are excluded so that those generated
// combinations are not rejected by sanitize.
func (e *EgressCommonRule) l3Members() map[string]int {
	return map[string]int{
		"ToCIDR":      len(e.ToCIDR),
		"ToCIDRSet":   countNonGeneratedCIDRRules(e.ToCIDRSet),
		"ToEndpoints": countNonGeneratedEndpoints(e.ToEndpoints),
		"ToEntities":  len(e.ToEntities),
		"ToServices":  len(e.ToServices),
		"ToGroups":    len(e.ToGroups),
		"ToNodes":     len(e.ToNodes),
	}
}
// l3DependentL4Support reports, per L3 member, whether it can be combined
// with ToPorts. All common egress members currently support it.
func (e *EgressCommonRule) l3DependentL4Support() map[string]bool {
	return map[string]bool{
		"ToCIDR":      true,
		"ToCIDRSet":   true,
		"ToEndpoints": true,
		"ToEntities":  true,
		"ToServices":  true,
		"ToGroups":    true,
		"ToNodes":     true,
	}
}
// sanitize validates the L7Rules, ensuring that at most one L7 protocol
// rule type is configured and sanitizing each individual rule. ports are
// the L4 ports these rules apply to; DNS rules require an explicit port.
func (pr *L7Rules) sanitize(ports []PortProtocol) error {
	// nTypes counts how many distinct L7 rule types are set; more than one
	// in a single rule is rejected at the end.
	nTypes := 0
	if pr.HTTP != nil {
		nTypes++
		for i := range pr.HTTP {
			if err := pr.HTTP[i].Sanitize(); err != nil {
				return err
			}
		}
	}
	if pr.Kafka != nil {
		nTypes++
		for i := range pr.Kafka {
			if err := pr.Kafka[i].Sanitize(); err != nil {
				return err
			}
		}
	}
	if pr.DNS != nil {
		// Forthcoming TPROXY redirection restricts DNS proxy to the standard DNS port (53).
		// Require the port 53 be explicitly configured, and disallow other port numbers.
		if len(ports) == 0 {
			return errors.New("port 53 must be specified for DNS rules")
		}
		nTypes++
		for i := range pr.DNS {
			if err := pr.DNS[i].Sanitize(); err != nil {
				return err
			}
		}
	}
	// 'l7' entries are only meaningful together with an 'l7proto'.
	// errors.New is used instead of fmt.Errorf for these constant messages
	// (no format verbs).
	if pr.L7 != nil && pr.L7Proto == "" {
		return errors.New("'l7' may only be specified when a 'l7proto' is also specified")
	}
	if pr.L7Proto != "" {
		nTypes++
		for i := range pr.L7 {
			if err := pr.L7[i].Sanitize(); err != nil {
				return err
			}
		}
	}
	if nTypes > 1 {
		return errors.New("multiple L7 protocol rule types specified in single rule")
	}
	return nil
}
// It is not allowed to configure an ingress listener, but we still
// have some unit tests relying on this. So, allow overriding this check in the unit tests.
var TestAllowIngressListener = false

// sanitize validates a PortRule. ingress reports whether the rule appears
// in an ingress (true) or egress (false) context; DNS rules and custom
// Envoy listeners are only allowed on egress.
func (pr *PortRule) sanitize(ingress bool) error {
	hasDNSRules := pr.Rules != nil && len(pr.Rules.DNS) > 0
	if ingress && hasDNSRules {
		return fmt.Errorf("DNS rules are not allowed on ingress")
	}
	// ServerNames with L7 rules require TLS termination; otherwise the SNI
	// restriction could not be enforced alongside the L7 rules.
	if len(pr.ServerNames) > 0 && !pr.Rules.IsEmpty() && pr.TerminatingTLS == nil {
		return fmt.Errorf("ServerNames are not allowed with L7 rules without TLS termination")
	}
	if slices.Contains(pr.ServerNames, "") {
		return errEmptyServerName
	}
	if len(pr.Ports) > maxPorts {
		return fmt.Errorf("too many ports, the max is %d", maxPorts)
	}
	// haveZeroPort tracks whether any entry resolves to port 0, which is
	// incompatible with L7 rules (checked at the bottom).
	haveZeroPort := false
	for i := range pr.Ports {
		var isZero bool
		var err error
		if isZero, err = pr.Ports[i].sanitize(hasDNSRules); err != nil {
			return err
		}
		if isZero {
			haveZeroPort = true
		}
		// DNS L7 rules can be TCP, UDP or ANY, all others are TCP only.
		switch {
		case pr.Rules.IsEmpty(), hasDNSRules:
			// nothing to do if no rules OR they are DNS rules (note the comma above)
		case pr.Ports[i].Protocol != ProtoTCP:
			return fmt.Errorf("L7 rules can only apply to TCP (not %s) except for DNS rules", pr.Ports[i].Protocol)
		}
	}
	listener := pr.Listener
	if listener != nil {
		// For now we have only tested custom listener support on the egress path. TODO
		// (jrajahalme): Lift this limitation in follow-up work once proper testing has been
		// done on the ingress path.
		if ingress && !TestAllowIngressListener {
			return fmt.Errorf("Listener is not allowed on ingress (%s)", listener.Name)
		}
		// There is no guarantee that Listener will support Cilium policy enforcement. Even
		// now proxylib-based enforcement (e.g, Kafka) may work, but has not been tested.
		// TODO (jrajahalme): Lift this limitation in follow-up work for proxylib based
		// parsers if needed and when tested.
		if !pr.Rules.IsEmpty() {
			return fmt.Errorf("Listener is not allowed with L7 rules (%s)", listener.Name)
		}
	}
	// Sanitize L7 rules
	if !pr.Rules.IsEmpty() {
		if haveZeroPort {
			return errors.New("L7 rules can not be used when a port is 0")
		}
		if err := pr.Rules.sanitize(pr.Ports); err != nil {
			return err
		}
	}
	return nil
}
// sanitize validates a PortDenyRule: the number of port entries is bounded
// by maxPorts, and each entry must itself sanitize cleanly. Deny rules
// carry no DNS L7 rules, hence hasDNSRules is always false here.
func (pr *PortDenyRule) sanitize() error {
	if len(pr.Ports) > maxPorts {
		return fmt.Errorf("too many ports, the max is %d", maxPorts)
	}
	for idx := range pr.Ports {
		_, err := pr.Ports[idx].sanitize(false)
		if err != nil {
			return err
		}
	}
	return nil
}
// sanitize normalizes and validates a single PortProtocol entry.
//
// It returns isZero == true when the port parses to the numeric value 0
// (callers disallow L7 rules on such entries). Named ports are lowercased
// for case-insensitive comparison; numeric ports are parsed (base inferred
// from the literal, 16-bit range) and checked against protocol-specific
// constraints. The Protocol field is normalized via ParseL4Proto.
func (pp *PortProtocol) sanitize(hasDNSRules bool) (isZero bool, err error) {
	if pp.Port == "" {
		// An empty port is only permitted when extended IP protocols are
		// enabled.
		if !option.Config.EnableExtendedIPProtocols {
			return isZero, errors.New("port must be specified")
		}
	}
	// Port names are formatted as IANA Service Names. This means that
	// some legal numeric literals are no longer considered numbers, e.g,
	// 0x10 is now considered a name rather than number 16.
	if iana.IsSvcName(pp.Port) {
		pp.Port = strings.ToLower(pp.Port) // Normalize for case insensitive comparison
	} else if pp.Port != "" {
		// VRRP and IGMP have no port concept; only "0" (or empty) is valid.
		if pp.Port != "0" && (pp.Protocol == ProtoVRRP || pp.Protocol == ProtoIGMP) {
			return isZero, errors.New("port must be empty or 0")
		}
		p, err := strconv.ParseUint(pp.Port, 0, 16)
		if err != nil {
			return isZero, fmt.Errorf("unable to parse port: %w", err)
		}
		isZero = p == 0
		// An EndPort above the start port denotes a range, which the DNS
		// proxy does not support.
		if hasDNSRules && pp.EndPort > int32(p) {
			return isZero, errors.New("DNS rules do not support port ranges")
		}
	}
	pp.Protocol, err = ParseL4Proto(string(pp.Protocol))
	return isZero, err
}
// verify checks that an ICMPRule holds no more than maxICMPFields entries
// and that every field's address family, when set, is IPv4 or IPv6.
func (ir *ICMPRule) verify() error {
	if len(ir.Fields) > maxICMPFields {
		return fmt.Errorf("too many types, the max is %d", maxICMPFields)
	}
	for i := range ir.Fields {
		switch ir.Fields[i].Family {
		case IPv4Family, IPv6Family, "":
			// valid family (empty means unspecified)
		default:
			return fmt.Errorf("wrong family: %s", ir.Fields[i].Family)
		}
	}
	return nil
}
// sanitize validates the given CIDR. Both prefix notation ("10.0.0.0/8")
// and a bare IP address are accepted; an empty string is rejected.
func (c CIDR) sanitize() error {
	raw := string(c)
	if raw == "" {
		return fmt.Errorf("IP must be specified")
	}
	prefix, prefixErr := netip.ParsePrefix(raw)
	if prefixErr != nil {
		// Not in prefix form: fall back to accepting a bare IP address.
		if _, addrErr := netip.ParseAddr(raw); addrErr != nil {
			return fmt.Errorf("unable to parse CIDR: %w", addrErr)
		}
		return nil
	}
	if prefix.Bits() < 0 {
		return fmt.Errorf("CIDR cannot specify non-contiguous mask %s", prefix)
	}
	return nil
}
// sanitize validates a CIDRRule by checking that the CIDR prefix itself is
// valid, and ensuring that all of the exception CIDR prefixes are contained
// within the allowed CIDR prefix.
func (c *CIDRRule) sanitize() error {
	// Exactly one of CIDR, CIDRGroupRef, or CIDRGroupSelector must be set
	cnt := 0
	if len(c.CIDRGroupRef) > 0 {
		cnt++
	}
	if len(c.Cidr) > 0 {
		cnt++
	}
	if c.CIDRGroupSelector != nil {
		cnt++
		// Validate the selector eagerly, so a malformed selector is
		// reported even before the exactly-one check below.
		es := NewESFromK8sLabelSelector(labels.LabelSourceCIDRGroupKeyPrefix, c.CIDRGroupSelector)
		if err := es.Sanitize(); err != nil {
			return fmt.Errorf("failed to parse cidrGroupSelector %v: %w", c.CIDRGroupSelector.String(), err)
		}
	}
	if cnt == 0 {
		return fmt.Errorf("one of cidr, cidrGroupRef, or cidrGroupSelector is required")
	}
	if cnt > 1 {
		return fmt.Errorf("more than one of cidr, cidrGroupRef, or cidrGroupSelector may not be set")
	}
	// Group references and selectors are resolved elsewhere; nothing more
	// to validate here.
	if len(c.CIDRGroupRef) > 0 || c.CIDRGroupSelector != nil {
		return nil // these are selectors;
	}
	// Only allow notation <IP address>/<prefix>. Note that this differs from
	// the logic in api.CIDR.Sanitize().
	prefix, err := netip.ParsePrefix(string(c.Cidr))
	if err != nil {
		return fmt.Errorf("unable to parse CIDRRule %q: %w", c.Cidr, err)
	}
	prefixLength := prefix.Bits()
	if prefixLength < 0 {
		return fmt.Errorf("CIDR cannot specify non-contiguous mask %s", prefix)
	}
	// Ensure that each provided exception CIDR prefix is formatted correctly,
	// and is contained within the CIDR prefix to/from which we want to allow
	// traffic.
	for _, p := range c.ExceptCIDRs {
		except, err := netip.ParsePrefix(string(p))
		if err != nil {
			return err
		}
		// Note: this also checks that the allow CIDR prefix and the exception
		// CIDR prefixes are part of the same address family.
		if !prefix.Contains(except.Addr()) {
			return fmt.Errorf("allow CIDR prefix %s does not contain "+
				"exclude CIDR prefix %s", c.Cidr, p)
		}
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"fmt"
"strings"
)
// Rules is a collection of api.Rule.
//
// All rules must be evaluated in order to come to a conclusion. While
// it is sufficient to have a single fromEndpoints rule match, none of
// the fromRequires may be violated at the same time.
//
// The deepequal-gen marker below generates a private deepEqual method;
// the hand-written DeepEqual wraps it with nil handling.
// +deepequal-gen:private-method=true
type Rules []*Rule
// String returns a bracketed, pretty-printed representation of all rules,
// separated by ",\n".
func (rs Rules) String() string {
	var b strings.Builder
	b.WriteString("[")
	for i, rule := range rs {
		if i > 0 {
			b.WriteString(",\n")
		}
		fmt.Fprintf(&b, "%+v", rule)
	}
	b.WriteString("]")
	return b.String()
}
// DeepEqual is a deepequal function, deeply comparing the
// receiver with other. A nil receiver only equals a nil other; when both
// are non-nil, the generated deepEqual performs the element-wise compare.
func (rs *Rules) DeepEqual(other *Rules) bool {
	switch {
	case (rs == nil) != (other == nil):
		return false
	case (rs == nil) && (other == nil):
		return true
	}
	return rs.deepEqual(other)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"encoding/json"
"fmt"
"strings"
k8sLbls "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
)
// EndpointSelector is a wrapper for k8s LabelSelector.
// Only the embedded LabelSelector is (un)marshalled; the remaining fields
// are internal caches and flags.
type EndpointSelector struct {
	*slim_metav1.LabelSelector `json:",inline"`
	// requirements provides a cache for a k8s-friendly format of the
	// LabelSelector, which allows more efficient matching in Matches().
	//
	// Kept as a pointer to allow EndpointSelector to be used as a map key.
	requirements *k8sLbls.Requirements `json:"-"`
	// cachedLabelSelectorString is the cached representation of the
	// LabelSelector for this EndpointSelector. It is populated when
	// EndpointSelectors are created via `NewESFromMatchRequirements`. It is
	// immutable after its creation.
	cachedLabelSelectorString string `json:"-"`
	// Generated indicates whether the rule was generated based on other rules
	// or provided by user
	Generated bool `json:"-"`
	// sanitized indicates if the EndpointSelector has been validated and converted
	// to Cilium's internal representation for usage. Internally Cilium uses k8s label
	// APIs which doesn't allow for `:` in label keys. When sanitizing we convert
	// keys to the format expected by k8s with prefix `<source>.`
	//
	// Cilium's Label key conversion logic as part of sanitization is:
	// 1. `<prefix>:<string>` -> `<prefix>.<string>` (Source: <prefix>)
	// 2. `<prefix>.<string>` -> `any.<prefix>.<string>` (Source: any)
	// 3. `<string>` -> `any.<string>` (Source: any)
	sanitized bool `json:"-"`
}
// IsZero reports whether no label selector has been set.
// Used for `omitzero` json tag.
func (n *EndpointSelector) IsZero() bool {
	return n.LabelSelector == nil
}
// LabelSelectorString returns a user-friendly string representation of
// EndpointSelector. A nil embedded selector (or a nil receiver) selects
// everything and is rendered as "<all>".
func (n *EndpointSelector) LabelSelectorString() string {
	// Guard the receiver as well: the previous `n != nil && ...` check let
	// a nil receiver fall through and panic on the n.LabelSelector access
	// below.
	if n == nil || n.LabelSelector == nil {
		return "<all>"
	}
	return slim_metav1.FormatLabelSelector(n.LabelSelector)
}
// String returns a string representation of EndpointSelector.
// The MarshalJSON error is deliberately discarded; on failure the result
// is the empty string.
func (n EndpointSelector) String() string {
	j, _ := n.MarshalJSON()
	return string(j)
}
// CachedString returns the cached string representation of the LabelSelector
// for this EndpointSelector. It is populated by NewESFromMatchRequirements
// (and refreshed by AddMatch/AddMatchExpression); otherwise it is empty.
func (n EndpointSelector) CachedString() string {
	return n.cachedLabelSelectorString
}
// UnmarshalJSON unmarshals the endpoint selector from the byte array.
// NOTE(review): this replaces the embedded LabelSelector but does not reset
// the cached requirements/sanitized state — presumably Sanitize() is run
// afterwards by callers; confirm before reusing a selector across decodes.
func (n *EndpointSelector) UnmarshalJSON(b []byte) error {
	n.LabelSelector = &slim_metav1.LabelSelector{}
	return json.Unmarshal(b, n.LabelSelector)
}
// MarshalJSON returns a JSON representation of the byte array.
// If the object is not sanitized, we return the serialized value of the
// underlying selector without the custom handling.
// When sanitized, we convert the label keys to Cilium specific representation
// with source prefix in format `<source>:<key>` before serialization.
func (n EndpointSelector) MarshalJSON() ([]byte, error) {
	ls := slim_metav1.LabelSelector{}
	// A nil selector marshals as an empty LabelSelector object.
	if n.LabelSelector == nil {
		return json.Marshal(ls)
	}
	if !n.sanitized {
		return json.Marshal(n.LabelSelector)
	}
	if n.MatchLabels != nil {
		newLabels := map[string]string{}
		for k, v := range n.MatchLabels {
			newLabels[labels.GetCiliumKeyFrom(k)] = v
		}
		ls.MatchLabels = newLabels
	}
	if n.MatchExpressions != nil {
		newMatchExpr := make([]slim_metav1.LabelSelectorRequirement, len(n.MatchExpressions))
		for i, v := range n.MatchExpressions {
			// v is a copy, so mutating its Key leaves the original intact.
			v.Key = labels.GetCiliumKeyFrom(v.Key)
			newMatchExpr[i] = v
		}
		ls.MatchExpressions = newMatchExpr
	}
	return json.Marshal(ls)
}
// HasKeyPrefix checks if the endpoint selector contains the given key prefix
// in its MatchLabels map or its MatchExpressions slice.
func (n EndpointSelector) HasKeyPrefix(prefix string) bool {
	for key := range n.MatchLabels {
		if strings.HasPrefix(key, prefix) {
			return true
		}
	}
	for i := range n.MatchExpressions {
		if strings.HasPrefix(n.MatchExpressions[i].Key, prefix) {
			return true
		}
	}
	return false
}
// HasKey checks if the endpoint selector contains the given key in
// its MatchLabels map or in its MatchExpressions slice.
func (n EndpointSelector) HasKey(key string) bool {
	if _, present := n.MatchLabels[key]; present {
		return true
	}
	for i := range n.MatchExpressions {
		if n.MatchExpressions[i].Key == key {
			return true
		}
	}
	return false
}
// GetMatch checks for a match on the specified key, and returns the value
// that the key must match, and true. If a match cannot be found, returns
// nil, false. Only "In" match expressions are considered.
func (n EndpointSelector) GetMatch(key string) ([]string, bool) {
	if value, found := n.MatchLabels[key]; found {
		return []string{value}, true
	}
	for i := range n.MatchExpressions {
		req := n.MatchExpressions[i]
		if req.Key == key && req.Operator == slim_metav1.LabelSelectorOpIn {
			return req.Values, true
		}
	}
	return nil, false
}
// labelSelectorToRequirements turns a kubernetes Selector into a slice of
// requirements equivalent to the selector. These are cached internally in the
// EndpointSelector to speed up Matches().
//
// This validates the labels, which can be expensive (and may fail..)
// If there's an error, the selector will be nil and the Matches()
// implementation will refuse to match any labels.
func labelSelectorToRequirements(labelSelector *slim_metav1.LabelSelector) *k8sLbls.Requirements {
	selector, err := slim_metav1.LabelSelectorAsSelector(labelSelector)
	if err != nil {
		// Record the failure in metrics and log it; callers treat a nil
		// result as "match nothing".
		metrics.PolicyChangeTotal.WithLabelValues(metrics.LabelValueOutcomeFail).Inc()
		// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
		logging.DefaultSlogLogger.Error(
			"unable to construct selector in label selector",
			logfields.LogSubsys, "policy-api",
			logfields.Error, err,
			logfields.EndpointLabelSelector, labelSelector,
		)
		return nil
	}
	metrics.PolicyChangeTotal.WithLabelValues(metrics.LabelValueOutcomeSuccess).Inc()
	requirements, selectable := selector.Requirements()
	// A non-selectable selector also maps to nil ("match nothing").
	if !selectable {
		return nil
	}
	return &requirements
}
// NewESFromLabels creates a new endpoint selector from the given labels,
// using each label's extended key as the MatchLabels key.
func NewESFromLabels(lbls ...labels.Label) EndpointSelector {
	matchLabels := make(map[string]string, len(lbls))
	for _, lbl := range lbls {
		matchLabels[lbl.GetExtendedKey()] = lbl.Value
	}
	return NewESFromMatchRequirements(matchLabels, nil)
}
// NewESFromMatchRequirements creates a new endpoint selector from the given
// match specifications: An optional set of labels that must match, and
// an optional slice of LabelSelectorRequirements.
// The returned selector object is marked as sanitized, the caller is responsible
// for ensuring that the Label keys are prefixed correctly with the required source.
//
// If the caller intends to reuse 'matchLabels' or 'reqs' after creating the
// EndpointSelector, they must make a copy of the parameter.
func NewESFromMatchRequirements(matchLabels map[string]string, reqs []slim_metav1.LabelSelectorRequirement) EndpointSelector {
	labelSelector := &slim_metav1.LabelSelector{
		MatchLabels:      matchLabels,
		MatchExpressions: reqs,
	}
	// Populate both caches eagerly so Matches() and CachedString() never
	// need to recompute them.
	return EndpointSelector{
		LabelSelector:             labelSelector,
		requirements:              labelSelectorToRequirements(labelSelector),
		cachedLabelSelectorString: labelSelector.String(),
		sanitized:                 true,
	}
}
// SyncRequirementsWithLabelSelector ensures that the requirements within the
// specified EndpointSelector are in sync with the LabelSelector. This is
// because the LabelSelector has publicly accessible fields, which can be
// updated without concurrently updating the requirements, so the two fields can
// become out of sync.
func (n *EndpointSelector) SyncRequirementsWithLabelSelector() {
	n.requirements = labelSelectorToRequirements(n.LabelSelector)
}
// newReservedEndpointSelector returns a selector that matches on all
// endpoints with the specified reserved label.
func newReservedEndpointSelector(ID string) EndpointSelector {
	return NewESFromLabels(labels.NewLabel(ID, "", labels.LabelSourceReserved))
}
var (
	// WildcardEndpointSelector is a wildcard endpoint selector matching
	// all endpoints that can be described with labels.
	WildcardEndpointSelector = NewESFromLabels()
	// ReservedEndpointSelectors map reserved labels to EndpointSelectors
	// that will match those endpoints. Keys are the reserved identity
	// names (host, remote-node, world, world-ipv4, world-ipv6).
	ReservedEndpointSelectors = map[string]EndpointSelector{
		labels.IDNameHost:       newReservedEndpointSelector(labels.IDNameHost),
		labels.IDNameRemoteNode: newReservedEndpointSelector(labels.IDNameRemoteNode),
		labels.IDNameWorld:      newReservedEndpointSelector(labels.IDNameWorld),
		labels.IDNameWorldIPv4:  newReservedEndpointSelector(labels.IDNameWorldIPv4),
		labels.IDNameWorldIPv6:  newReservedEndpointSelector(labels.IDNameWorldIPv6),
	}
)
// NewESFromK8sLabelSelectorWithExtender merges the given label selectors into
// a single EndpointSelector, rewriting every MatchLabels and MatchExpressions
// key through the provided extender. Nil selectors in lss are skipped, and
// the merged map/slice stay nil when no selector contributes to them.
func NewESFromK8sLabelSelectorWithExtender(extender labels.KeyExtender, lss ...*slim_metav1.LabelSelector) EndpointSelector {
	var (
		mergedLabels map[string]string
		mergedExprs  []slim_metav1.LabelSelectorRequirement
	)
	for _, selector := range lss {
		if selector == nil {
			continue
		}
		if selector.MatchLabels != nil {
			if mergedLabels == nil {
				mergedLabels = map[string]string{}
			}
			for key, value := range selector.MatchLabels {
				mergedLabels[extender(key)] = value
			}
		}
		if selector.MatchExpressions != nil {
			if mergedExprs == nil {
				mergedExprs = make([]slim_metav1.LabelSelectorRequirement, 0, len(selector.MatchExpressions))
			}
			for _, req := range selector.MatchExpressions {
				// req is a copy; rewriting its key does not mutate the input.
				req.Key = extender(req.Key)
				mergedExprs = append(mergedExprs, req)
			}
		}
	}
	return NewESFromMatchRequirements(mergedLabels, mergedExprs)
}
// NewESFromK8sLabelSelector returns a new endpoint selector from the given
// label selector(s), encoding the given srcPrefix into the selectors' keys.
func NewESFromK8sLabelSelector(srcPrefix string, lss ...*slim_metav1.LabelSelector) EndpointSelector {
	return NewESFromK8sLabelSelectorWithExtender(labels.GetSourcePrefixKeyExtender(srcPrefix), lss...)
}
// AddMatch adds a match for 'key' == 'value' to the endpoint selector.
// The requirements and string caches are recomputed so they stay consistent
// with the modified LabelSelector.
func (n *EndpointSelector) AddMatch(key, value string) {
	if n.MatchLabels == nil {
		n.MatchLabels = map[string]string{}
	}
	n.MatchLabels[key] = value
	n.requirements = labelSelectorToRequirements(n.LabelSelector)
	n.cachedLabelSelectorString = n.LabelSelector.String()
}
// AddMatchExpression adds a match expression to label selector of the endpoint selector.
func (n *EndpointSelector) AddMatchExpression(key string, op slim_metav1.LabelSelectorOperator, values []string) {
	n.MatchExpressions = append(n.MatchExpressions, slim_metav1.LabelSelectorRequirement{
		Key:      key,
		Operator: op,
		Values:   values,
	})
	// Update cache of the EndpointSelector from the embedded label selector.
	// This is to make sure we have updated caches containing the required selectors.
	n.requirements = labelSelectorToRequirements(n.LabelSelector)
	n.cachedLabelSelectorString = n.LabelSelector.String()
}
// Matches returns true if the endpoint selector Matches the `lblsToMatch`.
// Returns always true if the endpoint selector contains the reserved label for
// "all".
func (n *EndpointSelector) Matches(lblsToMatch k8sLbls.Labels) bool {
	// Try to update cached requirements for this EndpointSelector if possible.
	if n.requirements == nil {
		n.requirements = labelSelectorToRequirements(n.LabelSelector)
		// Nil indicates that requirements failed validation in some way,
		// so we cannot parse the labels for matching purposes; thus, we cannot
		// match if labels cannot be parsed, so return false.
		if n.requirements == nil {
			return false
		}
	}
	// All requirements must hold (logical AND).
	reqs := *n.requirements
	for i := range reqs {
		if !reqs[i].Matches(lblsToMatch) {
			return false
		}
	}
	return true
}
// IsWildcard returns true if the endpoint selector selects all endpoints:
// a non-nil selector with neither match labels nor match expressions.
func (n *EndpointSelector) IsWildcard() bool {
	if n.LabelSelector == nil {
		return false
	}
	return len(n.LabelSelector.MatchLabels) == 0 && len(n.LabelSelector.MatchExpressions) == 0
}
// ConvertToLabelSelectorRequirementSlice converts the MatchLabels and
// MatchExpressions within the specified EndpointSelector into a list of
// LabelSelectorRequirements.
func (n *EndpointSelector) ConvertToLabelSelectorRequirementSlice() []slim_metav1.LabelSelectorRequirement {
	out := make([]slim_metav1.LabelSelectorRequirement, 0, len(n.MatchExpressions)+len(n.MatchLabels))
	// Existing match expressions are carried over as-is.
	out = append(out, n.MatchExpressions...)
	// Each MatchLabels entry becomes an "In" requirement with one value.
	for key, value := range n.MatchLabels {
		out = append(out, slim_metav1.LabelSelectorRequirement{
			Key:      key,
			Operator: slim_metav1.LabelSelectorOpIn,
			Values:   []string{value},
		})
	}
	return out
}
// Sanitize returns an error if the EndpointSelector's LabelSelector is invalid.
// It also mutates all label selector keys into Cilium's internal representation.
// Check documentation of `EndpointSelector.sanitized` for more details.
func (n *EndpointSelector) Sanitize() error {
	es := n
	// Convert keys first so that validation runs against the internal
	// representation; an already-sanitized selector is validated in place.
	if !n.sanitized {
		sanitizedEndpointSelector := NewESFromK8sLabelSelectorWithExtender(labels.DefaultKeyExtender, n.LabelSelector)
		es = &sanitizedEndpointSelector
	}
	errList := validation.ValidateLabelSelector(es.LabelSelector, validation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, nil)
	if len(errList) > 0 {
		return fmt.Errorf("invalid label selector: %w", errList.ToAggregate())
	}
	// Only commit the converted selector back once validation succeeded.
	if !n.sanitized {
		n.sanitized = true
		n.LabelSelector = es.LabelSelector
		n.requirements = es.requirements
		n.cachedLabelSelectorString = es.cachedLabelSelectorString
	}
	return nil
}
// EndpointSelectorSlice is a slice of EndpointSelectors that can be sorted.
type EndpointSelectorSlice []EndpointSelector

func (s EndpointSelectorSlice) Len() int      { return len(s) }
func (s EndpointSelectorSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less orders selectors lexicographically by their user-friendly string
// representation.
func (s EndpointSelectorSlice) Less(i, j int) bool {
	// The built-in < operator is clearer (and faster) than
	// strings.Compare(a, b) < 0, per the strings.Compare documentation.
	return s[i].LabelSelectorString() < s[j].LabelSelectorString()
}
// Matches returns true if any of the EndpointSelectors in the slice match
// the provided labels.
func (s EndpointSelectorSlice) Matches(ctx labels.LabelArray) bool {
	for i := range s {
		if s[i].Matches(ctx) {
			return true
		}
	}
	return false
}
// SelectsAllEndpoints returns whether the EndpointSelectorSlice selects all
// endpoints, which is true if the wildcard endpoint selector is present in
// the slice.
func (s EndpointSelectorSlice) SelectsAllEndpoints() bool {
	for i := range s {
		if s[i].IsWildcard() {
			return true
		}
	}
	return false
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package api
import (
"context"
"fmt"
"slices"
"strings"
)
// Exists returns true if the HTTP rule already exists in the list of rules,
// using Equal for field-by-field comparison.
func (h *PortRuleHTTP) Exists(rules L7Rules) bool {
	return slices.ContainsFunc(rules.HTTP, h.Equal)
}
// Equal returns true if both HTTP rules are equal: identical Path, Method
// and Host, and element-wise identical Headers and HeaderMatches.
func (h *PortRuleHTTP) Equal(o PortRuleHTTP) bool {
	if h.Path != o.Path || h.Method != o.Method || h.Host != o.Host {
		return false
	}
	if len(h.Headers) != len(o.Headers) || len(h.HeaderMatches) != len(o.HeaderMatches) {
		return false
	}
	for i := range h.Headers {
		if h.Headers[i] != o.Headers[i] {
			return false
		}
	}
	for i := range h.HeaderMatches {
		if !o.HeaderMatches[i].Equal(h.HeaderMatches[i]) {
			return false
		}
	}
	return true
}
// Equal returns true if both Secrets are equal: both nil, or both non-nil
// with equal contents.
func (a *Secret) Equal(b *Secret) bool {
	if a == nil || b == nil {
		return a == b
	}
	return *a == *b
}
// Equal returns true if both HeaderMatches are equal across Mismatch, Name,
// Value and the referenced Secret.
func (h *HeaderMatch) Equal(o *HeaderMatch) bool {
	return h.Mismatch == o.Mismatch &&
		h.Name == o.Name &&
		h.Value == o.Value &&
		h.Secret.Equal(o.Secret)
}
// Exists returns true if the DNS rule already exists in the list of rules,
// using Equal for comparison.
func (d *PortRuleDNS) Exists(rules L7Rules) bool {
	return slices.ContainsFunc(rules.DNS, d.Equal)
}
// Exists returns true if the L7 rule already exists in the list of rules,
// using Equal for key/value comparison.
func (h *PortRuleL7) Exists(rules L7Rules) bool {
	return slices.ContainsFunc(rules.L7, h.Equal)
}
// Equal returns true if both rules are equal. A nil receiver is never equal
// to anything (the nil check makes this safe to call through ContainsFunc).
func (d *PortRuleDNS) Equal(o PortRuleDNS) bool {
	return d != nil && d.MatchName == o.MatchName && d.MatchPattern == o.MatchPattern
}
// Equal returns true if both L7 rules are equal: the same set of keys with
// identical values.
func (h *PortRuleL7) Equal(o PortRuleL7) bool {
	if len(*h) != len(o) {
		return false
	}
	// With equal lengths, checking that every entry of o appears in *h with
	// the same value is sufficient for map equality.
	for key, want := range o {
		got, ok := (*h)[key]
		if !ok || got != want {
			return false
		}
	}
	return true
}
// Validate returns an error if the layer 4 protocol is not valid.
// Accepted values are the upper-case protocol constants; ParseL4Proto
// upper-cases user input before calling this.
func (l4 L4Proto) Validate() error {
	switch l4 {
	case ProtoAny, ProtoTCP, ProtoUDP, ProtoSCTP, ProtoVRRP, ProtoIGMP:
	default:
		return fmt.Errorf("invalid protocol %q, must be { tcp | udp | sctp | vrrp | igmp | any }", l4)
	}
	return nil
}
// ParseL4Proto parses a string as layer 4 protocol. The empty string is
// accepted and maps to ProtoAny; any other input is upper-cased and
// validated.
func ParseL4Proto(proto string) (L4Proto, error) {
	if proto == "" {
		return ProtoAny, nil
	}
	parsed := L4Proto(strings.ToUpper(proto))
	return parsed, parsed.Validate()
}
// Option modifies the behavior of ResourceQualifiedName.
type Option int

const (
	// ForceNamespace forces the namespace/cecName qualification even when
	// the resource name already contains a slash, unless the name's first
	// path component equals the namespace.
	ForceNamespace Option = iota
)

// ResourceQualifiedName returns the qualified name of an Envoy resource,
// prepending CEC namespace and CEC name to the resource name and using
// '/' as a separator.
//
// If resourceName already has a slash, it must be of the form 'namespace/name', where namespace
// usually is equal to 'namespace'. This also applies for clusterwide resources for which
// 'namespace' is empty.
//
// If 'resourceName' has no slash, it will be prepended with 'namespace/cecName' so that the
// full name passed to Envoy is 'namespace/cecName/resourceName'. This makes non-qualified resource
// names and resource name references local to the given namespace and CiliumEnvoyConfig CRD.
//
// If the ForceNamespace option is given, resourceName is always prepended with
// "namespace/cecName/", even if it already has slashes, unless the first component of the name
// is equal to 'namespace'.
//
// As a special case pass through an empty resourceName without qualification so that unnamed
// resources do not become named. This is important to not transform an invalid Envoy configuration
// to a valid one with a fake name.
func ResourceQualifiedName(namespace, cecName, resourceName string, options ...Option) (name string, updated bool) {
	forceNamespace := false
	for _, option := range options {
		switch option {
		case ForceNamespace:
			forceNamespace = true
		}
	}
	// A name containing '/' is passed through unchanged unless forced, or
	// when its first component already equals the namespace.
	idx := strings.IndexRune(resourceName, '/')
	if resourceName == "" || idx >= 0 && (!forceNamespace || (idx == len(namespace) && strings.HasPrefix(resourceName, namespace))) {
		return resourceName, false
	}
	var sb strings.Builder
	sb.WriteString(namespace)
	sb.WriteRune('/')
	sb.WriteString(cecName)
	sb.WriteRune('/')
	sb.WriteString(resourceName)
	return sb.String(), true
}
// ParseQualifiedName returns the namespace, name, and the resource name of a
// name qualified with ResourceQualifiedName(). A name with fewer than two
// slashes is treated as unqualified and returned wholly as resourceName.
func ParseQualifiedName(qualifiedName string) (namespace, name, resourceName string) {
	ns, rest, ok := strings.Cut(qualifiedName, "/")
	if !ok {
		return "", "", qualifiedName
	}
	cec, res, ok := strings.Cut(rest, "/")
	if !ok {
		return "", "", qualifiedName
	}
	return ns, cec, res
}
// ExtractCidrSet abstracts away some of the logic from the CreateDerivative
// methods: it collects the CIDR rules of every group, stopping at (and
// returning alongside) the first error, together with the rules gathered so
// far.
func ExtractCidrSet(ctx context.Context, groups []Groups) ([]CIDRRule, error) {
	var collected []CIDRRule
	for i := range groups {
		rules, err := groups[i].GetCidrSet(ctx)
		if err != nil {
			return collected, err
		}
		if len(rules) > 0 {
			collected = append(collected, rules...)
		}
	}
	return collected, nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package api
import (
labels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
kafka "github.com/cilium/proxy/pkg/policy/api/kafka"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSGroup) DeepCopyInto(out *AWSGroup) {
	*out = *in
	// The shallow copy above shares map/slice backing storage; re-allocate
	// each non-nil reference field so out is fully independent of in.
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.SecurityGroupsIds != nil {
		in, out := &in.SecurityGroupsIds, &out.SecurityGroupsIds
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SecurityGroupsNames != nil {
		in, out := &in.SecurityGroupsNames, &out.SecurityGroupsNames
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSGroup.
func (in *AWSGroup) DeepCopy() *AWSGroup {
	if in == nil {
		return nil
	}
	out := new(AWSGroup)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Authentication has no reference-typed fields, so a plain value copy
// suffices.
func (in *Authentication) DeepCopyInto(out *Authentication) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication.
func (in *Authentication) DeepCopy() *Authentication {
	if in == nil {
		return nil
	}
	out := new(Authentication)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRRule) DeepCopyInto(out *CIDRRule) {
	*out = *in
	// Re-allocate the pointer/slice fields so out shares no storage with in.
	if in.CIDRGroupSelector != nil {
		in, out := &in.CIDRGroupSelector, &out.CIDRGroupSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.ExceptCIDRs != nil {
		in, out := &in.ExceptCIDRs, &out.ExceptCIDRs
		*out = make([]CIDR, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRRule.
// It returns nil when the receiver is nil.
func (in *CIDRRule) DeepCopy() *CIDRRule {
	if in == nil {
		return nil
	}
	out := new(CIDRRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in CIDRRuleSlice) DeepCopyInto(out *CIDRRuleSlice) {
	// Extra scope block so `in` can be shadowed as a pointer to the value receiver.
	{
		in := &in
		*out = make(CIDRRuleSlice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRRuleSlice.
// It returns nil when the receiver is nil.
func (in CIDRRuleSlice) DeepCopy() CIDRRuleSlice {
	if in == nil {
		return nil
	}
	out := new(CIDRRuleSlice)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in CIDRSlice) DeepCopyInto(out *CIDRSlice) {
	// Elements are plain values, so a fresh backing array plus copy suffices.
	{
		in := &in
		*out = make(CIDRSlice, len(*in))
		copy(*out, *in)
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRSlice.
// It returns nil when the receiver is nil.
func (in CIDRSlice) DeepCopy() CIDRSlice {
	if in == nil {
		return nil
	}
	out := new(CIDRSlice)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DefaultDenyConfig) DeepCopyInto(out *DefaultDenyConfig) {
	*out = *in
	// Re-allocate each non-nil *bool so out does not alias in's pointees.
	if in.Ingress != nil {
		in, out := &in.Ingress, &out.Ingress
		*out = new(bool)
		**out = **in
	}
	if in.Egress != nil {
		in, out := &in.Egress, &out.Egress
		*out = new(bool)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultDenyConfig.
// It returns nil when the receiver is nil.
func (in *DefaultDenyConfig) DeepCopy() *DefaultDenyConfig {
	if in == nil {
		return nil
	}
	out := new(DefaultDenyConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressCommonRule) DeepCopyInto(out *EgressCommonRule) {
	*out = *in
	// Each non-nil slice field is re-allocated; element-wise DeepCopyInto is
	// used where elements hold references, plain copy where they are values.
	if in.ToEndpoints != nil {
		in, out := &in.ToEndpoints, &out.ToEndpoints
		*out = make([]EndpointSelector, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ToRequires != nil {
		in, out := &in.ToRequires, &out.ToRequires
		*out = make([]EndpointSelector, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ToCIDR != nil {
		in, out := &in.ToCIDR, &out.ToCIDR
		*out = make(CIDRSlice, len(*in))
		copy(*out, *in)
	}
	if in.ToCIDRSet != nil {
		in, out := &in.ToCIDRSet, &out.ToCIDRSet
		*out = make(CIDRRuleSlice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ToEntities != nil {
		in, out := &in.ToEntities, &out.ToEntities
		*out = make(EntitySlice, len(*in))
		copy(*out, *in)
	}
	if in.ToServices != nil {
		in, out := &in.ToServices, &out.ToServices
		*out = make([]Service, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ToGroups != nil {
		in, out := &in.ToGroups, &out.ToGroups
		*out = make([]Groups, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ToNodes != nil {
		in, out := &in.ToNodes, &out.ToNodes
		*out = make([]EndpointSelector, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	// Note: the unexported aggregatedSelectors cache is deep-copied as well.
	if in.aggregatedSelectors != nil {
		in, out := &in.aggregatedSelectors, &out.aggregatedSelectors
		*out = make(EndpointSelectorSlice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressCommonRule.
// It returns nil when the receiver is nil.
func (in *EgressCommonRule) DeepCopy() *EgressCommonRule {
	if in == nil {
		return nil
	}
	out := new(EgressCommonRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressDenyRule) DeepCopyInto(out *EgressDenyRule) {
	*out = *in
	// Deep-copy the embedded common rule, then each non-nil slice field.
	in.EgressCommonRule.DeepCopyInto(&out.EgressCommonRule)
	if in.ToPorts != nil {
		in, out := &in.ToPorts, &out.ToPorts
		*out = make(PortDenyRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ICMPs != nil {
		in, out := &in.ICMPs, &out.ICMPs
		*out = make(ICMPRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressDenyRule.
// It returns nil when the receiver is nil.
func (in *EgressDenyRule) DeepCopy() *EgressDenyRule {
	if in == nil {
		return nil
	}
	out := new(EgressDenyRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressRule) DeepCopyInto(out *EgressRule) {
	*out = *in
	// Deep-copy the embedded common rule, then each non-nil reference field.
	in.EgressCommonRule.DeepCopyInto(&out.EgressCommonRule)
	if in.ToPorts != nil {
		in, out := &in.ToPorts, &out.ToPorts
		*out = make(PortRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ToFQDNs != nil {
		in, out := &in.ToFQDNs, &out.ToFQDNs
		*out = make(FQDNSelectorSlice, len(*in))
		copy(*out, *in)
	}
	if in.ICMPs != nil {
		in, out := &in.ICMPs, &out.ICMPs
		*out = make(ICMPRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Authentication != nil {
		in, out := &in.Authentication, &out.Authentication
		*out = new(Authentication)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRule.
// It returns nil when the receiver is nil.
func (in *EgressRule) DeepCopy() *EgressRule {
	if in == nil {
		return nil
	}
	out := new(EgressRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSelector) DeepCopyInto(out *EndpointSelector) {
	*out = *in
	if in.LabelSelector != nil {
		in, out := &in.LabelSelector, &out.LabelSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	// requirements is a pointer to a slice; both the pointer and, when the
	// pointee is non-nil, the slice elements are cloned.
	if in.requirements != nil {
		in, out := &in.requirements, &out.requirements
		*out = new(labels.Requirements)
		if **in != nil {
			in, out := *in, *out
			*out = make([]labels.Requirement, len(*in))
			for i := range *in {
				(*in)[i].DeepCopyInto(&(*out)[i])
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSelector.
// It returns nil when the receiver is nil.
func (in *EndpointSelector) DeepCopy() *EndpointSelector {
	if in == nil {
		return nil
	}
	out := new(EndpointSelector)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in EndpointSelectorSlice) DeepCopyInto(out *EndpointSelectorSlice) {
	// Extra scope block so `in` can be shadowed as a pointer to the value receiver.
	{
		in := &in
		*out = make(EndpointSelectorSlice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSelectorSlice.
// It returns nil when the receiver is nil.
func (in EndpointSelectorSlice) DeepCopy() EndpointSelectorSlice {
	if in == nil {
		return nil
	}
	out := new(EndpointSelectorSlice)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in EntitySlice) DeepCopyInto(out *EntitySlice) {
	// Elements are plain values, so a fresh backing array plus copy suffices.
	{
		in := &in
		*out = make(EntitySlice, len(*in))
		copy(*out, *in)
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntitySlice.
// It returns nil when the receiver is nil.
func (in EntitySlice) DeepCopy() EntitySlice {
	if in == nil {
		return nil
	}
	out := new(EntitySlice)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvoyConfig) DeepCopyInto(out *EnvoyConfig) {
	// Plain value copy: the generator emitted no per-field handling.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyConfig.
// It returns nil when the receiver is nil.
func (in *EnvoyConfig) DeepCopy() *EnvoyConfig {
	if in == nil {
		return nil
	}
	out := new(EnvoyConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FQDNSelector) DeepCopyInto(out *FQDNSelector) {
	// Plain value copy: the generator emitted no per-field handling.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FQDNSelector.
// It returns nil when the receiver is nil.
func (in *FQDNSelector) DeepCopy() *FQDNSelector {
	if in == nil {
		return nil
	}
	out := new(FQDNSelector)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in FQDNSelectorSlice) DeepCopyInto(out *FQDNSelectorSlice) {
	// Elements are plain values, so a fresh backing array plus copy suffices.
	{
		in := &in
		*out = make(FQDNSelectorSlice, len(*in))
		copy(*out, *in)
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FQDNSelectorSlice.
// It returns nil when the receiver is nil.
func (in FQDNSelectorSlice) DeepCopy() FQDNSelectorSlice {
	if in == nil {
		return nil
	}
	out := new(FQDNSelectorSlice)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Groups) DeepCopyInto(out *Groups) {
	*out = *in
	// Re-allocate the AWS pointer so out does not alias in's AWSGroup.
	if in.AWS != nil {
		in, out := &in.AWS, &out.AWS
		*out = new(AWSGroup)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Groups.
// It returns nil when the receiver is nil.
func (in *Groups) DeepCopy() *Groups {
	if in == nil {
		return nil
	}
	out := new(Groups)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HeaderMatch) DeepCopyInto(out *HeaderMatch) {
	*out = *in
	// Re-allocate the Secret pointer so out does not alias in's pointee.
	if in.Secret != nil {
		in, out := &in.Secret, &out.Secret
		*out = new(Secret)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatch.
// It returns nil when the receiver is nil.
func (in *HeaderMatch) DeepCopy() *HeaderMatch {
	if in == nil {
		return nil
	}
	out := new(HeaderMatch)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ICMPField) DeepCopyInto(out *ICMPField) {
	*out = *in
	// Re-allocate the Type pointer so out does not alias in's IntOrString.
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(intstr.IntOrString)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPField.
// It returns nil when the receiver is nil.
func (in *ICMPField) DeepCopy() *ICMPField {
	if in == nil {
		return nil
	}
	out := new(ICMPField)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ICMPRule) DeepCopyInto(out *ICMPRule) {
	*out = *in
	// Fields is deep-copied element-wise because ICMPField holds a pointer.
	if in.Fields != nil {
		in, out := &in.Fields, &out.Fields
		*out = make([]ICMPField, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPRule.
// It returns nil when the receiver is nil.
func (in *ICMPRule) DeepCopy() *ICMPRule {
	if in == nil {
		return nil
	}
	out := new(ICMPRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ICMPRules) DeepCopyInto(out *ICMPRules) {
	// Extra scope block so `in` can be shadowed as a pointer to the value receiver.
	{
		in := &in
		*out = make(ICMPRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPRules.
// It returns nil when the receiver is nil.
func (in ICMPRules) DeepCopy() ICMPRules {
	if in == nil {
		return nil
	}
	out := new(ICMPRules)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressCommonRule) DeepCopyInto(out *IngressCommonRule) {
	*out = *in
	// Mirror of EgressCommonRule.DeepCopyInto for the ingress direction:
	// element-wise DeepCopyInto for reference-holding elements, plain copy
	// for value elements.
	if in.FromEndpoints != nil {
		in, out := &in.FromEndpoints, &out.FromEndpoints
		*out = make([]EndpointSelector, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FromRequires != nil {
		in, out := &in.FromRequires, &out.FromRequires
		*out = make([]EndpointSelector, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FromCIDR != nil {
		in, out := &in.FromCIDR, &out.FromCIDR
		*out = make(CIDRSlice, len(*in))
		copy(*out, *in)
	}
	if in.FromCIDRSet != nil {
		in, out := &in.FromCIDRSet, &out.FromCIDRSet
		*out = make(CIDRRuleSlice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FromEntities != nil {
		in, out := &in.FromEntities, &out.FromEntities
		*out = make(EntitySlice, len(*in))
		copy(*out, *in)
	}
	if in.FromGroups != nil {
		in, out := &in.FromGroups, &out.FromGroups
		*out = make([]Groups, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FromNodes != nil {
		in, out := &in.FromNodes, &out.FromNodes
		*out = make([]EndpointSelector, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	// Note: the unexported aggregatedSelectors cache is deep-copied as well.
	if in.aggregatedSelectors != nil {
		in, out := &in.aggregatedSelectors, &out.aggregatedSelectors
		*out = make(EndpointSelectorSlice, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressCommonRule.
// It returns nil when the receiver is nil.
func (in *IngressCommonRule) DeepCopy() *IngressCommonRule {
	if in == nil {
		return nil
	}
	out := new(IngressCommonRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressDenyRule) DeepCopyInto(out *IngressDenyRule) {
	*out = *in
	// Deep-copy the embedded common rule, then each non-nil slice field.
	in.IngressCommonRule.DeepCopyInto(&out.IngressCommonRule)
	if in.ToPorts != nil {
		in, out := &in.ToPorts, &out.ToPorts
		*out = make(PortDenyRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ICMPs != nil {
		in, out := &in.ICMPs, &out.ICMPs
		*out = make(ICMPRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDenyRule.
// It returns nil when the receiver is nil.
func (in *IngressDenyRule) DeepCopy() *IngressDenyRule {
	if in == nil {
		return nil
	}
	out := new(IngressDenyRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRule) DeepCopyInto(out *IngressRule) {
	*out = *in
	// Deep-copy the embedded common rule, then each non-nil reference field.
	in.IngressCommonRule.DeepCopyInto(&out.IngressCommonRule)
	if in.ToPorts != nil {
		in, out := &in.ToPorts, &out.ToPorts
		*out = make(PortRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ICMPs != nil {
		in, out := &in.ICMPs, &out.ICMPs
		*out = make(ICMPRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Authentication != nil {
		in, out := &in.Authentication, &out.Authentication
		*out = new(Authentication)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
// It returns nil when the receiver is nil.
func (in *IngressRule) DeepCopy() *IngressRule {
	if in == nil {
		return nil
	}
	out := new(IngressRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K8sServiceNamespace) DeepCopyInto(out *K8sServiceNamespace) {
	// Plain value copy: the generator emitted no per-field handling.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sServiceNamespace.
// It returns nil when the receiver is nil.
func (in *K8sServiceNamespace) DeepCopy() *K8sServiceNamespace {
	if in == nil {
		return nil
	}
	out := new(K8sServiceNamespace)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K8sServiceSelectorNamespace) DeepCopyInto(out *K8sServiceSelectorNamespace) {
	*out = *in
	// Selector holds references, so it gets its own deep copy.
	in.Selector.DeepCopyInto(&out.Selector)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sServiceSelectorNamespace.
// It returns nil when the receiver is nil.
func (in *K8sServiceSelectorNamespace) DeepCopy() *K8sServiceSelectorNamespace {
	if in == nil {
		return nil
	}
	out := new(K8sServiceSelectorNamespace)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *L7Rules) DeepCopyInto(out *L7Rules) {
	*out = *in
	if in.HTTP != nil {
		in, out := &in.HTTP, &out.HTTP
		*out = make([]PortRuleHTTP, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Kafka != nil {
		in, out := &in.Kafka, &out.Kafka
		*out = make([]kafka.PortRule, len(*in))
		copy(*out, *in)
	}
	if in.DNS != nil {
		in, out := &in.DNS, &out.DNS
		*out = make([]PortRuleDNS, len(*in))
		copy(*out, *in)
	}
	// L7 is a slice of maps: both the slice and each non-nil map element are
	// re-allocated so no map storage is shared with in.
	if in.L7 != nil {
		in, out := &in.L7, &out.L7
		*out = make([]PortRuleL7, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = make(PortRuleL7, len(*in))
				for key, val := range *in {
					(*out)[key] = val
				}
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L7Rules.
// It returns nil when the receiver is nil.
func (in *L7Rules) DeepCopy() *L7Rules {
	if in == nil {
		return nil
	}
	out := new(L7Rules)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Listener) DeepCopyInto(out *Listener) {
	*out = *in
	// Re-allocate the EnvoyConfig pointer so out does not alias in's pointee.
	if in.EnvoyConfig != nil {
		in, out := &in.EnvoyConfig, &out.EnvoyConfig
		*out = new(EnvoyConfig)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener.
// It returns nil when the receiver is nil.
func (in *Listener) DeepCopy() *Listener {
	if in == nil {
		return nil
	}
	out := new(Listener)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LogConfig) DeepCopyInto(out *LogConfig) {
	// Plain value copy: the generator emitted no per-field handling.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfig.
// It returns nil when the receiver is nil.
func (in *LogConfig) DeepCopy() *LogConfig {
	if in == nil {
		return nil
	}
	out := new(LogConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortDenyRule) DeepCopyInto(out *PortDenyRule) {
	*out = *in
	// Ports elements are plain values, so a fresh slice plus copy suffices.
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]PortProtocol, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortDenyRule.
// It returns nil when the receiver is nil.
func (in *PortDenyRule) DeepCopy() *PortDenyRule {
	if in == nil {
		return nil
	}
	out := new(PortDenyRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in PortDenyRules) DeepCopyInto(out *PortDenyRules) {
	// Extra scope block so `in` can be shadowed as a pointer to the value receiver.
	{
		in := &in
		*out = make(PortDenyRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortDenyRules.
// It returns nil when the receiver is nil.
func (in PortDenyRules) DeepCopy() PortDenyRules {
	if in == nil {
		return nil
	}
	out := new(PortDenyRules)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortProtocol) DeepCopyInto(out *PortProtocol) {
	// Plain value copy: the generator emitted no per-field handling.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortProtocol.
// It returns nil when the receiver is nil.
func (in *PortProtocol) DeepCopy() *PortProtocol {
	if in == nil {
		return nil
	}
	out := new(PortProtocol)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortRule) DeepCopyInto(out *PortRule) {
	*out = *in
	// Each non-nil pointer/slice field is re-allocated below so out shares
	// no storage with in.
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]PortProtocol, len(*in))
		copy(*out, *in)
	}
	if in.TerminatingTLS != nil {
		in, out := &in.TerminatingTLS, &out.TerminatingTLS
		*out = new(TLSContext)
		(*in).DeepCopyInto(*out)
	}
	if in.OriginatingTLS != nil {
		in, out := &in.OriginatingTLS, &out.OriginatingTLS
		*out = new(TLSContext)
		(*in).DeepCopyInto(*out)
	}
	if in.ServerNames != nil {
		in, out := &in.ServerNames, &out.ServerNames
		*out = make([]ServerName, len(*in))
		copy(*out, *in)
	}
	if in.Listener != nil {
		in, out := &in.Listener, &out.Listener
		*out = new(Listener)
		(*in).DeepCopyInto(*out)
	}
	if in.Rules != nil {
		in, out := &in.Rules, &out.Rules
		*out = new(L7Rules)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRule.
// It returns nil when the receiver is nil.
func (in *PortRule) DeepCopy() *PortRule {
	if in == nil {
		return nil
	}
	out := new(PortRule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortRuleDNS) DeepCopyInto(out *PortRuleDNS) {
	// Plain value copy: the generator emitted no per-field handling.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRuleDNS.
// It returns nil when the receiver is nil.
func (in *PortRuleDNS) DeepCopy() *PortRuleDNS {
	if in == nil {
		return nil
	}
	out := new(PortRuleDNS)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortRuleHTTP) DeepCopyInto(out *PortRuleHTTP) {
	*out = *in
	if in.Headers != nil {
		in, out := &in.Headers, &out.Headers
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	// HeaderMatches is a slice of pointers: nil entries stay nil, non-nil
	// entries get a freshly allocated, deep-copied HeaderMatch.
	if in.HeaderMatches != nil {
		in, out := &in.HeaderMatches, &out.HeaderMatches
		*out = make([]*HeaderMatch, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(HeaderMatch)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRuleHTTP.
// It returns nil when the receiver is nil.
func (in *PortRuleHTTP) DeepCopy() *PortRuleHTTP {
	if in == nil {
		return nil
	}
	out := new(PortRuleHTTP)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in PortRuleL7) DeepCopyInto(out *PortRuleL7) {
	// PortRuleL7 is a map type; allocate a fresh map and copy each entry.
	{
		in := &in
		*out = make(PortRuleL7, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRuleL7.
// It returns nil when the receiver is nil.
func (in PortRuleL7) DeepCopy() PortRuleL7 {
	if in == nil {
		return nil
	}
	out := new(PortRuleL7)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in PortRules) DeepCopyInto(out *PortRules) {
	// Extra scope block so `in` can be shadowed as a pointer to the value receiver.
	{
		in := &in
		*out = make(PortRules, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRules.
// It returns nil when the receiver is nil.
func (in PortRules) DeepCopy() PortRules {
	if in == nil {
		return nil
	}
	out := new(PortRules)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rule) DeepCopyInto(out *Rule) {
	*out = *in
	// Deep-copy the selector fields, then every non-nil rule slice.
	in.EndpointSelector.DeepCopyInto(&out.EndpointSelector)
	in.NodeSelector.DeepCopyInto(&out.NodeSelector)
	if in.Ingress != nil {
		in, out := &in.Ingress, &out.Ingress
		*out = make([]IngressRule, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IngressDeny != nil {
		in, out := &in.IngressDeny, &out.IngressDeny
		*out = make([]IngressDenyRule, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Egress != nil {
		in, out := &in.Egress, &out.Egress
		*out = make([]EgressRule, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.EgressDeny != nil {
		in, out := &in.EgressDeny, &out.EgressDeny
		*out = make([]EgressDenyRule, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	// Labels supplies its own DeepCopy; EnableDefaultDeny is cloned in place;
	// Log is a plain value assignment.
	out.Labels = in.Labels.DeepCopy()
	in.EnableDefaultDeny.DeepCopyInto(&out.EnableDefaultDeny)
	out.Log = in.Log
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
// It returns nil when the receiver is nil.
func (in *Rule) DeepCopy() *Rule {
	if in == nil {
		return nil
	}
	out := new(Rule)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Rules) DeepCopyInto(out *Rules) {
	// Rules is a slice of *Rule: nil entries stay nil, non-nil entries get a
	// freshly allocated, deep-copied Rule.
	{
		in := &in
		*out = make(Rules, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(Rule)
				(*in).DeepCopyInto(*out)
			}
		}
		return
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rules.
// It returns nil when the receiver is nil.
func (in Rules) DeepCopy() Rules {
	if in == nil {
		return nil
	}
	out := new(Rules)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Secret) DeepCopyInto(out *Secret) {
	// Plain value copy: the generator emitted no per-field handling.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
// It returns nil when the receiver is nil.
func (in *Secret) DeepCopy() *Secret {
	if in == nil {
		return nil
	}
	out := new(Secret)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Service) DeepCopyInto(out *Service) {
	*out = *in
	// K8sServiceSelector needs a recursive deep copy; K8sService is a plain
	// value behind its pointer, so a dereferencing assignment suffices.
	if in.K8sServiceSelector != nil {
		in, out := &in.K8sServiceSelector, &out.K8sServiceSelector
		*out = new(K8sServiceSelectorNamespace)
		(*in).DeepCopyInto(*out)
	}
	if in.K8sService != nil {
		in, out := &in.K8sService, &out.K8sService
		*out = new(K8sServiceNamespace)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
// It returns nil when the receiver is nil.
func (in *Service) DeepCopy() *Service {
	if in == nil {
		return nil
	}
	out := new(Service)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSelector) DeepCopyInto(out *ServiceSelector) {
	*out = *in
	if in.LabelSelector != nil {
		in, out := &in.LabelSelector, &out.LabelSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	// requirements is a pointer to a slice; both the pointer and, when the
	// pointee is non-nil, the slice elements are cloned.
	if in.requirements != nil {
		in, out := &in.requirements, &out.requirements
		*out = new(labels.Requirements)
		if **in != nil {
			in, out := *in, *out
			*out = make([]labels.Requirement, len(*in))
			for i := range *in {
				(*in)[i].DeepCopyInto(&(*out)[i])
			}
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSelector.
// It returns nil when the receiver is nil.
func (in *ServiceSelector) DeepCopy() *ServiceSelector {
	if in == nil {
		return nil
	}
	out := new(ServiceSelector)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSContext) DeepCopyInto(out *TLSContext) {
	*out = *in
	// Re-allocate the Secret pointer so out does not alias in's pointee.
	if in.Secret != nil {
		in, out := &in.Secret, &out.Secret
		*out = new(Secret)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSContext.
// It returns nil when the receiver is nil.
func (in *TLSContext) DeepCopy() *TLSContext {
	if in == nil {
		return nil
	}
	out := new(TLSContext)
	in.DeepCopyInto(out)
	return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package api
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. A nil other is never equal.
func (in *AWSGroup) DeepEqual(other *AWSGroup) bool {
	if other == nil {
		return false
	}
	// Field guard pattern: the body is entered when both sides are non-nil
	// OR when exactly one side is nil (which then fails the length check or
	// the inner guard); only both-nil is treated as trivially equal.
	if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
		in, other := &in.Labels, &other.Labels
		// NOTE: &other.Labels is never nil; this guard is generated boilerplate.
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for key, inValue := range *in {
				if otherValue, present := (*other)[key]; !present {
					return false
				} else {
					if inValue != otherValue {
						return false
					}
				}
			}
		}
	}
	if ((in.SecurityGroupsIds != nil) && (other.SecurityGroupsIds != nil)) || ((in.SecurityGroupsIds == nil) != (other.SecurityGroupsIds == nil)) {
		in, other := &in.SecurityGroupsIds, &other.SecurityGroupsIds
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if ((in.SecurityGroupsNames != nil) && (other.SecurityGroupsNames != nil)) || ((in.SecurityGroupsNames == nil) != (other.SecurityGroupsNames == nil)) {
		in, other := &in.SecurityGroupsNames, &other.SecurityGroupsNames
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.Region != other.Region {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. A nil other is never equal;
// otherwise equality reduces to comparing the single Mode field.
func (in *Authentication) DeepEqual(other *Authentication) bool {
	if other == nil {
		return false
	}
	if in.Mode != other.Mode {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. A nil other is never equal.
func (in *CIDRRule) DeepEqual(other *CIDRRule) bool {
	if other == nil {
		return false
	}
	if in.Cidr != other.Cidr {
		return false
	}
	if in.CIDRGroupRef != other.CIDRGroupRef {
		return false
	}
	// Pointer field: differing nil-ness is unequal; both non-nil recurses.
	if (in.CIDRGroupSelector == nil) != (other.CIDRGroupSelector == nil) {
		return false
	} else if in.CIDRGroupSelector != nil {
		if !in.CIDRGroupSelector.DeepEqual(other.CIDRGroupSelector) {
			return false
		}
	}
	if ((in.ExceptCIDRs != nil) && (other.ExceptCIDRs != nil)) || ((in.ExceptCIDRs == nil) != (other.ExceptCIDRs == nil)) {
		in, other := &in.ExceptCIDRs, &other.ExceptCIDRs
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if in.Generated != other.Generated {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Slices are equal when they have
// the same length and every element compares DeepEqual pairwise.
func (in *CIDRRuleSlice) DeepEqual(other *CIDRRuleSlice) bool {
	if other == nil {
		return false
	}
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. Elements are plain values, so
// pairwise != comparison suffices after the length check.
func (in *CIDRSlice) DeepEqual(other *CIDRSlice) bool {
	if other == nil {
		return false
	}
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if inElement != (*other)[i] {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil. For each *bool field, differing
// nil-ness is unequal; when both are non-nil the pointees are compared.
func (in *DefaultDenyConfig) DeepEqual(other *DefaultDenyConfig) bool {
	if other == nil {
		return false
	}
	if (in.Ingress == nil) != (other.Ingress == nil) {
		return false
	} else if in.Ingress != nil {
		if *in.Ingress != *other.Ingress {
			return false
		}
	}
	if (in.Egress == nil) != (other.Egress == nil) {
		return false
	} else if in.Egress != nil {
		if *in.Egress != *other.Egress {
			return false
		}
	}
	return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Guard pattern used for every slice field below: the outer condition is
// true when both sides are non-nil OR exactly one side is nil. Since
// len(nil) == 0, a nil slice and an empty non-nil slice compare equal,
// while nil-vs-non-empty fails on the length check. The shadowed
// `other == nil` test inside each branch is a generator artifact: the
// address of a struct field can never be nil.
func (in *EgressCommonRule) deepEqual(other *EgressCommonRule) bool {
	if other == nil {
		return false
	}
	if ((in.ToEndpoints != nil) && (other.ToEndpoints != nil)) || ((in.ToEndpoints == nil) != (other.ToEndpoints == nil)) {
		in, other := &in.ToEndpoints, &other.ToEndpoints
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.ToRequires != nil) && (other.ToRequires != nil)) || ((in.ToRequires == nil) != (other.ToRequires == nil)) {
		in, other := &in.ToRequires, &other.ToRequires
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	// The next three fields delegate to the slice types' own DeepEqual.
	if ((in.ToCIDR != nil) && (other.ToCIDR != nil)) || ((in.ToCIDR == nil) != (other.ToCIDR == nil)) {
		in, other := &in.ToCIDR, &other.ToCIDR
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.ToCIDRSet != nil) && (other.ToCIDRSet != nil)) || ((in.ToCIDRSet == nil) != (other.ToCIDRSet == nil)) {
		in, other := &in.ToCIDRSet, &other.ToCIDRSet
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.ToEntities != nil) && (other.ToEntities != nil)) || ((in.ToEntities == nil) != (other.ToEntities == nil)) {
		in, other := &in.ToEntities, &other.ToEntities
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.ToServices != nil) && (other.ToServices != nil)) || ((in.ToServices == nil) != (other.ToServices == nil)) {
		in, other := &in.ToServices, &other.ToServices
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.ToGroups != nil) && (other.ToGroups != nil)) || ((in.ToGroups == nil) != (other.ToGroups == nil)) {
		in, other := &in.ToGroups, &other.ToGroups
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.ToNodes != nil) && (other.ToNodes != nil)) || ((in.ToNodes == nil) != (other.ToNodes == nil)) {
		in, other := &in.ToNodes, &other.ToNodes
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	// Unexported cache of the selectors derived from the fields above; it is
	// included in the comparison by the generator.
	if ((in.aggregatedSelectors != nil) && (other.aggregatedSelectors != nil)) || ((in.aggregatedSelectors == nil) != (other.aggregatedSelectors == nil)) {
		in, other := &in.aggregatedSelectors, &other.aggregatedSelectors
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EgressDenyRule) DeepEqual(other *EgressDenyRule) bool {
	if other == nil {
		return false
	}
	// Compare the embedded common rule first, then the deny-specific fields.
	if !in.EgressCommonRule.DeepEqual(&other.EgressCommonRule) {
		return false
	}
	if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
		in, other := &in.ToPorts, &other.ToPorts
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.ICMPs != nil) && (other.ICMPs != nil)) || ((in.ICMPs == nil) != (other.ICMPs == nil)) {
		in, other := &in.ICMPs, &other.ICMPs
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EgressRule) DeepEqual(other *EgressRule) bool {
	if other == nil {
		return false
	}
	// Compare the embedded common rule first, then the allow-specific fields.
	if !in.EgressCommonRule.DeepEqual(&other.EgressCommonRule) {
		return false
	}
	if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
		in, other := &in.ToPorts, &other.ToPorts
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.ToFQDNs != nil) && (other.ToFQDNs != nil)) || ((in.ToFQDNs == nil) != (other.ToFQDNs == nil)) {
		in, other := &in.ToFQDNs, &other.ToFQDNs
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.ICMPs != nil) && (other.ICMPs != nil)) || ((in.ICMPs == nil) != (other.ICMPs == nil)) {
		in, other := &in.ICMPs, &other.ICMPs
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	// Authentication is a pointer field: both nil, or both non-nil and equal.
	if (in.Authentication == nil) != (other.Authentication == nil) {
		return false
	} else if in.Authentication != nil {
		if !in.Authentication.DeepEqual(other.Authentication) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointSelector) DeepEqual(other *EndpointSelector) bool {
	if other == nil {
		return false
	}
	if (in.LabelSelector == nil) != (other.LabelSelector == nil) {
		return false
	} else if in.LabelSelector != nil {
		if !in.LabelSelector.DeepEqual(other.LabelSelector) {
			return false
		}
	}
	// Unexported derived/cached state is compared as well, so two selectors
	// with equal LabelSelectors but different cache state are not DeepEqual.
	if (in.requirements == nil) != (other.requirements == nil) {
		return false
	} else if in.requirements != nil {
		if !in.requirements.DeepEqual(other.requirements) {
			return false
		}
	}
	if in.cachedLabelSelectorString != other.cachedLabelSelectorString {
		return false
	}
	if in.Generated != other.Generated {
		return false
	}
	if in.sanitized != other.sanitized {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointSelectorSlice) DeepEqual(other *EndpointSelectorSlice) bool {
	if other == nil {
		return false
	}
	// Element-wise, order-sensitive comparison.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EntitySlice) DeepEqual(other *EntitySlice) bool {
	if other == nil {
		return false
	}
	// Entities are comparable values, so plain != suffices; order matters.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if inElement != (*other)[i] {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EnvoyConfig) DeepEqual(other *EnvoyConfig) bool {
	if other == nil {
		return false
	}
	// Plain field-by-field comparison of comparable fields.
	if in.Kind != other.Kind {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *FQDNSelector) DeepEqual(other *FQDNSelector) bool {
	if other == nil {
		return false
	}
	// Plain field-by-field comparison of comparable fields.
	if in.MatchName != other.MatchName {
		return false
	}
	if in.MatchPattern != other.MatchPattern {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *FQDNSelectorSlice) DeepEqual(other *FQDNSelectorSlice) bool {
	if other == nil {
		return false
	}
	// Element-wise, order-sensitive comparison.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Groups) DeepEqual(other *Groups) bool {
	if other == nil {
		return false
	}
	// AWS is a pointer field: both nil, or both non-nil and deeply equal.
	if (in.AWS == nil) != (other.AWS == nil) {
		return false
	} else if in.AWS != nil {
		if !in.AWS.DeepEqual(other.AWS) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *HeaderMatch) DeepEqual(other *HeaderMatch) bool {
	if other == nil {
		return false
	}
	if in.Mismatch != other.Mismatch {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	// Secret is a pointer field: both nil, or both non-nil and deeply equal.
	if (in.Secret == nil) != (other.Secret == nil) {
		return false
	} else if in.Secret != nil {
		if !in.Secret.DeepEqual(other.Secret) {
			return false
		}
	}
	if in.Value != other.Value {
		return false
	}
	return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// NOTE(review): only the Family field is compared here; presumably other
// ICMPField fields are handled by a hand-written wrapper or excluded by
// generator directives — confirm against the ICMPField type definition.
func (in *ICMPField) deepEqual(other *ICMPField) bool {
	if other == nil {
		return false
	}
	if in.Family != other.Family {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ICMPRule) DeepEqual(other *ICMPRule) bool {
	if other == nil {
		return false
	}
	// Slice guard: nil and empty slices compare equal; nil-vs-non-empty
	// fails on the length check.
	if ((in.Fields != nil) && (other.Fields != nil)) || ((in.Fields == nil) != (other.Fields == nil)) {
		in, other := &in.Fields, &other.Fields
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ICMPRules) DeepEqual(other *ICMPRules) bool {
	if other == nil {
		return false
	}
	// Element-wise, order-sensitive comparison.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Guard pattern used for every slice field below: the outer condition is
// true when both sides are non-nil OR exactly one side is nil. Since
// len(nil) == 0, a nil slice and an empty non-nil slice compare equal,
// while nil-vs-non-empty fails on the length check. The shadowed
// `other == nil` test inside each branch is a generator artifact: the
// address of a struct field can never be nil.
func (in *IngressCommonRule) deepEqual(other *IngressCommonRule) bool {
	if other == nil {
		return false
	}
	if ((in.FromEndpoints != nil) && (other.FromEndpoints != nil)) || ((in.FromEndpoints == nil) != (other.FromEndpoints == nil)) {
		in, other := &in.FromEndpoints, &other.FromEndpoints
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.FromRequires != nil) && (other.FromRequires != nil)) || ((in.FromRequires == nil) != (other.FromRequires == nil)) {
		in, other := &in.FromRequires, &other.FromRequires
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	// The next three fields delegate to the slice types' own DeepEqual.
	if ((in.FromCIDR != nil) && (other.FromCIDR != nil)) || ((in.FromCIDR == nil) != (other.FromCIDR == nil)) {
		in, other := &in.FromCIDR, &other.FromCIDR
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.FromCIDRSet != nil) && (other.FromCIDRSet != nil)) || ((in.FromCIDRSet == nil) != (other.FromCIDRSet == nil)) {
		in, other := &in.FromCIDRSet, &other.FromCIDRSet
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.FromEntities != nil) && (other.FromEntities != nil)) || ((in.FromEntities == nil) != (other.FromEntities == nil)) {
		in, other := &in.FromEntities, &other.FromEntities
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.FromGroups != nil) && (other.FromGroups != nil)) || ((in.FromGroups == nil) != (other.FromGroups == nil)) {
		in, other := &in.FromGroups, &other.FromGroups
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.FromNodes != nil) && (other.FromNodes != nil)) || ((in.FromNodes == nil) != (other.FromNodes == nil)) {
		in, other := &in.FromNodes, &other.FromNodes
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	// Unexported cache of the selectors derived from the fields above; it is
	// included in the comparison by the generator.
	if ((in.aggregatedSelectors != nil) && (other.aggregatedSelectors != nil)) || ((in.aggregatedSelectors == nil) != (other.aggregatedSelectors == nil)) {
		in, other := &in.aggregatedSelectors, &other.aggregatedSelectors
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IngressDenyRule) DeepEqual(other *IngressDenyRule) bool {
	if other == nil {
		return false
	}
	// Compare the embedded common rule first, then the deny-specific fields.
	if !in.IngressCommonRule.DeepEqual(&other.IngressCommonRule) {
		return false
	}
	if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
		in, other := &in.ToPorts, &other.ToPorts
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.ICMPs != nil) && (other.ICMPs != nil)) || ((in.ICMPs == nil) != (other.ICMPs == nil)) {
		in, other := &in.ICMPs, &other.ICMPs
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IngressRule) DeepEqual(other *IngressRule) bool {
	if other == nil {
		return false
	}
	// Compare the embedded common rule first, then the allow-specific fields.
	if !in.IngressCommonRule.DeepEqual(&other.IngressCommonRule) {
		return false
	}
	if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
		in, other := &in.ToPorts, &other.ToPorts
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if ((in.ICMPs != nil) && (other.ICMPs != nil)) || ((in.ICMPs == nil) != (other.ICMPs == nil)) {
		in, other := &in.ICMPs, &other.ICMPs
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	// Authentication is a pointer field: both nil, or both non-nil and equal.
	if (in.Authentication == nil) != (other.Authentication == nil) {
		return false
	} else if in.Authentication != nil {
		if !in.Authentication.DeepEqual(other.Authentication) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *K8sServiceNamespace) DeepEqual(other *K8sServiceNamespace) bool {
	if other == nil {
		return false
	}
	// Plain field-by-field comparison of comparable fields.
	if in.ServiceName != other.ServiceName {
		return false
	}
	if in.Namespace != other.Namespace {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *K8sServiceSelectorNamespace) DeepEqual(other *K8sServiceSelectorNamespace) bool {
	if other == nil {
		return false
	}
	// Selector is an embedded value; delegate to its DeepEqual.
	if !in.Selector.DeepEqual(&other.Selector) {
		return false
	}
	if in.Namespace != other.Namespace {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Each protocol-specific rule slice uses the generator's slice guard: nil
// and empty slices compare equal; nil-vs-non-empty fails on the length check.
func (in *L7Rules) DeepEqual(other *L7Rules) bool {
	if other == nil {
		return false
	}
	if ((in.HTTP != nil) && (other.HTTP != nil)) || ((in.HTTP == nil) != (other.HTTP == nil)) {
		in, other := &in.HTTP, &other.HTTP
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.Kafka != nil) && (other.Kafka != nil)) || ((in.Kafka == nil) != (other.Kafka == nil)) {
		in, other := &in.Kafka, &other.Kafka
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.DNS != nil) && (other.DNS != nil)) || ((in.DNS == nil) != (other.DNS == nil)) {
		in, other := &in.DNS, &other.DNS
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if in.L7Proto != other.L7Proto {
		return false
	}
	if ((in.L7 != nil) && (other.L7 != nil)) || ((in.L7 == nil) != (other.L7 == nil)) {
		in, other := &in.L7, &other.L7
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Listener) DeepEqual(other *Listener) bool {
	if other == nil {
		return false
	}
	// EnvoyConfig is a pointer field: both nil, or both non-nil and equal.
	if (in.EnvoyConfig == nil) != (other.EnvoyConfig == nil) {
		return false
	} else if in.EnvoyConfig != nil {
		if !in.EnvoyConfig.DeepEqual(other.EnvoyConfig) {
			return false
		}
	}
	if in.Name != other.Name {
		return false
	}
	if in.Priority != other.Priority {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LogConfig) DeepEqual(other *LogConfig) bool {
	if other == nil {
		return false
	}
	// Single comparable field.
	if in.Value != other.Value {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortDenyRule) DeepEqual(other *PortDenyRule) bool {
	if other == nil {
		return false
	}
	// Slice guard: nil and empty slices compare equal; nil-vs-non-empty
	// fails on the length check.
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortDenyRules) DeepEqual(other *PortDenyRules) bool {
	if other == nil {
		return false
	}
	// Element-wise, order-sensitive comparison.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortProtocol) DeepEqual(other *PortProtocol) bool {
	if other == nil {
		return false
	}
	// Plain field-by-field comparison of comparable fields.
	if in.Port != other.Port {
		return false
	}
	if in.EndPort != other.EndPort {
		return false
	}
	if in.Protocol != other.Protocol {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Slice fields use the generator's guard (nil and empty compare equal);
// pointer fields must either both be nil or both be non-nil and equal.
func (in *PortRule) DeepEqual(other *PortRule) bool {
	if other == nil {
		return false
	}
	if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
		in, other := &in.Ports, &other.Ports
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if (in.TerminatingTLS == nil) != (other.TerminatingTLS == nil) {
		return false
	} else if in.TerminatingTLS != nil {
		if !in.TerminatingTLS.DeepEqual(other.TerminatingTLS) {
			return false
		}
	}
	if (in.OriginatingTLS == nil) != (other.OriginatingTLS == nil) {
		return false
	} else if in.OriginatingTLS != nil {
		if !in.OriginatingTLS.DeepEqual(other.OriginatingTLS) {
			return false
		}
	}
	if ((in.ServerNames != nil) && (other.ServerNames != nil)) || ((in.ServerNames == nil) != (other.ServerNames == nil)) {
		in, other := &in.ServerNames, &other.ServerNames
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	if (in.Listener == nil) != (other.Listener == nil) {
		return false
	} else if in.Listener != nil {
		if !in.Listener.DeepEqual(other.Listener) {
			return false
		}
	}
	if (in.Rules == nil) != (other.Rules == nil) {
		return false
	} else if in.Rules != nil {
		if !in.Rules.DeepEqual(other.Rules) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortRuleDNS) DeepEqual(other *PortRuleDNS) bool {
	if other == nil {
		return false
	}
	// Plain field-by-field comparison of comparable fields.
	if in.MatchName != other.MatchName {
		return false
	}
	if in.MatchPattern != other.MatchPattern {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortRuleHTTP) DeepEqual(other *PortRuleHTTP) bool {
	if other == nil {
		return false
	}
	if in.Path != other.Path {
		return false
	}
	if in.Method != other.Method {
		return false
	}
	if in.Host != other.Host {
		return false
	}
	// Headers holds comparable values; nil and empty slices compare equal.
	if ((in.Headers != nil) && (other.Headers != nil)) || ((in.Headers == nil) != (other.Headers == nil)) {
		in, other := &in.Headers, &other.Headers
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if inElement != (*other)[i] {
					return false
				}
			}
		}
	}
	// HeaderMatches holds pointer elements, hence DeepEqual((*other)[i])
	// without taking an address.
	if ((in.HeaderMatches != nil) && (other.HeaderMatches != nil)) || ((in.HeaderMatches == nil) != (other.HeaderMatches == nil)) {
		in, other := &in.HeaderMatches, &other.HeaderMatches
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual((*other)[i]) {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortRuleL7) DeepEqual(other *PortRuleL7) bool {
	if other == nil {
		return false
	}
	// Map comparison: equal lengths, and every key present in 'in' must be
	// present in 'other' with an equal value. Equal lengths plus a subset
	// relation implies the key sets are identical.
	if len(*in) != len(*other) {
		return false
	} else {
		for key, inValue := range *in {
			if otherValue, present := (*other)[key]; !present {
				return false
			} else {
				if inValue != otherValue {
					return false
				}
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortRules) DeepEqual(other *PortRules) bool {
	if other == nil {
		return false
	}
	// Element-wise, order-sensitive comparison.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual(&(*other)[i]) {
				return false
			}
		}
	}
	return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
//
// Rule slices use the generator's guard (nil and empty compare equal,
// order-sensitive element comparison); scalar fields use plain !=.
func (in *Rule) deepEqual(other *Rule) bool {
	if other == nil {
		return false
	}
	if !in.EndpointSelector.DeepEqual(&other.EndpointSelector) {
		return false
	}
	if !in.NodeSelector.DeepEqual(&other.NodeSelector) {
		return false
	}
	if ((in.Ingress != nil) && (other.Ingress != nil)) || ((in.Ingress == nil) != (other.Ingress == nil)) {
		in, other := &in.Ingress, &other.Ingress
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.IngressDeny != nil) && (other.IngressDeny != nil)) || ((in.IngressDeny == nil) != (other.IngressDeny == nil)) {
		in, other := &in.IngressDeny, &other.IngressDeny
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.Egress != nil) && (other.Egress != nil)) || ((in.Egress == nil) != (other.Egress == nil)) {
		in, other := &in.Egress, &other.Egress
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.EgressDeny != nil) && (other.EgressDeny != nil)) || ((in.EgressDeny == nil) != (other.EgressDeny == nil)) {
		in, other := &in.EgressDeny, &other.EgressDeny
		if other == nil {
			return false
		}
		if len(*in) != len(*other) {
			return false
		} else {
			for i, inElement := range *in {
				if !inElement.DeepEqual(&(*other)[i]) {
					return false
				}
			}
		}
	}
	if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
		in, other := &in.Labels, &other.Labels
		if other == nil || !in.DeepEqual(other) {
			return false
		}
	}
	if !in.EnableDefaultDeny.DeepEqual(&other.EnableDefaultDeny) {
		return false
	}
	if in.Description != other.Description {
		return false
	}
	if in.Log != other.Log {
		return false
	}
	return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Rules) deepEqual(other *Rules) bool {
	if other == nil {
		return false
	}
	// Elements are pointers, hence DeepEqual((*other)[i]) without taking
	// an address; order matters.
	if len(*in) != len(*other) {
		return false
	} else {
		for i, inElement := range *in {
			if !inElement.DeepEqual((*other)[i]) {
				return false
			}
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Secret) DeepEqual(other *Secret) bool {
	if other == nil {
		return false
	}
	// Plain field-by-field comparison of comparable fields.
	if in.Namespace != other.Namespace {
		return false
	}
	if in.Name != other.Name {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Service) DeepEqual(other *Service) bool {
	if other == nil {
		return false
	}
	// Both fields are pointers: each must be nil on both sides, or non-nil
	// on both sides and deeply equal.
	if (in.K8sServiceSelector == nil) != (other.K8sServiceSelector == nil) {
		return false
	} else if in.K8sServiceSelector != nil {
		if !in.K8sServiceSelector.DeepEqual(other.K8sServiceSelector) {
			return false
		}
	}
	if (in.K8sService == nil) != (other.K8sService == nil) {
		return false
	} else if in.K8sService != nil {
		if !in.K8sService.DeepEqual(other.K8sService) {
			return false
		}
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ServiceSelector) DeepEqual(other *ServiceSelector) bool {
	if other == nil {
		return false
	}
	if (in.LabelSelector == nil) != (other.LabelSelector == nil) {
		return false
	} else if in.LabelSelector != nil {
		if !in.LabelSelector.DeepEqual(other.LabelSelector) {
			return false
		}
	}
	// Unexported derived/cached state is compared as well (same shape as
	// EndpointSelector.DeepEqual).
	if (in.requirements == nil) != (other.requirements == nil) {
		return false
	} else if in.requirements != nil {
		if !in.requirements.DeepEqual(other.requirements) {
			return false
		}
	}
	if in.cachedLabelSelectorString != other.cachedLabelSelectorString {
		return false
	}
	if in.Generated != other.Generated {
		return false
	}
	if in.sanitized != other.sanitized {
		return false
	}
	return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *TLSContext) DeepEqual(other *TLSContext) bool {
	if other == nil {
		return false
	}
	// Secret is a pointer field: both nil, or both non-nil and deeply equal.
	if (in.Secret == nil) != (other.Secret == nil) {
		return false
	} else if in.Secret != nil {
		if !in.Secret.DeepEqual(other.Secret) {
			return false
		}
	}
	if in.TrustedCA != other.TrustedCA {
		return false
	}
	if in.Certificate != other.Certificate {
		return false
	}
	if in.PrivateKey != other.PrivateKey {
		return false
	}
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"net/netip"
"github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/policy/api"
"k8s.io/apimachinery/pkg/util/sets"
)
// getPrefixesFromCIDR fetches all CIDRs referred to by the specified slice
// and returns them as regular golang CIDR objects.
//
// Parse errors and invalid entries reported by ip.ParsePrefixes are
// discarded; presumably the input was validated upstream — confirm before
// relying on this for unvalidated input.
func getPrefixesFromCIDR(cidrs api.CIDRSlice) []netip.Prefix {
	result, _, _ := ip.ParsePrefixes(cidrs.StringSlice())
	return result
}
// GetPrefixesFromCIDRSet fetches all CIDRs referred to by the specified slice
// and returns them as regular golang CIDR objects. Includes CIDRs listed in
// ExceptCIDRs fields.
//
// Assumes that validation already occurred on 'rules'.
func GetPrefixesFromCIDRSet(rules api.CIDRRuleSlice) []netip.Prefix {
	prefixes := make([]netip.Prefix, 0, len(rules))
	// collect parses a single CIDR string and appends its masked prefix.
	// Parse failures are dropped: the rules were validated upstream, so a
	// failure here should not occur.
	collect := func(cidr string) {
		if pfx, err := netip.ParsePrefix(cidr); err == nil {
			prefixes = append(prefixes, pfx.Masked())
		}
	}
	for _, rule := range rules {
		if rule.Cidr != "" {
			collect(string(rule.Cidr))
		}
		for _, except := range rule.ExceptCIDRs {
			collect(string(except))
		}
	}
	return prefixes
}
// GetCIDRPrefixes runs through the specified 'rules' to find every reference
// to a CIDR in the rules, and returns a slice containing all of these CIDRs.
//
// Includes prefixes referenced solely by "ExceptCIDRs" entries.
//
// Assumes that validation already occurred on 'rules'.
func GetCIDRPrefixes(rules api.Rules) []netip.Prefix {
	if len(rules) == 0 {
		return nil
	}
	// Use a set to deduplicate prefixes referenced by multiple rules.
	res := make(sets.Set[netip.Prefix], 32)
	// collect gathers the prefixes of one rule's CIDR and CIDRSet fields.
	// Ingress uses FromCIDR/FromCIDRSet and egress ToCIDR/ToCIDRSet, but the
	// field types are identical, so one helper covers all four rule kinds.
	collect := func(cidrs api.CIDRSlice, cidrRules api.CIDRRuleSlice) {
		if len(cidrs) > 0 {
			res.Insert(getPrefixesFromCIDR(cidrs)...)
		}
		if len(cidrRules) > 0 {
			res.Insert(GetPrefixesFromCIDRSet(cidrRules)...)
		}
	}
	for _, r := range rules {
		for _, ir := range r.Ingress {
			collect(ir.FromCIDR, ir.FromCIDRSet)
		}
		for _, ir := range r.IngressDeny {
			collect(ir.FromCIDR, ir.FromCIDRSet)
		}
		for _, er := range r.Egress {
			collect(er.ToCIDR, er.ToCIDRSet)
		}
		for _, er := range r.EgressDeny {
			collect(er.ToCIDR, er.ToCIDRSet)
		}
	}
	return res.UnsortedList()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
ipcacheTypes "github.com/cilium/cilium/pkg/ipcache/types"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
)
var (
	mutex lock.RWMutex // Protects enablePolicy
	enablePolicy string // Whether policy enforcement is enabled; expected values are documented on SetPolicyEnabled.
)
// SetPolicyEnabled sets the policy enablement configuration. Valid values are:
// - endpoint.AlwaysEnforce
// - endpoint.NeverEnforce
// - endpoint.DefaultEnforcement
func SetPolicyEnabled(val string) {
	mutex.Lock()
	defer mutex.Unlock()
	enablePolicy = val
}
// GetPolicyEnabled returns the policy enablement configuration
func GetPolicyEnabled() string {
	mutex.RLock()
	defer mutex.RUnlock()
	return enablePolicy
}
// AddOptions are options which can be passed to PolicyAdd
type AddOptions struct {
	// Replace if true indicates that existing rules with identical labels should be replaced
	Replace bool
	// ReplaceWithLabels if present indicates that existing rules with the
	// given LabelArray should be deleted.
	ReplaceWithLabels labels.LabelArray
	// Generated should be set to true to signal that the policy being
	// inserted was generated by cilium-agent, e.g. dns poller.
	Generated bool
	// The source of this policy, one of api, fqdn or k8s
	Source source.Source
	// The time the policy initially began to be processed in Cilium, such as when the
	// policy was received from the API server.
	ProcessingStartTime time.Time
	// Resource provides the object ID for the underlying object that backs
	// this information from 'source'.
	Resource ipcacheTypes.ResourceID
	// ReplaceByResource indicates the policy repository should replace any
	// rules owned by the given Resource with the new set of rules
	ReplaceByResource bool
}
// DeleteOptions are options which can be passed to PolicyDelete
type DeleteOptions struct {
	// The source of this policy, one of api, fqdn or k8s
	Source source.Source
	// Resource provides the object ID for the underlying object that backs
	// this information from 'source'.
	Resource ipcacheTypes.ResourceID
	// DeleteByResource should be true if the resource should be used to identify
	// which rules should be deleted.
	DeleteByResource bool
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package correlation
import (
"log/slog"
"strings"
flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/identity"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/policy"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
policyTypes "github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/u8proto"
)
// CorrelatePolicy updates the IngressAllowedBy/EgressAllowedBy fields on the
// provided flow.
//
// The flow is mutated in place: depending on traffic direction and verdict,
// one of EgressAllowedBy/EgressDeniedBy/IngressAllowedBy/IngressDeniedBy is
// populated with the matched policy rules, and PolicyLog is set. Flows that
// are not policy-verdict events, or whose verdict cannot be attributed to a
// specific policy, are left untouched.
func CorrelatePolicy(logger *slog.Logger, endpointGetter getters.EndpointGetter, f *flowpb.Flow) {
	if f.GetEventType().GetType() != int32(monitorAPI.MessageTypePolicyVerdict) {
		// If it's not a policy verdict, we don't care.
		return
	}
	// We are only interested in flows which are either allowed (i.e. the verdict is either
	// FORWARDED or REDIRECTED) or explicitly denied (i.e. DROPPED, and matched by a deny policy),
	// since we cannot usefully annotate the verdict otherwise. (Put differently, which policy
	// should be listed in {in|e}gress_denied_by for an unmatched flow?)
	verdict := f.GetVerdict()
	allowed := verdict == flowpb.Verdict_FORWARDED || verdict == flowpb.Verdict_REDIRECTED
	denied := verdict == flowpb.Verdict_DROPPED && f.GetDropReasonDesc() == flowpb.DropReason_POLICY_DENY
	if !(allowed || denied) {
		return
	}
	// extract fields relevant for looking up the policy
	direction, endpointID, remoteIdentity, proto, dport := extractFlowKey(f)
	// A zero port or protocol means the key could not be extracted from the
	// flow, so no policy lookup is possible.
	if dport == 0 || proto == 0 {
		logger.Debug(
			"failed to extract flow key",
			logfields.EndpointID, endpointID,
		)
		return
	}
	// obtain reference to endpoint on which the policy verdict was taken
	epInfo, ok := endpointGetter.GetEndpointInfoByID(endpointID)
	if !ok {
		logger.Debug(
			"failed to lookup endpoint",
			logfields.EndpointID, endpointID,
		)
		return
	}
	info, ok := lookupPolicyForKey(epInfo,
		policy.KeyForDirection(direction).WithIdentity(remoteIdentity).WithPortProto(proto, dport),
		f.GetPolicyMatchType())
	if !ok {
		logger.Debug(
			"unable to find policy for policy verdict notification",
			logfields.Identity, remoteIdentity,
			logfields.Port, dport,
			logfields.Protocol, proto,
			logfields.TrafficDirection, direction,
		)
		return
	}
	rules := toProto(info)
	// Exactly one of the four fields is set, selected by direction and
	// whether the flow was allowed or denied.
	switch {
	case direction == trafficdirection.Egress && allowed:
		f.EgressAllowedBy = rules
	case direction == trafficdirection.Egress && denied:
		f.EgressDeniedBy = rules
	case direction == trafficdirection.Ingress && allowed:
		f.IngressAllowedBy = rules
	case direction == trafficdirection.Ingress && denied:
		f.IngressDeniedBy = rules
	}
	// policy log is independent of verdict
	f.PolicyLog = info.Log
}
// extractFlowKey pulls the policy-lookup fields out of a flow: the traffic
// direction, the local endpoint ID, the remote peer's numeric identity, and
// the L4 protocol/destination port.
func extractFlowKey(f *flowpb.Flow) (
	direction trafficdirection.TrafficDirection,
	endpointID uint16,
	remoteIdentity identity.NumericIdentity,
	proto u8proto.U8proto,
	dport uint16) {
	// The "local" endpoint is the source for egress flows and the
	// destination for ingress flows; the remote identity is the other side.
	switch f.GetTrafficDirection() {
	case flowpb.TrafficDirection_EGRESS:
		direction = trafficdirection.Egress
		// Endpoint IDs are 16-bit; the proto field is uint32 only because
		// protobuf has no 16-bit integer types.
		endpointID = uint16(f.GetSource().GetID())
		remoteIdentity = identity.NumericIdentity(f.GetDestination().GetIdentity())
	case flowpb.TrafficDirection_INGRESS:
		direction = trafficdirection.Ingress
		endpointID = uint16(f.GetDestination().GetID())
		remoteIdentity = identity.NumericIdentity(f.GetSource().GetIdentity())
	default:
		direction = trafficdirection.Invalid
		endpointID = 0
		remoteIdentity = identity.IdentityUnknown
	}

	// Derive the protocol and "port" from whichever L4 payload is present.
	// For ICMP the message type stands in for the destination port.
	l4 := f.GetL4()
	switch {
	case l4.GetTCP() != nil:
		proto = u8proto.TCP
		dport = uint16(l4.GetTCP().GetDestinationPort())
	case l4.GetUDP() != nil:
		proto = u8proto.UDP
		dport = uint16(l4.GetUDP().GetDestinationPort())
	case l4.GetICMPv4() != nil:
		proto = u8proto.ICMP
		dport = uint16(l4.GetICMPv4().Type)
	case l4.GetICMPv6() != nil:
		proto = u8proto.ICMPv6
		dport = uint16(l4.GetICMPv6().Type)
	case l4.GetSCTP() != nil:
		proto = u8proto.SCTP
		dport = uint16(l4.GetSCTP().GetDestinationPort())
	default:
		proto = u8proto.ANY
		dport = 0
	}
	return
}
// lookupPolicyForKey finds the policy correlation info for a policy map key.
//
// The datapath reports via matchType which parts of the key the matching rule
// actually constrained; the key is narrowed accordingly (dropping identity,
// port, and/or protocol) before the endpoint's policy map is consulted.
func lookupPolicyForKey(ep getters.EndpointInfo, key policy.Key, matchType uint32) (policyTypes.PolicyCorrelationInfo, bool) {
	base := policy.KeyForDirection(key.TrafficDirection())
	switch matchType {
	case monitorAPI.PolicyMatchL3L4:
		// Fully-specified match: identity + protocol + port. Use the key
		// as-is. Example: ingress from a pod selector on port 80/TCP.
	case monitorAPI.PolicyMatchL3Proto:
		// Identity + protocol, no port. Example: ingress from a pod
		// selector restricted to TCP but not to a specific port.
		key = base.WithIdentity(key.Identity).WithProto(key.Nexthdr)
	case monitorAPI.PolicyMatchL4Only:
		// Port(+protocol) only; any identity is allowed. Example: ingress
		// on port 80/TCP from all peers.
		key = base.WithPortProto(key.Nexthdr, key.DestPort)
	case monitorAPI.PolicyMatchProtoOnly:
		// Protocol only. Example: ingress allowing all TCP traffic.
		key = base.WithProto(key.Nexthdr)
	case monitorAPI.PolicyMatchL3Only:
		// Identity only. Example: ingress from a pod selector with no
		// port or protocol restriction.
		key = base.WithIdentity(key.Identity)
	case monitorAPI.PolicyMatchAll:
		// Allow-all: strip everything but the direction. Example: an
		// empty ingress rule ("- {}").
		key = base
	}
	return ep.GetPolicyCorrelationInfoForKey(key)
}
// toProto converts the correlation info into one flow Policy message per
// matched rule label set.
func toProto(info policyTypes.PolicyCorrelationInfo) []*flowpb.Policy {
	var out []*flowpb.Policy
	for model := range labels.ModelsFromLabelArrayListString(info.RuleLabels) {
		out = append(out, policyFromModel(model, info))
	}
	return out
}
// policyFromModel derives and sets fields in the flow policy from the label
// set array and policy correlation information.
//
// This function supports namespaced and cluster-scoped resources: for
// cluster-scoped resources the namespace field simply stays empty.
func policyFromModel(model []string, info policyTypes.PolicyCorrelationInfo) *flowpb.Policy {
	p := &flowpb.Policy{
		Labels:   model,
		Revision: info.Revision,
	}
	k8sPrefix := string(source.Kubernetes) + ":"
	for _, lbl := range model {
		rest, isK8sLabel := strings.CutPrefix(lbl, k8sPrefix)
		if !isK8sLabel {
			continue
		}
		key, value, found := strings.Cut(rest, "=")
		if !found {
			continue
		}
		switch key {
		case k8sConst.PolicyLabelName:
			p.Name = value
		case k8sConst.PolicyLabelNamespace:
			p.Namespace = value
		case k8sConst.PolicyLabelDerivedFrom:
			p.Kind = value
		default:
			// Once name, namespace and kind are all populated, no further
			// label can add information, so stop scanning early.
			if p.Kind != "" && p.Name != "" && p.Namespace != "" {
				return p
			}
		}
	}
	return p
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"sync/atomic"
"github.com/cilium/cilium/pkg/container/versioned"
identityPkg "github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/identitymanager"
"github.com/cilium/cilium/pkg/lock"
)
// policyCache represents a cache of resolved policies for identities.
// All accesses to the policies map are serialized by the embedded Mutex.
type policyCache struct {
	lock.Mutex
	// repo is a circular reference back to the Repository, but as
	// we create only one Repository and one PolicyCache for each
	// Cilium Agent process, these will never need to be garbage
	// collected.
	repo *Repository
	// policies maps a numeric security identity to its cached
	// selector policy wrapper.
	policies map[identityPkg.NumericIdentity]*cachedSelectorPolicy
}
// newPolicyCache creates a new cache of SelectorPolicy. If an identity
// manager is supplied, the cache subscribes to it so that cached policies
// are dropped when local endpoint identities disappear.
func newPolicyCache(repo *Repository, idmgr identitymanager.IDManager) *policyCache {
	c := &policyCache{
		repo:     repo,
		policies: map[identityPkg.NumericIdentity]*cachedSelectorPolicy{},
	}
	if idmgr != nil {
		idmgr.Subscribe(c)
	}
	return c
}
// lookupOrCreate adds the specified Identity to the policy cache if not
// already present, then returns the (possibly freshly created) cache entry.
func (cache *policyCache) lookupOrCreate(identity *identityPkg.Identity) *cachedSelectorPolicy {
	cache.Lock()
	defer cache.Unlock()

	if cip, ok := cache.policies[identity.ID]; ok {
		return cip
	}
	cip := newCachedSelectorPolicy(identity)
	cache.policies[identity.ID] = cip
	return cip
}
// GetPolicySnapshot returns a snapshot of the current policy cache,
// containing only identities that have a resolved policy.
// The policy snapshot has the lock order as: Repository.Mutex before policyCache.Mutex.
func (cache *policyCache) GetPolicySnapshot() map[identityPkg.NumericIdentity]SelectorPolicy {
	cache.Lock()
	defer cache.Unlock()

	out := make(map[identityPkg.NumericIdentity]SelectorPolicy, len(cache.policies))
	for id, cip := range cache.policies {
		// Entries without a resolved policy are omitted from the snapshot.
		if sp := cip.getPolicy(); sp != nil {
			out[id] = sp
		}
	}
	return out
}
// delete forgets about any cached SelectorPolicy for the given identity and
// detaches the resolved policy, if any, from the selector cache.
//
// Returns true if a SelectorPolicy entry was removed from the cache.
func (cache *policyCache) delete(identity *identityPkg.Identity) bool {
	cache.Lock()
	defer cache.Unlock()

	cip, ok := cache.policies[identity.ID]
	if !ok {
		return false
	}
	delete(cache.policies, identity.ID)
	if sp := cip.getPolicy(); sp != nil {
		sp.detach(true, 0)
	}
	return true
}
// updateSelectorPolicy resolves the policy for the security identity of the
// specified endpoint and stores it internally. It will skip policy resolution
// if the cached policy is already at the revision specified in the repo.
// The endpointID specifies which endpoint initiated this selector policy
// update. This ensures that endpoints are not continuously triggering
// regenerations of themselves if the selector policy is created and initiates
// a regeneration trigger on detach.
//
// Returns the policy, whether the cache was updated, or an error.
//
// Must be called with repo.Mutex held for reading.
func (cache *policyCache) updateSelectorPolicy(identity *identityPkg.Identity, endpointID uint64) (*selectorPolicy, bool, error) {
	cip := cache.lookupOrCreate(identity)

	// As long as UpdatePolicy() is triggered from endpoint regeneration, two
	// endpoints with the *same* identity may race to update the policy here.
	// Such racing would leave the first endpoint with a selectorPolicy that
	// is already detached from the selector cache, and thus not receiving
	// incremental updates. Hold 'cip' locked for the duration of the
	// revision check and the possible policy update to prevent this.
	cip.Lock()
	defer cip.Unlock()

	// Skip resolution if it was already done for this or a later revision.
	if existing := cip.getPolicy(); existing != nil && existing.Revision >= cache.repo.GetRevision() {
		return existing, false, nil
	}

	// Resolve the policies, which could fail.
	resolved, err := cache.repo.resolvePolicyLocked(identity)
	if err != nil {
		return nil, false, err
	}

	cip.setPolicy(resolved, endpointID)
	return resolved, true, nil
}
// LocalEndpointIdentityAdded is a no-op implementation of the identitymanager
// Observer interface; the cache only reacts to local endpoint identity
// deletion (see LocalEndpointIdentityRemoved).
func (cache *policyCache) LocalEndpointIdentityAdded(identity *identityPkg.Identity) {
}
// LocalEndpointIdentityRemoved deletes the cached SelectorPolicy for the
// specified Identity. Invoked by the identity manager when the last local
// endpoint using the identity goes away.
func (cache *policyCache) LocalEndpointIdentityRemoved(identity *identityPkg.Identity) {
	cache.delete(identity)
}
// getAuthTypes returns the AuthTypes required by the policy between the
// localID and remoteID, if any, otherwise returns nil.
func (cache *policyCache) getAuthTypes(localID, remoteID identityPkg.NumericIdentity) AuthTypes {
	cache.Lock()
	cip, ok := cache.policies[localID]
	cache.Unlock()
	if !ok {
		return nil // No policy for localID (no endpoint with localID)
	}

	// SelectorPolicy is const after it has been created, so no locking is
	// needed to access it.
	selPolicy := cip.getPolicy()
	if selPolicy == nil {
		return nil
	}

	var result AuthTypes
	for cs, authTypes := range selPolicy.L4Policy.authMap {
		// Determine whether this selector could contribute any auth type not
		// already in the result; only then is the (potentially expensive)
		// Selects() check performed.
		missing := false
		for at := range authTypes {
			if _, have := result[at]; !have {
				missing = true
				break
			}
		}
		if !missing || !cs.Selects(versioned.Latest(), remoteID) {
			continue
		}
		if result == nil {
			result = make(AuthTypes, 1)
		}
		for at := range authTypes {
			result[at] = struct{}{}
		}
	}
	return result
}
// cachedSelectorPolicy is a wrapper around a selectorPolicy (stored in the
// 'policy' field). It is always nested directly in the owning policyCache,
// and is protected against concurrent writes via the policyCache mutex.
type cachedSelectorPolicy struct {
	lock.Mutex // lock is needed to synchronize parallel policy updates
	// identity is the security identity this policy was resolved for.
	identity *identityPkg.Identity
	// policy holds the most recently resolved selectorPolicy; read lock-free
	// via atomic load, replaced via Swap in setPolicy.
	policy atomic.Pointer[selectorPolicy]
}
// newCachedSelectorPolicy returns a cache entry for the given identity with
// no resolved policy attached yet.
func newCachedSelectorPolicy(identity *identityPkg.Identity) *cachedSelectorPolicy {
	return &cachedSelectorPolicy{identity: identity}
}
// getPolicy returns a reference to the selectorPolicy that is cached, or nil
// if no policy has been resolved yet.
//
// Users should treat the result as immutable state that MUST NOT be modified.
func (cip *cachedSelectorPolicy) getPolicy() *selectorPolicy {
	return cip.policy.Load()
}
// setPolicy updates the reference to the SelectorPolicy that is cached and
// calls detach() on the previous policy, if any. The endpointID identifies
// the endpoint that initiated the replacement; since detach can trigger
// regenerations of all the policy's users, passing it along ensures that
// endpoints do not continuously regenerate themselves.
func (cip *cachedSelectorPolicy) setPolicy(policy *selectorPolicy, endpointID uint64) {
	// Release the references the previous policy holds on the selector cache.
	if prev := cip.policy.Swap(policy); prev != nil {
		prev.detach(false, endpointID)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"errors"
"fmt"
"io"
"log/slog"
"maps"
"net/netip"
"strings"
"sync"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/pkg/crypto/certificatemanager"
envoypolicy "github.com/cilium/cilium/pkg/envoy/policy"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/testutils"
testpolicy "github.com/cilium/cilium/pkg/testutils/policy"
)
// Local aliases for the auth types used throughout these tests.
const (
	AuthTypeSpire      = types.AuthTypeSpire
	AuthTypeAlwaysFail = types.AuthTypeAlwaysFail
	AuthTypeDisabled   = types.AuthTypeDisabled
)
// setupTest re-initializes the shared package-level test endpoints so that
// state does not leak between tests.
func setupTest(tb testing.TB) {
	ep1 = testutils.NewTestEndpoint(tb)
	ep2 = testutils.NewTestEndpoint(tb)
}
// Shared test endpoints, reset by setupTest. Both are created identically,
// so they share the same security identity.
var (
	ep1, ep2 testutils.TestEndpoint
)
// localIdentity builds a numeric identity in the cluster-local scope.
func localIdentity(n uint32) identity.NumericIdentity {
	return identity.IdentityScopeLocal | identity.NumericIdentity(n)
}
// TestCacheManagement exercises insertion, lookup and deletion semantics of
// the policy cache: entries are keyed by identity (not endpoint), there is no
// reference counting, and deleting an identity drops its cached policy.
func TestCacheManagement(t *testing.T) {
	setupTest(t)
	repo := NewPolicyRepository(hivetest.Logger(t), nil, nil, nil, nil, testpolicy.NewPolicyMetricsNoop())
	cache := repo.policyCache
	// ep1 and ep2 are created identically, so they share an identity.
	identity := ep1.GetSecurityIdentity()
	require.Equal(t, identity, ep2.GetSecurityIdentity())

	// Nonsense delete of entry that isn't yet inserted
	deleted := cache.delete(identity)
	require.False(t, deleted)

	// Insert directly to cache and delete the entry.
	csp := cache.lookupOrCreate(identity)
	require.NotNil(t, csp)
	// lookupOrCreate does not resolve a policy.
	require.Nil(t, csp.getPolicy())
	removed := cache.delete(identity)
	require.True(t, removed)

	// Insert identity twice. Should be the same policy.
	policy1, updated, err := cache.updateSelectorPolicy(identity, ep1.Id)
	require.NoError(t, err)
	require.True(t, updated)
	policy2, updated, err := cache.updateSelectorPolicy(identity, ep1.Id)
	require.NoError(t, err)
	// Second update is a no-op: revision unchanged.
	require.False(t, updated)
	// must be same pointer
	require.Same(t, policy2, policy1)

	// Despite two insert calls, there is no reference tracking; any delete
	// will clear the cache.
	cacheCleared := cache.delete(identity)
	require.True(t, cacheCleared)
	cacheCleared = cache.delete(identity)
	require.False(t, cacheCleared)

	// Insert two distinct identities, then delete one. Other should still
	// be there.
	ep3 := testutils.NewTestEndpoint(t)
	ep3.SetIdentity(1234, true)
	identity3 := ep3.GetSecurityIdentity()
	require.NotEqual(t, identity, identity3)
	policy1, _, _ = cache.updateSelectorPolicy(identity, ep1.Id)
	require.NotNil(t, policy1)
	policy3, _, _ := cache.updateSelectorPolicy(identity3, ep3.Id)
	require.NotNil(t, policy3)
	require.NotSame(t, policy3, policy1)
	_ = cache.delete(identity)
	// identity3's cached policy survives the deletion of 'identity', so
	// this update finds it up-to-date and does not recompute.
	_, updated, _ = cache.updateSelectorPolicy(identity3, ep3.Id)
	require.False(t, updated)
}
func TestCachePopulation(t *testing.T) {
repo := NewPolicyRepository(hivetest.Logger(t), nil, nil, nil, nil, testpolicy.NewPolicyMetricsNoop())
repo.revision.Store(42)
cache := repo.policyCache
identity1 := ep1.GetSecurityIdentity()
require.Equal(t, identity1, ep2.GetSecurityIdentity())
// Calculate the policy and observe that it's cached
policy1, updated, err := cache.updateSelectorPolicy(identity1, ep1.Id)
require.NoError(t, err)
require.True(t, updated)
_, updated, err = cache.updateSelectorPolicy(identity1, ep1.Id)
require.NoError(t, err)
require.False(t, updated)
policy2, _, _ := cache.updateSelectorPolicy(identity1, ep1.Id)
require.NotNil(t, policy2)
require.Same(t, policy1, policy2)
// Remove the identity and observe that it is no longer available
cacheCleared := cache.delete(identity1)
require.True(t, cacheCleared)
_, updated, _ = cache.updateSelectorPolicy(identity1, ep1.Id)
require.True(t, updated)
// Attempt to update policy for non-cached endpoint and observe failure
ep3 := testutils.NewTestEndpoint(t)
ep3.SetIdentity(1234, true)
policy3, updated, err := cache.updateSelectorPolicy(ep3.GetSecurityIdentity(), ep3.Id)
require.NoError(t, err)
require.True(t, updated)
// policy3 must be different from ep1, ep2
require.NoError(t, err)
require.NotEqual(t, policy1, policy3)
}
// Distillery integration tests
//
// Shared fixtures: identities, selectors, rule fragments and expected policy
// map keys/entries used by the distillery tests below. Trailing underscores
// in names pad them to a uniform width for readable table literals.
var (
	// Identity, labels, selectors for an endpoint named "foo"
	identityFoo = identity.NumericIdentity(100)
	labelsFoo   = labels.ParseSelectLabelArray("foo", "blue")
	selectFoo_  = api.NewESFromLabels(labels.ParseSelectLabel("foo"))
	allowFooL3_ = selectFoo_
	denyFooL3__ = selectFoo_

	// Identity, labels, selectors for an endpoint named "bar"
	identityBar = identity.NumericIdentity(200)
	labelsBar   = labels.ParseSelectLabelArray("bar", "blue")
	selectBar_  = api.NewESFromLabels(labels.ParseSelectLabel("bar"))
	allowBarL3_ = selectBar_

	// API rule sections for composability
	// L4 rule sections
	allowAllL4_ []api.PortRule
	allowPort80 = []api.PortRule{{
		Ports: []api.PortProtocol{
			{Port: "80", Protocol: api.ProtoTCP},
		},
	}}
	allowNamedPort80 = []api.PortRule{{
		Ports: []api.PortProtocol{
			{Port: "port-80", Protocol: api.ProtoTCP},
		},
	}}
	denyAllL4_ []api.PortDenyRule
	denyPort80 = []api.PortDenyRule{{
		Ports: []api.PortProtocol{
			{Port: "80", Protocol: api.ProtoTCP},
		},
	}}
	// L7 rule sections
	allowHTTPRoot = &api.L7Rules{
		HTTP: []api.PortRuleHTTP{
			{Method: "GET", Path: "/"},
		},
	}

	// API rule definitions for default-deny, L3, L3L4, L3L4L7, L4, L4L7
	lbls____NoAllow = labels.ParseLabelArray("no-allow")
	rule____NoAllow = api.NewRule().
			WithLabels(lbls____NoAllow).
			WithIngressRules([]api.IngressRule{{}})
	lblsL3____Allow = labels.ParseLabelArray("l3-allow")
	ruleL3____Allow = api.NewRule().
			WithLabels(lblsL3____Allow).
			WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowFooL3_},
			},
			ToPorts: allowAllL4_,
		}})
	lblsL3L4__Allow = labels.ParseLabelArray("l3l4-allow")
	ruleL3L4__Allow = api.NewRule().
			WithLabels(lblsL3L4__Allow).
			WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowFooL3_},
			},
			ToPorts: allowPort80,
		}})
	// "np" variants use a named port instead of a numeric one.
	ruleL3npL4__Allow = api.NewRule().
				WithLabels(lblsL3L4__Allow).
				WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowFooL3_},
			},
			ToPorts: allowNamedPort80,
		}})
	lblsL3L4L7Allow = labels.ParseLabelArray("l3l4l7-allow")
	ruleL3L4L7Allow = api.NewRule().
			WithLabels(lblsL3L4L7Allow).
			WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowFooL3_},
			},
			ToPorts: combineL4L7(allowPort80, allowHTTPRoot),
		}})
	ruleL3npL4L7Allow = api.NewRule().
				WithLabels(lblsL3L4L7Allow).
				WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowFooL3_},
			},
			ToPorts: combineL4L7(allowNamedPort80, allowHTTPRoot),
		}})
	lbls__L4__Allow = labels.ParseLabelArray("l4-allow")
	rule__L4__Allow = api.NewRule().
			WithLabels(lbls__L4__Allow).
			WithIngressRules([]api.IngressRule{{
			ToPorts: allowPort80,
		}})
	rule__L4__AllowAuth = api.NewRule().
				WithLabels(lbls__L4__Allow).
				WithIngressRules([]api.IngressRule{{
			ToPorts: allowPort80,
			Authentication: &api.Authentication{
				Mode: api.AuthenticationModeRequired,
			},
		}})
	rule__npL4__Allow = api.NewRule().
				WithLabels(lbls__L4__Allow).
				WithIngressRules([]api.IngressRule{{
			ToPorts: allowNamedPort80,
		}})
	lbls__L4L7Allow = labels.ParseLabelArray("l4l7-allow")
	rule__L4L7Allow = api.NewRule().
			WithLabels(lbls__L4L7Allow).
			WithIngressRules([]api.IngressRule{{
			ToPorts: combineL4L7(allowPort80, allowHTTPRoot),
		}})
	rule__npL4L7Allow = api.NewRule().
				WithLabels(lbls__L4L7Allow).
				WithIngressRules([]api.IngressRule{{
			ToPorts: combineL4L7(allowNamedPort80, allowHTTPRoot),
		}})
	lblsL3__AllowFoo = labels.ParseLabelArray("l3-allow-foo")
	ruleL3__AllowFoo = api.NewRule().
				WithLabels(lblsL3__AllowFoo).
				WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowFooL3_},
			},
		}})
	lblsL3__AllowBar = labels.ParseLabelArray("l3-allow-bar")
	ruleL3__AllowBar = api.NewRule().
				WithLabels(lblsL3__AllowBar).
				WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowBarL3_},
			},
		}})
	lblsL3L4AllowBar     = labels.ParseLabelArray("l3l4-allow-bar")
	ruleL3L4AllowBarAuth = api.NewRule().
				WithLabels(lblsL3L4AllowBar).
				WithIngressRules([]api.IngressRule{{
			ToPorts: allowPort80,
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowBarL3_},
			},
			Authentication: &api.Authentication{
				Mode: api.AuthenticationModeAlwaysFail,
			},
		}})
	ruleL3__AllowBarAuth = api.NewRule().
				WithLabels(lblsL3__AllowBar).
				WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{allowBarL3_},
			},
			Authentication: &api.Authentication{
				Mode: api.AuthenticationModeAlwaysFail,
			},
		}})
	lbls____AllowAll = labels.ParseLabelArray("allow-all")
	rule____AllowAll = api.NewRule().
				WithLabels(lbls____AllowAll).
				WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
			},
		}})
	rule____AllowAllAuth = api.NewRule().
				WithLabels(lbls____AllowAll).
				WithIngressRules([]api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
			},
			Authentication: &api.Authentication{
				Mode: api.AuthenticationModeRequired,
			},
		}})
	lblsAllowAllIngress = labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved),
	}

	// Deny-rule counterparts.
	lbls_____NoDeny = labels.ParseLabelArray("deny")
	rule_____NoDeny = api.NewRule().
			WithLabels(lbls_____NoDeny).
			WithIngressRules([]api.IngressRule{{}})
	lblsL3_____Deny = labels.ParseLabelArray("l3-deny")
	ruleL3_____Deny = api.NewRule().
			WithLabels(lblsL3_____Deny).
			WithIngressDenyRules([]api.IngressDenyRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{denyFooL3__},
			},
			ToPorts: denyAllL4_,
		}})
	lbls__L4___Deny = labels.ParseLabelArray("l4-deny")
	rule__L4___Deny = api.NewRule().
			WithLabels(lbls__L4___Deny).
			WithIngressDenyRules([]api.IngressDenyRule{{
			ToPorts: denyPort80,
		}})
	lblsL3L4___Deny = labels.ParseLabelArray("l3l4-deny")
	ruleL3L4___Deny = api.NewRule().
			WithLabels(lblsL3L4___Deny).
			WithIngressDenyRules([]api.IngressDenyRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEndpoints: []api.EndpointSelector{denyFooL3__},
			},
			ToPorts: denyPort80,
		}})

	// Desired map keys for L3, L3-dependent L4, L4
	mapKeyAllowFoo__ = IngressKey().WithIdentity(identityFoo)
	mapKeyAllowBar__ = IngressKey().WithIdentity(identityBar)
	mapKeyAllowBarL4 = IngressKey().WithIdentity(identityBar).WithTCPPort(80)
	mapKeyAllowFooL4 = IngressKey().WithIdentity(identityFoo).WithTCPPort(80)
	mapKeyDeny_Foo__ = mapKeyAllowFoo__
	mapKeyDeny_FooL4 = mapKeyAllowFooL4
	mapKeyAllow___L4 = IngressKey().WithTCPPort(80)
	mapKeyDeny____L4 = mapKeyAllow___L4
	mapKeyAllowAll__ = IngressKey()
	mapKeyAllowAllE_ = EgressKey()
	// Desired map entries for no L7 redirect / redirect to Proxy
	mapEntryL7None_ = func(lbls ...labels.LabelArray) mapStateEntry {
		return allowEntry().withLabels(lbls)
	}
	mapEntryL7ExplicitAuth_ = func(at AuthType, lbls ...labels.LabelArray) mapStateEntry {
		return allowEntry().withLabels(lbls).withExplicitAuth(at)
	}
	mapEntryL7DerivedAuth_ = func(at AuthType, lbls ...labels.LabelArray) mapStateEntry {
		return allowEntry().withLabels(lbls).withDerivedAuth(at)
	}
	mapEntryL7Deny = func(lbls ...labels.LabelArray) mapStateEntry {
		return denyEntry().withLabels(lbls)
	}
	mapEntryL7Proxy = func(lbls ...labels.LabelArray) mapStateEntry {
		return allowEntry().withLabels(lbls).withHTTPProxyPort(1)
	}
)
// combineL4L7 returns a new PortRule slice that refers to the specified l4
// ports with the given l7 rules attached to each entry.
func combineL4L7(l4 []api.PortRule, l7 *api.L7Rules) []api.PortRule {
	combined := make([]api.PortRule, len(l4))
	for i, pr := range l4 {
		combined[i] = api.PortRule{
			Ports: pr.Ports,
			Rules: l7,
		}
	}
	return combined
}
// policyDistillery is a convenience wrapper around the existing policy engine,
// allowing simple direct evaluation of L3 and L4 state into "MapState".
type policyDistillery struct {
	*Repository
	// log receives the policy trace output when set via WithLogBuffer.
	log io.Writer
}
// newPolicyDistillery builds a distillery around a fresh Repository whose
// selector cache is replaced with the provided one.
func newPolicyDistillery(t testing.TB, selectorCache *SelectorCache) *policyDistillery {
	repo := NewPolicyRepository(hivetest.Logger(t), nil, nil, envoypolicy.NewEnvoyL7RulesTranslator(hivetest.Logger(t), certificatemanager.NewMockSecretManagerInline()), nil, testpolicy.NewPolicyMetricsNoop())
	repo.selectorCache = selectorCache
	return &policyDistillery{Repository: repo}
}
// WithLogBuffer returns a copy of the distillery that writes its policy
// trace output to w; the underlying Repository is shared.
func (d *policyDistillery) WithLogBuffer(w io.Writer) *policyDistillery {
	clone := &policyDistillery{Repository: d.Repository}
	clone.log = w
	return clone
}
// distillEndpointPolicy distills the policy repository into an EndpointPolicy
// for the given identity. The caller is responsible for calling Ready() and
// Detach() when done with the returned policy.
func (d *policyDistillery) distillEndpointPolicy(logger *slog.Logger, owner PolicyOwner, identity *identity.Identity) (*EndpointPolicy, error) {
	sp, _, err := d.Repository.GetSelectorPolicy(identity, 0, &dummyPolicyStats{}, owner.GetID())
	if err != nil {
		return nil, fmt.Errorf("failed to calculate policy: %w", err)
	}

	epp := sp.DistillPolicy(logger, owner, testRedirects)
	if epp == nil {
		return nil, errors.New("policy distillation failure")
	}
	return epp, nil
}
// distillPolicy distills the policy repository into a set of bpf map state
// entries for an endpoint with the specified identity.
func (d *policyDistillery) distillPolicy(logger *slog.Logger, owner PolicyOwner, identity *identity.Identity) (mapState, error) {
	epp, err := d.distillEndpointPolicy(logger, owner, identity)
	if err != nil {
		return emptyMapState(logger), err
	}

	// Remove the allow-all egress entry that's generated by default. This is
	// because this test suite doesn't have a notion of traffic direction, so
	// the extra egress allow-all is technically correct, but omitted from the
	// expected output that's asserted against for the sake of brevity.
	if entry, ok := epp.policyMapState.get(mapKeyAllowAllE_); ok && !entry.IsDeny() {
		epp.policyMapState.delete(mapKeyAllowAllE_)
	}

	epp.Ready()
	epp.Detach(logger)
	return epp.policyMapState, nil
}
// Perm calls f with each permutation of a. The slice is permuted in place
// and restored to its original order before Perm returns.
func Perm[X any](a []X, f func([]X)) {
	perm(a, f, 0)
}

// perm permutes the values at index i to len(a)-1, invoking f for each
// complete arrangement. The recursion depth over-shoots len(a) by design;
// levels at or beyond len(a)-1 have an empty swap loop and merely pass
// through to the f(a) call at the base case.
func perm[X any](a []X, f func([]X), i int) {
	if i > len(a) {
		f(a)
		return
	}
	// Visit the arrangement with a[i] left in place first...
	perm(a, f, i+1)
	// ...then each arrangement with a[i] swapped with a later element,
	// undoing the swap afterwards so the caller's prefix is preserved.
	for j := i + 1; j < len(a); j++ {
		a[i], a[j] = a[j], a[i]
		perm(a, f, i+1)
		a[i], a[j] = a[j], a[i]
	}
}
// Test_Perm pins the exact visitation order produced by Perm for a
// three-element input.
func Test_Perm(t *testing.T) {
	want := []string{
		"abc",
		"acb",
		"bac",
		"bca",
		"cba",
		"cab",
	}
	var got []string
	Perm([]rune("abc"), func(p []rune) { got = append(got, string(p)) })
	assert.Equal(t, want, got, "invalid permutations")
}
// testMapState builds a mapState pre-populated with the given entries, for
// use as an expected value in table-driven tests.
func testMapState(t testing.TB, initMap mapStateMap) mapState {
	return emptyMapState(hivetest.Logger(t)).withState(initMap)
}
// Test_MergeL3 verifies that combinations of L3/L4 allow rules (with and
// without authentication requirements) merge into the expected policy map
// state and per-peer AuthTypes, for every permutation of rule insertion
// order.
func Test_MergeL3(t *testing.T) {
	// Cache policy enforcement value from when test was ran to avoid pollution
	// across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)

	identityCache := identity.IdentityMap{
		identityFoo: labelsFoo,
		identityBar: labelsBar,
	}
	selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)

	type authResult map[identity.NumericIdentity]AuthTypes
	tests := []struct {
		test   int
		rules  api.Rules
		result mapState
		auths  authResult
	}{
		{
			0,
			api.Rules{ruleL3__AllowFoo, ruleL3__AllowBar},
			testMapState(t, mapStateMap{
				mapKeyAllowFoo__: mapEntryL7None_(lblsL3__AllowFoo),
				mapKeyAllowBar__: mapEntryL7None_(lblsL3__AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{},
				identityFoo: AuthTypes{},
			},
		},
		{
			1,
			api.Rules{ruleL3__AllowFoo, ruleL3L4__Allow},
			testMapState(t, mapStateMap{
				mapKeyAllowFoo__: mapEntryL7None_(lblsL3__AllowFoo),
				mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow),
			}),
			authResult{
				identityBar: AuthTypes{},
				identityFoo: AuthTypes{},
			},
		},
		{
			2,
			api.Rules{ruleL3__AllowFoo, ruleL3__AllowBarAuth},
			testMapState(t, mapStateMap{
				mapKeyAllowFoo__: mapEntryL7None_(lblsL3__AllowFoo),
				mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}},
				identityFoo: AuthTypes{},
			},
		},
		{
			3,
			api.Rules{ruleL3__AllowFoo, ruleL3__AllowBarAuth, rule__L4__AllowAuth},
			testMapState(t, mapStateMap{
				mapKeyAllow___L4: mapEntryL7ExplicitAuth_(AuthTypeSpire, lbls__L4__Allow),
				mapKeyAllowFoo__: mapEntryL7None_(lblsL3__AllowFoo),
				mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}, AuthTypeSpire: struct{}{}},
				identityFoo: AuthTypes{AuthTypeSpire: struct{}{}},
			},
		},
		{
			4,
			api.Rules{rule____AllowAll, ruleL3__AllowBarAuth},
			testMapState(t, mapStateMap{
				mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll),
				mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}},
				identityFoo: AuthTypes{},
			},
		},
		{
			5,
			api.Rules{rule____AllowAllAuth, ruleL3__AllowBar},
			testMapState(t, mapStateMap{
				mapKeyAllowAll__: mapEntryL7ExplicitAuth_(AuthTypeSpire, lbls____AllowAll),
				mapKeyAllowBar__: mapEntryL7None_(lblsL3__AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{AuthTypeSpire: struct{}{}},
				identityFoo: AuthTypes{AuthTypeSpire: struct{}{}},
			},
		},
		{
			6,
			api.Rules{rule____AllowAllAuth, rule__L4__Allow},
			testMapState(t, mapStateMap{
				mapKeyAllowAll__: mapEntryL7ExplicitAuth_(AuthTypeSpire, lbls____AllowAll),
				mapKeyAllow___L4: mapEntryL7DerivedAuth_(AuthTypeSpire, lbls__L4__Allow),
			}),
			authResult{
				identityBar: AuthTypes{AuthTypeSpire: struct{}{}},
				identityFoo: AuthTypes{AuthTypeSpire: struct{}{}},
			},
		},
		{
			7,
			api.Rules{rule____AllowAllAuth, ruleL3__AllowBar, rule__L4__Allow},
			testMapState(t, mapStateMap{
				mapKeyAllowAll__: mapEntryL7ExplicitAuth_(AuthTypeSpire, lbls____AllowAll),
				mapKeyAllow___L4: mapEntryL7DerivedAuth_(AuthTypeSpire, lbls__L4__Allow),
				mapKeyAllowBar__: mapEntryL7DerivedAuth_(AuthTypeDisabled, lblsL3__AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{AuthTypeSpire: struct{}{}},
				identityFoo: AuthTypes{AuthTypeSpire: struct{}{}},
			},
		},
		{
			8,
			api.Rules{rule____AllowAll, ruleL3__AllowBar, rule__L4__Allow},
			testMapState(t, mapStateMap{
				mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll),
				mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow),
				mapKeyAllowBar__: mapEntryL7None_(lblsL3__AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{},
				identityFoo: AuthTypes{},
			},
		},
		{
			9,
			api.Rules{rule____AllowAll, rule__L4__Allow, ruleL3__AllowBarAuth},
			testMapState(t, mapStateMap{
				mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll),
				mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow),
				mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}},
				identityFoo: AuthTypes{},
			},
		},
		{
			10, // Same as 9, but the L3L4 entry is created by an explicit rule.
			api.Rules{rule____AllowAll, rule__L4__Allow, ruleL3__AllowBarAuth, ruleL3L4AllowBarAuth},
			testMapState(t, mapStateMap{
				mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll),
				mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow),
				mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
				mapKeyAllowBarL4: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3L4AllowBar),
			}),
			authResult{
				identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}},
				identityFoo: AuthTypes{},
			},
		},
	}

	// The endpoint under test has the "foo" identity; this local variable
	// shadows the imported identity package for the remainder of the test.
	identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
	for _, tt := range tests {
		// Scope every rule to the endpoint under test.
		for i, r := range tt.rules {
			tt.rules[i] = r.WithEndpointSelector(selectFoo_)
		}
		round := 0
		// Results must be identical regardless of rule insertion order.
		Perm(tt.rules, func(rules []*api.Rule) {
			round++
			repo := newPolicyDistillery(t, selectorCache)
			_, _ = repo.MustAddList(rules)
			t.Run(fmt.Sprintf("permutation_%d-%d", tt.test, round), func(t *testing.T) {
				logBuffer := new(bytes.Buffer)
				repo = repo.WithLogBuffer(logBuffer)
				mapstate, err := repo.distillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, identity)
				if err != nil {
					t.Errorf("Policy resolution failure: %s", err)
				}
				if equal := assert.True(t, mapstate.Equal(&tt.result), mapstate.diff(&tt.result)); !equal {
					t.Logf("Rules:\n%s\n\n", api.Rules(rules).String())
					t.Logf("Policy Trace: \n%s\n", logBuffer.String())
					t.Errorf("Policy obtained didn't match expected for endpoint %s:\nObtained: %v\nExpected: %v", labelsFoo, mapstate, tt.result)
				}
				for remoteID, expectedAuthTypes := range tt.auths {
					authTypes := repo.GetAuthTypes(identity.ID, remoteID)
					if !maps.Equal(authTypes, expectedAuthTypes) {
						t.Errorf("Incorrect AuthTypes result for remote ID %d: obtained %v, expected %v", remoteID, authTypes, expectedAuthTypes)
					}
				}
			})
		})
	}
}
// The following variables names are derived from the following google sheet
// https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw/edit?usp=sharing
const (
	// Column indices into one whitespace-separated row of the raw test
	// table (see parseTable): four cells (L3, L4, L7, Deny) for each of
	// the three key kinds — L3+L4, L4-only, and L3-only — in that order.
	L3L4KeyL3 = iota
	L3L4KeyL4
	L3L4KeyL7
	L3L4KeyDeny
	L4KeyL3
	L4KeyL4
	L4KeyL7
	L4KeyDeny
	L3KeyL3
	L3KeyL4
	L3KeyL7
	L3KeyDeny
	// Total is the expected number of cells per table row.
	Total
)
// fieldsSet is the representation of the values set in the cells M8-P8, Q8-T8
// and U8-X8.
//
// Each field is tri-state: nil corresponds to an "X" cell (field not
// applicable), otherwise the pointed-to bool carries the cell's 0/1 value
// (see parseFieldBool).
type fieldsSet struct {
	L3   *bool
	L4   *bool
	L7   *bool
	Deny *bool
}
// generatedBPFKey is the representation of the values set in the cells [M:P]6,
// [Q:T]6 and [U:X]6.
type generatedBPFKey struct {
	L3L4Key fieldsSet // expectations for the L3+L4 map key
	L4Key   fieldsSet // expectations for the L4-only map key
	L3Key   fieldsSet // expectations for the L3-only map key
}
// parseFieldBool converts a single test-table cell into a tri-state value:
// "X" yields nil (the field is not applicable), while "0" and "1" yield
// pointers to fresh false/true values. Any other input is a malformed
// table row — a programming error in the test data — and panics.
func parseFieldBool(s string) *bool {
	switch s {
	case "X":
		return nil
	case "0", "1":
		// Allocate a new bool per call so callers never alias each other.
		b := s == "1"
		return &b
	default:
		panic("Unknown value")
	}
}
// parseTable parses one whitespace-separated row of the raw expectation
// table into a generatedBPFKey. It panics if the row does not contain
// exactly Total cells.
func parseTable(test string) generatedBPFKey {
	// Collapse runs of whitespace; each row must carry one cell per
	// column defined by the index constants above.
	cells := strings.Fields(test)
	if len(cells) != Total {
		panic("Wrong number of expected results")
	}
	// cell translates a column index into its tri-state value.
	cell := func(idx int) *bool {
		return parseFieldBool(cells[idx])
	}
	return generatedBPFKey{
		L3L4Key: fieldsSet{
			L3:   cell(L3L4KeyL3),
			L4:   cell(L3L4KeyL4),
			L7:   cell(L3L4KeyL7),
			Deny: cell(L3L4KeyDeny),
		},
		L4Key: fieldsSet{
			L3:   cell(L4KeyL3),
			L4:   cell(L4KeyL4),
			L7:   cell(L4KeyL7),
			Deny: cell(L4KeyDeny),
		},
		L3Key: fieldsSet{
			L3:   cell(L3KeyL3),
			L4:   cell(L3KeyL4),
			L7:   cell(L3KeyL7),
			Deny: cell(L3KeyDeny),
		},
	}
}
// testCaseToMapState generates the expected MapState logic. This function is
// an implementation of the expected behavior. Any relation between this
// function and non unit-test code should be seen as coincidental.
// The algorithm represented in this function should be the source of truth
// of our expectations when enforcing multiple types of policies.
//
// NOTE: here 't' is the parsed table row (generatedBPFKey), NOT a
// *testing.T; the testing handle is 't2'.
func testCaseToMapState(t2 *testing.T, t generatedBPFKey) mapState {
	m := emptyMapState(hivetest.Logger(t2))

	// L3-only key: a nil L3 cell ("X") means no L3-only map entry is expected.
	if t.L3Key.L3 != nil {
		if t.L3Key.Deny != nil && *t.L3Key.Deny {
			m.upsert(mapKeyDeny_Foo__, mapEntryL7Deny())
		} else {
			// If L7 is not set, or it is explicitly set but false.
			if t.L3Key.L7 == nil || !*t.L3Key.L7 {
				m.upsert(mapKeyAllowFoo__, mapEntryL7None_())
			}
			// there's no "else" because we don't support L3L7 policies, i.e.,
			// a L4 port needs to be specified.
		}
	}

	// L4-only key.
	if t.L4Key.L3 != nil {
		if t.L4Key.Deny != nil && *t.L4Key.Deny {
			m.upsert(mapKeyDeny____L4, mapEntryL7Deny())
		} else {
			// If L7 is not set, or it is explicitly set but false.
			if t.L4Key.L7 == nil || !*t.L4Key.L7 {
				m.upsert(mapKeyAllow___L4, mapEntryL7None_())
			} else {
				// L7 is set and true, so we should expect a mapEntry
				// with L7 redirection.
				m.upsert(mapKeyAllow___L4, mapEntryL7Proxy())
			}
		}
	}

	// L3+L4 key.
	if t.L3L4Key.L3 != nil {
		if t.L3L4Key.Deny != nil && *t.L3L4Key.Deny {
			m.upsert(mapKeyDeny_FooL4, mapEntryL7Deny())
		} else {
			// If L7 is not set, or it is explicitly set but false.
			if t.L3L4Key.L7 == nil || !*t.L3L4Key.L7 {
				m.upsert(mapKeyAllowFooL4, mapEntryL7None_())
			} else {
				// L7 is set and true, so we should expect a mapEntry
				// with L7 redirection only if we haven't set it already
				// for an existing L4-only.
				if t.L4Key.L7 == nil || !*t.L4Key.L7 {
					m.upsert(mapKeyAllowFooL4, mapEntryL7Proxy())
				}
			}
		}
	}
	return m
}
// generateMapStates builds the expected mapState for every one of the 256
// auto-generated test cases by parsing the raw expectation table below.
// Each row holds 12 cells (see the L3L4Key*/L4Key*/L3Key* index constants):
// "X" means not applicable, "0"/"1" are the expected field values. Row i
// corresponds to test case i (rules selected by generateRule(i)).
func generateMapStates(t *testing.T) []mapState {
	rawTestTable := []string{
		"X X X X X X X X X X X X", // 0
		"X X X X X X X X 1 0 0 0",
		"X X X X 0 1 0 0 X X X X",
		"X X X X 0 1 0 0 1 0 0 0",
		"1 1 0 0 X X X X X X X X",
		"1 1 0 0 X X X X 1 0 0 0", // 5
		"X X X X 0 1 0 0 X X X X",
		"X X X X 0 1 0 0 1 0 0 0",
		"X X X X 0 1 1 0 X X X X",
		"X X X X 0 1 1 0 1 0 0 0",
		"X X X X 0 1 1 0 X X X X", // 10
		"X X X X 0 1 1 0 1 0 0 0",
		"1 1 1 0 0 1 1 0 X X X X",
		"1 1 1 0 0 1 1 0 1 0 0 0",
		"1 1 1 0 0 1 1 0 X X X X",
		"1 1 1 0 0 1 1 0 1 0 0 0", // 15
		"1 1 1 0 X X X X X X X X",
		"1 1 1 0 X X X X 1 0 0 0",
		"1 1 1 0 0 1 0 0 X X X X",
		"1 1 1 0 0 1 0 0 1 0 0 0",
		"1 1 1 0 X X X X X X X X", // 20
		"1 1 1 0 X X X X 1 0 0 0",
		"1 1 1 0 0 1 0 0 X X X X",
		"1 1 1 0 0 1 0 0 1 0 0 0",
		"1 1 1 0 0 1 1 0 X X X X",
		"1 1 1 0 0 1 1 0 1 0 0 0", // 25
		"1 1 1 0 0 1 1 0 X X X X",
		"1 1 1 0 0 1 1 0 1 0 0 0",
		"1 1 1 0 0 1 1 0 X X X X",
		"1 1 1 0 0 1 1 0 1 0 0 0",
		"1 1 1 0 0 1 1 0 X X X X", // 30
		"1 1 1 0 0 1 1 0 1 0 0 0",
		// Cases 32+ involve deny rules (the top three bits of the case
		// number select the deny rules, see generateRule).
		"X X X X X X X X 1 0 0 1", // 32
		"X X X X X X X X 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"X X X X 0 1 0 1 X X X X", // 64
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 1 0 0 1", // 96
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"X X X X 0 1 0 1 1 0 0 1",
		"1 1 0 1 X X X X X X X X", // 128
		"1 1 0 1 X X X X 1 0 0 0",
		"1 1 0 1 0 1 0 0 X X X X",
		"1 1 0 1 0 1 0 0 1 0 0 0",
		"1 1 0 1 X X X X X X X X",
		"1 1 0 1 X X X X 1 0 0 0",
		"1 1 0 1 0 1 0 0 X X X X",
		"1 1 0 1 0 1 0 0 1 0 0 0",
		"1 1 0 1 0 1 1 0 X X X X",
		"1 1 0 1 0 1 1 0 1 0 0 0",
		"1 1 0 1 0 1 1 0 X X X X",
		"1 1 0 1 0 1 1 0 1 0 0 0",
		"1 1 0 1 0 1 1 0 X X X X",
		"1 1 0 1 0 1 1 0 1 0 0 0",
		"1 1 0 1 0 1 1 0 X X X X",
		"1 1 0 1 0 1 1 0 1 0 0 0",
		"1 1 0 1 X X X X X X X X",
		"1 1 0 1 X X X X 1 0 0 0",
		"1 1 0 1 0 1 0 0 X X X X",
		"1 1 0 1 0 1 0 0 1 0 0 0",
		"1 1 0 1 X X X X X X X X",
		"1 1 0 1 X X X X 1 0 0 0",
		"1 1 0 1 0 1 0 0 X X X X",
		"1 1 0 1 0 1 0 0 1 0 0 0",
		"1 1 0 1 0 1 1 0 X X X X",
		"1 1 0 1 0 1 1 0 1 0 0 0",
		"1 1 0 1 0 1 1 0 X X X X",
		"1 1 0 1 0 1 1 0 1 0 0 0",
		"1 1 0 1 0 1 1 0 X X X X",
		"1 1 0 1 0 1 1 0 1 0 0 0",
		"1 1 0 1 0 1 1 0 X X X X",
		"1 1 0 1 0 1 1 0 1 0 0 0",
		"X X X X X X X X 1 0 0 1", // 160
		"X X X X X X X X 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"X X X X X X X X 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 0 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"1 1 0 1 0 1 1 0 1 0 0 1",
		"X X X X 0 1 0 1 X X X X", // 192
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 0 1 0 1 X X X X",
		"X X X X 0 1 0 1 1 0 0 0",
		"X X X X 1 1 0 1 1 0 0 1", // 224
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
		"X X X X 1 1 0 1 1 0 0 1",
	}

	// Translate every raw row into its expected mapState, preserving the
	// row order so index i matches test case i.
	mapStates := make([]mapState, 0, len(rawTestTable))
	for _, rawTest := range rawTestTable {
		testCase := parseTable(rawTest)
		mapState := testCaseToMapState(t, testCase)
		mapStates = append(mapStates, mapState)
	}
	return mapStates
}
// generateRule builds the rule set for auto-generated test case testCase.
// Each of the 8 low bits of testCase selects whether the corresponding rule
// in rulesIdx is included; a cleared bit substitutes the matching no-op
// rule (no-deny for the deny slots, no-allow for the allow slots). Rules
// are appended from the highest index down, mirroring the Rule 0..Rule 7
// column order of the manual test tables.
func generateRule(testCase int) api.Rules {
	// Index at which the deny rules start within rulesIdx.
	const denyIdx = 5
	rulesIdx := api.Rules{
		ruleL3____Allow,
		rule__L4__Allow,
		ruleL3L4__Allow,
		rule__L4L7Allow,
		ruleL3L4L7Allow,
		// deny rules follow from denyIdx onward
		ruleL3_____Deny,
		rule__L4___Deny,
		ruleL3L4___Deny,
	}
	rules := make(api.Rules, 0, len(rulesIdx))
	for i := len(rulesIdx) - 1; i >= 0; i-- {
		if ((testCase >> i) & 0x1) != 0 {
			rules = append(rules, rulesIdx[i])
		} else if i >= denyIdx {
			rules = append(rules, rule_____NoDeny)
		} else {
			rules = append(rules, rule____NoAllow)
		}
	}
	return rules
}
// Test_MergeRules verifies the BPF map state produced when merging
// combinations of allow/deny and L3/L4/L7 rules for a single endpoint.
// Cases 0-31 are spelled out manually below; cases 32-255 are generated
// from the raw expectation table (generateMapStates) with rule sets built
// by generateRule.
func Test_MergeRules(t *testing.T) {
	// Cache policy enforcement value from when test was ran to avoid pollution
	// across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)

	identityCache := identity.IdentityMap{
		identity.NumericIdentity(identityFoo): labelsFoo,
	}
	selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)
	// Local 'identity' shadows the imported package for the rest of the test.
	identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)

	tests := []struct {
		test     int
		rules    api.Rules
		expected mapState
	}{
		// The following table is derived from the Google Doc here:
		// https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw/edit?usp=sharing
		//
		//  Rule 0          | Rule 1          | Rule 2          | Rule 3          | Rule 4          | Rule 5          | Rule 6          | Rule 7          | Desired BPF map state
		{0, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(t, nil)},
		{1, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{2, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule__L4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
		{3, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule__L4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{4, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3L4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow)})},
		{5, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3L4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{6, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3L4__Allow, rule__L4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})}, // identical L3L4 entry suppressed
		{7, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3L4__Allow, rule__L4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
		{8, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})},
		{9, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{10, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, rule____NoAllow, rule__L4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})},
		{11, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, rule____NoAllow, rule__L4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{12, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, ruleL3L4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
		{13, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, ruleL3L4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
		{14, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, ruleL3L4__Allow, rule__L4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
		{15, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, ruleL3L4__Allow, rule__L4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
		{16, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow)})},
		{17, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{18, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, rule____NoAllow, rule__L4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
		{19, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, rule____NoAllow, rule__L4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{20, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, ruleL3L4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow)})},
		{21, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, ruleL3L4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{22, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, ruleL3L4__Allow, rule__L4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
		{23, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, ruleL3L4__Allow, rule__L4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{24, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
		{25, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
		{26, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, rule____NoAllow, rule__L4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
		{27, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, rule____NoAllow, rule__L4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
		{28, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, ruleL3L4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
		{29, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, ruleL3L4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
		{30, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, ruleL3L4__Allow, rule__L4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
		{31, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, ruleL3L4__Allow, rule__L4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
	}

	expectedMapState := generateMapStates(t)
	// Add the auto generated test cases for the deny policies
	generatedIdx := 32
	for i := generatedIdx; i < 256; i++ {
		tests = append(tests,
			struct {
				test     int
				rules    api.Rules
				expected mapState
			}{
				test:     i,
				rules:    generateRule(i),
				expected: expectedMapState[i],
			})
	}

	for i, tt := range tests {
		repo := newPolicyDistillery(t, selectorCache)
		// generateRule must reproduce the manually spelled-out rule set;
		// verified with ElementsMatch below.
		generatedRule := generateRule(tt.test)
		for _, r := range tt.rules {
			if r != nil {
				rule := r.WithEndpointSelector(selectFoo_)
				_, _ = repo.MustAddList(api.Rules{rule})
			}
		}
		t.Run(fmt.Sprintf("permutation_%d", tt.test), func(t *testing.T) {
			logBuffer := new(bytes.Buffer)
			repo = repo.WithLogBuffer(logBuffer)
			mapstate, err := repo.distillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, identity)
			if err != nil {
				t.Errorf("Policy resolution failure: %s", err)
			}
			// Skip the strict equality check for generated cases: the
			// generated rules carry different labels (LabelArrayList) than
			// the manual expectations, which would make this comparison fail.
			if i < generatedIdx {
				if equal := assert.True(t, mapstate.Equal(&tt.expected), mapstate.diff(&tt.expected)); !equal {
					require.EqualExportedValuesf(t, tt.expected, mapstate, "Policy obtained didn't match expected for endpoint %s", labelsFoo)
					t.Logf("Rules:\n%s\n\n", tt.rules.String())
					t.Logf("Policy Trace: \n%s\n", logBuffer.String())
					t.Errorf("Policy obtained didn't match expected for endpoint %s", labelsFoo)
				}
			}
			// All cases (manual and generated) must match the table-derived
			// expectation on exported fields.
			if equal := assert.EqualExportedValues(t, expectedMapState[tt.test], mapstate); !equal {
				t.Logf("Rules:\n%s\n\n", tt.rules.String())
				t.Logf("Policy Trace: \n%s\n", logBuffer.String())
				t.Error("Policy obtained didn't match expected for endpoint")
			}
			if equal := assert.ElementsMatch(t, tt.rules, generatedRule); !equal {
				t.Logf("Rules:\n%s\n\n", tt.rules.String())
				t.Logf("Policy Trace: \n%s\n", logBuffer.String())
				t.Error("Generated rules didn't match manual rules")
			}
		})
	}
}
// Test_MergeRulesWithNamedPorts mirrors the manual cases of Test_MergeRules
// but uses named-port ("np") variants of the L4 rules; the resulting map
// states must be identical to their numeric-port counterparts.
func Test_MergeRulesWithNamedPorts(t *testing.T) {
	// Cache policy enforcement value from when test was ran to avoid pollution
	// across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)

	identityCache := identity.IdentityMap{
		identity.NumericIdentity(identityFoo): labelsFoo,
	}
	selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)
	// Local 'identity' shadows the imported package for the rest of the test.
	identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)

	tests := []struct {
		test     int
		rules    api.Rules
		expected mapState
	}{
		// The following table is derived from the Google Doc here:
		// https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw/edit?usp=sharing
		//
		//  Rule 0          | Rule 1          | Rule 2          | Rule 3          | Rule 4          | Rule 5          | Rule 6          | Rule 7          | Desired BPF map state
		{0, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(t, nil)},
		{1, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{2, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule__npL4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
		{3, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule__npL4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{4, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3npL4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow)})},
		{5, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3npL4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{6, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3npL4__Allow, rule__npL4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})}, // identical L3L4 entry suppressed
		{7, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3npL4__Allow, rule__npL4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
		{8, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})},
		{9, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{10, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, rule____NoAllow, rule__npL4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})},
		{11, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, rule____NoAllow, rule__npL4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{12, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, ruleL3npL4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
		{13, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, ruleL3npL4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
		{14, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, ruleL3npL4__Allow, rule__npL4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
		{15, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, ruleL3npL4__Allow, rule__npL4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
		{16, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow)})},
		{17, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{18, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, rule____NoAllow, rule__npL4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
		{19, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, rule____NoAllow, rule__npL4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{20, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, ruleL3npL4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow)})},
		{21, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, ruleL3npL4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{22, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, ruleL3npL4__Allow, rule__npL4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
		{23, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, ruleL3npL4__Allow, rule__npL4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
		{24, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
		{25, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
		{26, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, rule____NoAllow, rule__npL4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
		{27, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, rule____NoAllow, rule__npL4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
		{28, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, ruleL3npL4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
		{29, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, ruleL3npL4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
		{30, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, ruleL3npL4__Allow, rule__npL4__Allow, rule____NoAllow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
		{31, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, ruleL3npL4__Allow, rule__npL4__Allow, ruleL3____Allow}, testMapState(t, mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
	}
	for _, tt := range tests {
		repo := newPolicyDistillery(t, selectorCache)
		for _, r := range tt.rules {
			if r != nil {
				rule := r.WithEndpointSelector(selectFoo_)
				_, _ = repo.MustAddList(api.Rules{rule})
			}
		}
		t.Run(fmt.Sprintf("permutation_%d", tt.test), func(t *testing.T) {
			logBuffer := new(bytes.Buffer)
			repo = repo.WithLogBuffer(logBuffer)
			mapstate, err := repo.distillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, identity)
			if err != nil {
				t.Errorf("Policy resolution failure: %s", err)
			}
			require.Truef(t, mapstate.Equal(&tt.expected),
				"Policy obtained didn't match expected for endpoint %s:\n%s", labelsFoo, mapstate.diff(&tt.expected))
		})
	}
}
// Test_AllowAll verifies that an allow-all rule distills into the expected
// wildcard map entries for both the "none" and the wildcard endpoint selector.
func Test_AllowAll(t *testing.T) {
	// Preserve the global policy-enforcement mode so this test does not
	// pollute other tests in the package.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)
	identityCache := identity.IdentityMap{
		identityFoo: labelsFoo,
		identityBar: labelsBar,
	}
	selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)
	epIdentity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
	tests := []struct {
		test     int
		selector api.EndpointSelector
		rules    api.Rules
		expected mapState
	}{
		{0, api.EndpointSelectorNone, api.Rules{rule____AllowAll}, testMapState(t, mapStateMap{mapKeyAllowAll__: mapEntryL7None_(lblsAllowAllIngress)})},
		{1, api.WildcardEndpointSelector, api.Rules{rule____AllowAll}, testMapState(t, mapStateMap{mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll)})},
	}
	for _, tc := range tests {
		repo := newPolicyDistillery(t, selectorCache)
		for _, candidate := range tc.rules {
			if candidate == nil {
				continue
			}
			// Re-target the rule at the selector under test before adding it.
			withSelector := candidate.WithEndpointSelector(tc.selector)
			_, _ = repo.MustAddList(api.Rules{withSelector})
		}
		t.Run(fmt.Sprintf("permutation_%d", tc.test), func(t *testing.T) {
			logBuffer := new(bytes.Buffer)
			repo = repo.WithLogBuffer(logBuffer)
			mapstate, err := repo.distillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, epIdentity)
			if err != nil {
				t.Errorf("Policy resolution failure: %s", err)
			}
			// On mismatch, dump the rules and the policy trace to help debugging.
			if equal := assert.True(t, mapstate.Equal(&tc.expected), mapstate.diff(&tc.expected)); !equal {
				t.Logf("Rules:\n%s\n\n", tc.rules.String())
				t.Logf("Policy Trace: \n%s\n", logBuffer.String())
				t.Errorf("Policy obtained didn't match expected for endpoint %s", labelsFoo)
			}
		})
	}
}
// Fixtures shared by the deny-precedence tests below: rules, map keys, and
// map entries keyed by reserved/world/CIDR identities.
var (
	// ruleAllowAllIngress allows ingress from any endpoint.
	ruleAllowAllIngress = api.NewRule().WithIngressRules([]api.IngressRule{{
		IngressCommonRule: api.IngressCommonRule{
			FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
		}}}).WithEndpointSelector(api.WildcardEndpointSelector)

	// ruleL3DenyWorld denies both ingress from and egress to the world entity.
	ruleL3DenyWorld = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{{
		IngressCommonRule: api.IngressCommonRule{
			FromEntities: api.EntitySlice{api.EntityWorld},
		},
	}}).WithEgressDenyRules([]api.EgressDenyRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToEntities: api.EntitySlice{api.EntityWorld},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)

	// cpyRule is a value copy of ruleL3DenyWorld so that WithLabels below does
	// not mutate the original rule.
	cpyRule                   = *ruleL3DenyWorld
	ruleL3DenyWorldWithLabels = (&cpyRule).WithLabels(labels.LabelWorld.LabelArray())

	// Reserved world identities (combined, IPv4-only, IPv6-only).
	worldReservedID     = identity.ReservedIdentityWorld
	worldReservedIDIPv4 = identity.ReservedIdentityWorldIPv4
	worldReservedIDIPv6 = identity.ReservedIdentityWorldIPv6

	// L3-only map keys for the world identities, both directions.
	mapKeyL3WorldIngress     = IngressKey().WithIdentity(worldReservedID)
	mapKeyL3WorldIngressIPv4 = IngressKey().WithIdentity(worldReservedIDIPv4)
	mapKeyL3WorldIngressIPv6 = IngressKey().WithIdentity(worldReservedIDIPv6)
	mapKeyL3WorldEgress      = EgressKey().WithIdentity(worldReservedID)
	mapKeyL3WorldEgressIPv4  = EgressKey().WithIdentity(worldReservedIDIPv4)
	mapKeyL3WorldEgressIPv6  = EgressKey().WithIdentity(worldReservedIDIPv6)

	AllowEntry = types.AllowEntry()
	DenyEntry  = types.DenyEntry()

	// Plain allow/deny entries carrying an empty label list.
	mapEntryDeny  = NewMapStateEntry(DenyEntry).withLabels(labels.LabelArrayList{nil})
	mapEntryAllow = NewMapStateEntry(AllowEntry).withLabels(labels.LabelArrayList{nil})

	// Deny entry carrying the world label, as produced by the labeled rule above.
	worldLabelArrayList         = labels.LabelArrayList{labels.LabelWorld.LabelArray()}
	mapEntryWorldDenyWithLabels = NewMapStateEntry(DenyEntry).withLabels(worldLabelArrayList)

	// A single world IP and its CIDR labels.
	worldIPIdentity = localIdentity(16324)
	worldIPCIDR     = api.CIDR("192.0.2.3/32")
	lblWorldIP      = labels.GetCIDRLabels(netip.MustParsePrefix(string(worldIPCIDR)))

	// Host addresses used by the entity-selectable-by-CIDR test.
	hostIPv4        = api.CIDR("172.19.0.1/32")
	hostIPv6        = api.CIDR("fc00:c111::3/64")
	lblHostIPv4CIDR = labels.GetCIDRLabels(netip.MustParsePrefix(string(hostIPv4)))
	lblHostIPv6CIDR = labels.GetCIDRLabels(netip.MustParsePrefix(string(hostIPv6)))

	// ruleL3AllowWorldIP allows ingress from and egress to the single world IP.
	ruleL3AllowWorldIP = api.NewRule().WithIngressRules([]api.IngressRule{{
		IngressCommonRule: api.IngressCommonRule{
			FromCIDR: api.CIDRSlice{worldIPCIDR},
		},
	}}).WithEgressRules([]api.EgressRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDR: api.CIDRSlice{worldIPCIDR},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)

	// A /24 subnet that contains worldIPCIDR above.
	worldSubnetIdentity = localIdentity(16325)
	worldSubnet         = api.CIDR("192.0.2.0/24")
	worldSubnetRule     = api.CIDRRule{
		Cidr: worldSubnet,
	}
	lblWorldSubnet = labels.GetCIDRLabels(netip.MustParsePrefix(string(worldSubnet)))

	// ruleL3DenySubnet denies ingress from and egress to the /24 subnet.
	ruleL3DenySubnet = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{{
		IngressCommonRule: api.IngressCommonRule{
			FromCIDRSet: api.CIDRRuleSlice{worldSubnetRule},
		},
	}}).WithEgressDenyRules([]api.EgressDenyRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDRSet: api.CIDRRuleSlice{worldSubnetRule},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)
	mapKeyL3SubnetIngress = IngressKey().WithIdentity(worldSubnetIdentity)
	mapKeyL3SubnetEgress  = EgressKey().WithIdentity(worldSubnetIdentity)

	// ruleL3DenySmallerSubnet denies traffic for the single world IP (/32).
	ruleL3DenySmallerSubnet = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{{
		IngressCommonRule: api.IngressCommonRule{
			FromCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: worldIPCIDR}},
		},
	}}).WithEgressDenyRules([]api.EgressDenyRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: worldIPCIDR}},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)

	// ruleL3AllowLargerSubnet allows traffic for the containing /24 subnet.
	ruleL3AllowLargerSubnet = api.NewRule().WithIngressRules([]api.IngressRule{{
		IngressCommonRule: api.IngressCommonRule{
			FromCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: worldSubnet}},
		},
	}}).WithEgressRules([]api.EgressRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: worldSubnet}},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)
	mapKeyL3SmallerSubnetIngress = IngressKey().WithIdentity(worldIPIdentity)
	mapKeyL3SmallerSubnetEgress  = EgressKey().WithIdentity(worldIPIdentity)

	// ruleL3AllowHostEgress allows egress to the host's IPv4 and IPv6 CIDRs.
	ruleL3AllowHostEgress = api.NewRule().WithEgressRules([]api.EgressRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: hostIPv4}, api.CIDRRule{Cidr: hostIPv6}},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)
	mapKeyL3UnknownIngress   = IngressKey()
	mapEntryL3UnknownIngress = newAllowEntryWithLabels(LabelsAllowAnyIngress)
	mapKeyL3HostEgress       = EgressKey().WithIdentity(identity.ReservedIdentityHost)

	// ruleL3L4Port8080ProtoAnyDenyWorld denies port 8080 on any protocol,
	// from/to the world entity, in both directions.
	ruleL3L4Port8080ProtoAnyDenyWorld = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{
		{
			ToPorts: api.PortDenyRules{
				api.PortDenyRule{
					Ports: []api.PortProtocol{
						{
							Port:     "8080",
							Protocol: api.ProtoAny,
						},
					},
				},
			},
			IngressCommonRule: api.IngressCommonRule{
				FromEntities: api.EntitySlice{api.EntityWorld},
			},
		},
	}).WithEgressDenyRules([]api.EgressDenyRule{
		{
			ToPorts: api.PortDenyRules{
				api.PortDenyRule{
					Ports: []api.PortProtocol{
						{
							Port:     "8080",
							Protocol: api.ProtoAny,
						},
					},
				},
			},
			EgressCommonRule: api.EgressCommonRule{
				ToEntities: api.EntitySlice{api.EntityWorld},
			},
		},
	}).WithEndpointSelector(api.WildcardEndpointSelector)

	// Port-8080 map keys for every protocol (TCP/UDP/SCTP), direction, and
	// world-related identity that the ProtoAny deny above can expand into.
	mapKeyL3L4Port8080ProtoTCPWorldIngress      = IngressKey().WithIdentity(worldReservedID).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldEgress       = EgressKey().WithIdentity(worldReservedID).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldIngress      = IngressKey().WithIdentity(worldReservedID).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldEgress       = EgressKey().WithIdentity(worldReservedID).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldIngress     = IngressKey().WithIdentity(worldReservedID).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldEgress      = EgressKey().WithIdentity(worldReservedID).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldIPv4Ingress  = IngressKey().WithIdentity(worldReservedIDIPv4).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldIPv4Egress   = EgressKey().WithIdentity(worldReservedIDIPv4).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldIPv4Ingress  = IngressKey().WithIdentity(worldReservedIDIPv4).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldIPv4Egress   = EgressKey().WithIdentity(worldReservedIDIPv4).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldIPv4Ingress = IngressKey().WithIdentity(worldReservedIDIPv4).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldIPv4Egress  = EgressKey().WithIdentity(worldReservedIDIPv4).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldIPv6Ingress  = IngressKey().WithIdentity(worldReservedIDIPv6).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldIPv6Egress   = EgressKey().WithIdentity(worldReservedIDIPv6).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldIPv6Ingress  = IngressKey().WithIdentity(worldReservedIDIPv6).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldIPv6Egress   = EgressKey().WithIdentity(worldReservedIDIPv6).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldIPv6Ingress = IngressKey().WithIdentity(worldReservedIDIPv6).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldIPv6Egress  = EgressKey().WithIdentity(worldReservedIDIPv6).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldSNIngress    = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldSNEgress     = EgressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldSNIngress    = IngressKey().WithIdentity(worldSubnetIdentity).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldSNEgress     = EgressKey().WithIdentity(worldSubnetIdentity).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldSNIngress   = IngressKey().WithIdentity(worldSubnetIdentity).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldSNEgress    = EgressKey().WithIdentity(worldSubnetIdentity).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldIPIngress    = IngressKey().WithIdentity(worldIPIdentity).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoTCPWorldIPEgress     = EgressKey().WithIdentity(worldIPIdentity).WithTCPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldIPIngress    = IngressKey().WithIdentity(worldIPIdentity).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoUDPWorldIPEgress     = EgressKey().WithIdentity(worldIPIdentity).WithUDPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldIPIngress   = IngressKey().WithIdentity(worldIPIdentity).WithSCTPPort(8080)
	mapKeyL3L4Port8080ProtoSCTPWorldIPEgress    = EgressKey().WithIdentity(worldIPIdentity).WithSCTPPort(8080)

	// ruleL3AllowWorldSubnet allows port 8080 (any protocol) from/to the /24 subnet.
	ruleL3AllowWorldSubnet = api.NewRule().WithIngressRules([]api.IngressRule{{
		ToPorts: api.PortRules{
			api.PortRule{
				Ports: []api.PortProtocol{
					{
						Port:     "8080",
						Protocol: api.ProtoAny,
					},
				},
			},
		},
		IngressCommonRule: api.IngressCommonRule{
			FromCIDR: api.CIDRSlice{worldSubnet},
		},
	}}).WithEgressRules([]api.EgressRule{{
		ToPorts: api.PortRules{
			api.PortRule{
				Ports: []api.PortProtocol{
					{
						Port:     "8080",
						Protocol: api.ProtoAny,
					},
				},
			},
		},
		EgressCommonRule: api.EgressCommonRule{
			ToCIDR: api.CIDRSlice{worldSubnet},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)

	// ruleL3DenyWorldIP denies all traffic for the single world IP.
	ruleL3DenyWorldIP = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{{
		IngressCommonRule: api.IngressCommonRule{
			FromCIDR: api.CIDRSlice{worldIPCIDR},
		},
	}}).WithEgressDenyRules([]api.EgressDenyRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDR: api.CIDRSlice{worldIPCIDR},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)
	mapKeyAnyIngress                   = IngressKey()
	mapKeyL4AnyPortProtoWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity)
	mapKeyL4AnyPortProtoWorldIPEgress  = EgressKey().WithIdentity(worldIPIdentity)

	// ruleL3AllowWorldSubnetNamedPort allows the named port "http"/TCP from the subnet.
	ruleL3AllowWorldSubnetNamedPort = api.NewRule().WithIngressRules([]api.IngressRule{{
		ToPorts: api.PortRules{
			api.PortRule{
				Ports: []api.PortProtocol{
					{
						Port:     "http",
						Protocol: api.ProtoTCP,
					},
				},
			},
		},
		IngressCommonRule: api.IngressCommonRule{
			FromCIDR: api.CIDRSlice{worldSubnet},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)
	// Named port "http" is expected to resolve to TCP port 80 here.
	mapKeyL3L4NamedPortHTTPProtoTCPWorldSubNetIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(80)
	mapKeyL3L4NamedPortHTTPProtoTCPWorldIPIngress     = IngressKey().WithIdentity(worldIPIdentity).WithTCPPort(80)

	// ruleL3AllowWorldSubnetPortRange allows TCP port ranges 64-127 and 5-10
	// from the subnet.
	ruleL3AllowWorldSubnetPortRange = api.NewRule().WithIngressRules([]api.IngressRule{{
		ToPorts: api.PortRules{
			api.PortRule{
				Ports: []api.PortProtocol{
					{
						Port:     "64",
						EndPort:  127,
						Protocol: api.ProtoTCP,
					},
					{
						Port:     "5",
						EndPort:  10,
						Protocol: api.ProtoTCP,
					},
				},
			},
		},
		IngressCommonRule: api.IngressCommonRule{
			FromCIDR: api.CIDRSlice{worldSubnet},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)
	// Prefix-based keys covering the ranges above: 64-127 is one /10-bit
	// prefix; 5-10 decomposes into 5, 6-7, 8-9, and 10.
	mapKeyL3L4Port64To127ProtoTCPWorldSubNetIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPortPrefix(64, 10)
	mapKeyL3L4Port5ProtoTCPWorldSubNetIngress       = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(5)
	mapKeyL3L4Port6To7ProtoTCPWorldSubNetIngress    = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPortPrefix(6, 15)
	mapKeyL3L4Port8To9ProtoTCPWorldSubNetIngress    = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPortPrefix(8, 15)
	mapKeyL3L4Port10ProtoTCPWorldSubNetIngress      = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(10)
	mapKeyL3L4Port64To127ProtoTCPWorldIPIngress     = IngressKey().WithIdentity(worldIPIdentity).WithTCPPortPrefix(64, 10)
	mapKeyL3L4Port5ProtoTCPWorldIPIngress           = IngressKey().WithIdentity(worldIPIdentity).WithTCPPort(5)
	mapKeyL3L4Port6To7ProtoTCPWorldIPIngress        = IngressKey().WithIdentity(worldIPIdentity).WithTCPPortPrefix(6, 15)
	mapKeyL3L4Port8To9ProtoTCPWorldIPIngress        = IngressKey().WithIdentity(worldIPIdentity).WithTCPPortPrefix(8, 15)
	mapKeyL3L4Port10ProtoTCPWorldIPIngress          = IngressKey().WithIdentity(worldIPIdentity).WithTCPPort(10)
)
// Test_EnsureDeniesPrecedeAllows verifies that deny rules always take
// precedence over overlapping allow rules when policy is distilled into
// map state, across entity, CIDR, port, named-port, and port-range rules.
func Test_EnsureDeniesPrecedeAllows(t *testing.T) {
	// Cache policy enforcement value from when test was ran to avoid pollution
	// across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)
	identityCache := identity.IdentityMap{
		identity.NumericIdentity(identityFoo): labelsFoo,
		identity.ReservedIdentityWorld:        labels.LabelWorld.LabelArray(),
		identity.ReservedIdentityWorldIPv4:    labels.LabelWorldIPv4.LabelArray(),
		identity.ReservedIdentityWorldIPv6:    labels.LabelWorldIPv6.LabelArray(),
		worldIPIdentity:                       lblWorldIP.LabelArray(),     // "192.0.2.3/32"
		worldSubnetIdentity:                   lblWorldSubnet.LabelArray(), // "192.0.2.0/24"
	}
	selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)
	identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
	tests := []struct {
		test     string
		rules    api.Rules
		expected mapState
	}{
		// Deny of the world entity wins over an allow of a world IP.
		{"deny_world_no_labels", api.Rules{ruleAllowAllIngress, ruleL3DenyWorld, ruleL3AllowWorldIP}, testMapState(t, mapStateMap{
			mapKeyAnyIngress:             mapEntryAllow,
			mapKeyL3WorldIngress:         mapEntryDeny,
			mapKeyL3WorldIngressIPv4:     mapEntryDeny,
			mapKeyL3WorldIngressIPv6:     mapEntryDeny,
			mapKeyL3WorldEgress:          mapEntryDeny,
			mapKeyL3WorldEgressIPv4:      mapEntryDeny,
			mapKeyL3WorldEgressIPv6:      mapEntryDeny,
			mapKeyL3SubnetIngress:        mapEntryDeny,
			mapKeyL3SubnetEgress:         mapEntryDeny,
			mapKeyL3SmallerSubnetIngress: mapEntryDeny,
			mapKeyL3SmallerSubnetEgress:  mapEntryDeny,
		})},
		// Same as above, but the deny entries carry the world label.
		{"deny_world_with_labels", api.Rules{ruleAllowAllIngress, ruleL3DenyWorldWithLabels, ruleL3AllowWorldIP}, testMapState(t, mapStateMap{
			mapKeyAnyIngress:             mapEntryAllow,
			mapKeyL3WorldIngress:         mapEntryWorldDenyWithLabels,
			mapKeyL3WorldIngressIPv4:     mapEntryWorldDenyWithLabels,
			mapKeyL3WorldIngressIPv6:     mapEntryWorldDenyWithLabels,
			mapKeyL3WorldEgress:          mapEntryWorldDenyWithLabels,
			mapKeyL3WorldEgressIPv4:      mapEntryWorldDenyWithLabels,
			mapKeyL3WorldEgressIPv6:      mapEntryWorldDenyWithLabels,
			mapKeyL3SubnetIngress:        mapEntryWorldDenyWithLabels,
			mapKeyL3SubnetEgress:         mapEntryWorldDenyWithLabels,
			mapKeyL3SmallerSubnetIngress: mapEntryWorldDenyWithLabels,
			mapKeyL3SmallerSubnetEgress:  mapEntryWorldDenyWithLabels,
		})},
		// A /24 deny covers an allow of a /32 inside it.
		{"deny_one_ip_with_a_larger_subnet", api.Rules{ruleAllowAllIngress, ruleL3DenySubnet, ruleL3AllowWorldIP}, testMapState(t, mapStateMap{
			mapKeyAnyIngress:             mapEntryAllow,
			mapKeyL3SubnetIngress:        mapEntryDeny,
			mapKeyL3SubnetEgress:         mapEntryDeny,
			mapKeyL3SmallerSubnetIngress: mapEntryDeny,
			mapKeyL3SmallerSubnetEgress:  mapEntryDeny,
		})},
		// A /32 deny carves a hole out of a /24 allow.
		{"deny_part_of_a_subnet_with_an_ip", api.Rules{ruleAllowAllIngress, ruleL3DenySmallerSubnet, ruleL3AllowLargerSubnet}, testMapState(t, mapStateMap{
			mapKeyAnyIngress:             mapEntryAllow,
			mapKeyL3SmallerSubnetIngress: mapEntryDeny,
			mapKeyL3SmallerSubnetEgress:  mapEntryDeny,
			mapKeyL3SubnetIngress:        mapEntryAllow,
			mapKeyL3SubnetEgress:        	mapEntryAllow,
		})},
		// A broad L3L4 deny (world:8080/any) punches port holes into the
		// specific CIDR allow; the allow remains for all other ports.
		{"broad_cidr_deny_is_a_portproto_subset_of_a_specific_cidr_allow", api.Rules{ruleAllowAllIngress, ruleL3L4Port8080ProtoAnyDenyWorld, ruleL3AllowWorldIP}, testMapState(t, mapStateMap{
			mapKeyAnyIngress:                            mapEntryAllow,
			mapKeyL3L4Port8080ProtoTCPWorldIngress:      mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldEgress:       mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldIngress:      mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldEgress:       mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldIngress:     mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldEgress:      mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldIPv4Ingress:  mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldIPv4Egress:   mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldIPv4Ingress:  mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldIPv4Egress:   mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldIPv4Ingress: mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldIPv4Egress:  mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldIPv6Ingress:  mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldIPv6Egress:   mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldIPv6Ingress:  mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldIPv6Egress:   mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldIPv6Ingress: mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldIPv6Egress:  mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldSNIngress:    mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldSNEgress:     mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldSNIngress:    mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldSNEgress:     mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldSNIngress:   mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldSNEgress:    mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldIPIngress:    mapEntryDeny,
			mapKeyL3L4Port8080ProtoTCPWorldIPEgress:     mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldIPIngress:    mapEntryDeny,
			mapKeyL3L4Port8080ProtoUDPWorldIPEgress:     mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldIPIngress:   mapEntryDeny,
			mapKeyL3L4Port8080ProtoSCTPWorldIPEgress:    mapEntryDeny,
			mapKeyL3SmallerSubnetIngress:                mapEntryAllow,
			mapKeyL3SmallerSubnetEgress:                 mapEntryAllow,
		})},
		// The subnet allow on 8080 survives, but the /32 deny covers all
		// ports/protocols for the world IP.
		{"broad_cidr_allow_is_a_portproto_subset_of_a_specific_cidr_deny", api.Rules{ruleAllowAllIngress, ruleL3AllowWorldSubnet, ruleL3DenyWorldIP}, testMapState(t, mapStateMap{
			mapKeyAnyIngress:                          mapEntryAllow,
			mapKeyL3L4Port8080ProtoTCPWorldSNIngress:  mapEntryAllow,
			mapKeyL3L4Port8080ProtoTCPWorldSNEgress:   mapEntryAllow,
			mapKeyL3L4Port8080ProtoUDPWorldSNIngress:  mapEntryAllow,
			mapKeyL3L4Port8080ProtoUDPWorldSNEgress:   mapEntryAllow,
			mapKeyL3L4Port8080ProtoSCTPWorldSNIngress: mapEntryAllow,
			mapKeyL3L4Port8080ProtoSCTPWorldSNEgress:  mapEntryAllow,
			mapKeyL4AnyPortProtoWorldIPIngress:        mapEntryDeny,
			mapKeyL4AnyPortProtoWorldIPEgress:         mapEntryDeny,
		})},
		// Named port "http" resolves for both the subnet and the IP inside it.
		{"named_port_world_subnet", api.Rules{ruleAllowAllIngress, ruleL3AllowWorldSubnetNamedPort}, testMapState(t, mapStateMap{
			mapKeyAnyIngress: mapEntryAllow,
			mapKeyL3L4NamedPortHTTPProtoTCPWorldSubNetIngress: mapEntryAllow,
			mapKeyL3L4NamedPortHTTPProtoTCPWorldIPIngress:     mapEntryAllow,
		})},
		// Port ranges expand into their prefix cover for both identities.
		{"port_range_world_subnet", api.Rules{ruleAllowAllIngress, ruleL3AllowWorldSubnetPortRange}, testMapState(t, mapStateMap{
			mapKeyAnyIngress: mapEntryAllow,
			mapKeyL3L4Port64To127ProtoTCPWorldSubNetIngress: mapEntryAllow,
			mapKeyL3L4Port5ProtoTCPWorldSubNetIngress:       mapEntryAllow,
			mapKeyL3L4Port6To7ProtoTCPWorldSubNetIngress:    mapEntryAllow,
			mapKeyL3L4Port8To9ProtoTCPWorldSubNetIngress:    mapEntryAllow,
			mapKeyL3L4Port10ProtoTCPWorldSubNetIngress:      mapEntryAllow,
			mapKeyL3L4Port64To127ProtoTCPWorldIPIngress:     mapEntryAllow,
			mapKeyL3L4Port5ProtoTCPWorldIPIngress:           mapEntryAllow,
			mapKeyL3L4Port6To7ProtoTCPWorldIPIngress:        mapEntryAllow,
			mapKeyL3L4Port8To9ProtoTCPWorldIPIngress:        mapEntryAllow,
			mapKeyL3L4Port10ProtoTCPWorldIPIngress:          mapEntryAllow,
		})},
	}
	// Do not test in dualstack mode
	defer func(ipv4, ipv6 bool) {
		option.Config.EnableIPv4 = ipv4
		option.Config.EnableIPv6 = ipv6
	}(option.Config.EnableIPv4, option.Config.EnableIPv6)
	option.Config.EnableIPv4 = true
	option.Config.EnableIPv6 = false
	for _, tt := range tests {
		repo := newPolicyDistillery(t, selectorCache)
		for _, rule := range tt.rules {
			if rule != nil {
				_, _ = repo.MustAddList(api.Rules{rule})
			}
		}
		t.Run(tt.test, func(t *testing.T) {
			logBuffer := new(bytes.Buffer)
			repo = repo.WithLogBuffer(logBuffer)
			mapstate, err := repo.distillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, identity)
			if err != nil {
				t.Errorf("Policy resolution failure: %s", err)
			}
			// Dump the policy trace only on mismatch to keep passing runs quiet.
			if equal := assert.True(t, mapstate.Equal(&tt.expected), mapstate.diff(&tt.expected)); !equal {
				t.Logf("Policy Trace: \n%s\n", logBuffer.String())
				t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
			}
		})
	}
}
// Fixtures for Test_Allowception: nested allow/deny/allow CIDRs.
var (
	// allIPv4 covers the entire IPv4 address space.
	allIPv4    = api.CIDR("0.0.0.0/0")
	lblAllIPv4 = labels.ParseSelectLabelArray(fmt.Sprintf("%s:%s", labels.LabelSourceCIDR, allIPv4))

	// 1.0.0.0/8 — the denied middle layer.
	one3Z8         = api.CIDR("1.0.0.0/8")
	one3Z8Identity = localIdentity(16331)
	lblOne3Z8      = labels.ParseSelectLabelArray(fmt.Sprintf("%s:%s", labels.LabelSourceCIDR, one3Z8))

	// 1.1.1.1/32 — the innermost exception that stays allowed.
	one0Z32         = api.CIDR("1.1.1.1/32")
	one0Z32Identity = localIdentity(16332)
	lblOne0Z32      = labels.ParseSelectLabelArray(fmt.Sprintf("%s:%s", labels.LabelSourceCIDR, one0Z32))

	// ruleAllowEgressDenyCIDRSet allows egress to all of IPv4 while denying
	// egress to 1.0.0.0/8 except for 1.1.1.1/32.
	ruleAllowEgressDenyCIDRSet = api.NewRule().WithEgressRules([]api.EgressRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDR: api.CIDRSlice{allIPv4},
		},
	}}).WithEgressDenyRules([]api.EgressDenyRule{{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDRSet: api.CIDRRuleSlice{
				api.CIDRRule{
					Cidr:        one3Z8,
					ExceptCIDRs: []api.CIDR{one0Z32},
				},
			},
		},
	}}).WithEndpointSelector(api.WildcardEndpointSelector)
)
// Allow-ception tests that an allow within a deny within an allow
// is properly calculated: egress to all of IPv4 is allowed, 1.0.0.0/8 is
// denied, and 1.1.1.1/32 within the denied range is allowed again.
func Test_Allowception(t *testing.T) {
	// Cache policy enforcement value from when test was ran to avoid pollution
	// across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)
	identityCache := identity.IdentityMap{
		identity.NumericIdentity(identityFoo): labelsFoo,
		identity.ReservedIdentityWorld:        append(labels.LabelWorld.LabelArray(), lblAllIPv4...),
		one3Z8Identity:                        lblOne3Z8,  // 16331 (0x3fcb): ["1.0.0.0/8"]
		one0Z32Identity:                       lblOne0Z32, // 16332 (0x3fcc): ["1.1.1.1/32"]
	}
	selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)
	// Expected result: world allowed, the /8 denied, the /32 allowed again.
	computedMapStateForAllowCeption := emptyMapState(hivetest.Logger(t)).withState(mapStateMap{
		ingressKey(0, 0, 0, 0):                             mapEntryL7None_(lblsAllowAllIngress),
		egressKey(identity.ReservedIdentityWorld, 0, 0, 0): mapEntryAllow,
		egressKey(one3Z8Identity, 0, 0, 0):                 mapEntryDeny,
		egressKey(one0Z32Identity, 0, 0, 0):                mapEntryAllow,
	})
	identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
	// Do not test in dualstack mode
	defer func(ipv4, ipv6 bool) {
		option.Config.EnableIPv4 = ipv4
		option.Config.EnableIPv6 = ipv6
	}(option.Config.EnableIPv4, option.Config.EnableIPv6)
	option.Config.EnableIPv4 = true
	option.Config.EnableIPv6 = false
	repo := newPolicyDistillery(t, selectorCache)
	rules := api.Rules{ruleAllowEgressDenyCIDRSet}
	for _, rule := range rules {
		if rule != nil {
			_, _ = repo.MustAddList(api.Rules{rule})
		}
	}
	logBuffer := new(bytes.Buffer)
	repo = repo.WithLogBuffer(logBuffer)
	mapstate, err := repo.distillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, identity)
	if err != nil {
		t.Errorf("Policy resolution failure: %s", err)
	}
	// Dump the policy trace only when the computed map state is wrong.
	if equal := assert.True(t, mapstate.Equal(&computedMapStateForAllowCeption), mapstate.diff(&computedMapStateForAllowCeption)); !equal {
		t.Logf("Policy Trace: \n%s\n", logBuffer.String())
		t.Errorf("Policy obtained didn't match expected for endpoint %s", labelsFoo)
	}
}
// Test_EnsureEntitiesSelectableByCIDR verifies that a reserved entity (the
// host) can be selected by a CIDR rule when its identity carries the
// corresponding CIDR labels.
func Test_EnsureEntitiesSelectableByCIDR(t *testing.T) {
	// Cache policy enforcement value from when test was ran to avoid pollution
	// across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)
	// Give the reserved host identity the host's IPv4/IPv6 CIDR labels so a
	// ToCIDRSet rule can match it.
	hostLabel := labels.NewFrom(labels.LabelHost)
	hostLabel.MergeLabels(lblHostIPv4CIDR)
	hostLabel.MergeLabels(lblHostIPv6CIDR)
	identityCache := identity.IdentityMap{
		identity.NumericIdentity(identityFoo): labelsFoo,
		identity.ReservedIdentityHost:         hostLabel.LabelArray(),
	}
	selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)
	identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
	tests := []struct {
		test     string
		rules    api.Rules
		expected mapState
	}{
		{"host_cidr_select", api.Rules{ruleL3AllowHostEgress}, emptyMapState(hivetest.Logger(t)).withState(mapStateMap{
			mapKeyL3UnknownIngress: mapEntryL3UnknownIngress,
			mapKeyL3HostEgress:     mapEntryAllow,
		})},
	}
	for _, tt := range tests {
		repo := newPolicyDistillery(t, selectorCache)
		for _, rule := range tt.rules {
			if rule != nil {
				_, _ = repo.MustAddList(api.Rules{rule})
			}
		}
		t.Run(tt.test, func(t *testing.T) {
			logBuffer := new(bytes.Buffer)
			repo = repo.WithLogBuffer(logBuffer)
			mapstate, err := repo.distillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, identity)
			if err != nil {
				t.Errorf("Policy resolution failure: %s", err)
			}
			// Dump the policy trace only on mismatch.
			if equal := assert.True(t, mapstate.Equal(&tt.expected), mapstate.diff(&tt.expected)); !equal {
				t.Logf("Policy Trace: \n%s\n", logBuffer.String())
				t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
			}
		})
	}
}
// addCIDRIdentity registers a local-scope identity for the given CIDR prefix
// in the identity map c and returns it, reusing an existing entry when one
// already carries the same CIDR labels.
func addCIDRIdentity(prefix string, c identity.IdentityMap) identity.NumericIdentity {
	cidrLabels := labels.GetCIDRLabels(netip.MustParsePrefix(prefix)).LabelArray()
	// Reuse an identity that already has exactly these labels.
	for existing, existingLabels := range c {
		if existingLabels.Equals(cidrLabels) {
			return existing
		}
	}
	// Otherwise claim the first unused numeric identity in the local scope.
	for next := identity.IdentityScopeLocal; ; next++ {
		if _, taken := c[next]; !taken {
			c[next] = cidrLabels
			return next
		}
	}
}
// addFQDNIdentity computes the identity labels for the given FQDN selector.
// If an identity with those labels already exists in c, it is returned with a
// nil adds map; otherwise the first free local-scope identity is returned
// together with the single-entry map that should be added to the cache.
func addFQDNIdentity(fqdnSel api.FQDNSelector, c identity.IdentityMap) (id identity.NumericIdentity, adds identity.IdentityMap) {
	selLabel := fqdnSel.IdentityLabel()
	lblArray := labels.Labels{selLabel.Key: selLabel}.LabelArray()
	// Reuse an identity that already has exactly these labels.
	for existing, existingLabels := range c {
		if existingLabels.Equals(lblArray) {
			return existing, nil
		}
	}
	// Otherwise pick the first unused local-scope identity. Note that c is
	// not modified here; the caller applies the returned adds map.
	for id = identity.IdentityScopeLocal; ; id++ {
		if _, taken := c[id]; !taken {
			return id, identity.IdentityMap{id: lblArray}
		}
	}
}
// Validate that incrementally deleted identities are handled properly when present in both CIDR and FQDN rules.
func Test_IncrementalFQDNDeletion(t *testing.T) {
	logger := hivetest.Logger(t)
	// Cache policy enforcement value from when test was ran to avoid pollution
	// across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)
	// load in standard reserved identities
	identityCache := identity.IdentityMap{
		fooIdentity.ID: fooIdentity.LabelArray,
	}
	identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
		identityCache[ni] = id.Labels.LabelArray()
	})
	// Two CIDR identities referenced directly by the egress rule below.
	id2 := addCIDRIdentity("192.0.2.0/24", identityCache)
	id3 := addCIDRIdentity("192.0.3.0/24", identityCache)
	selectorCache := testNewSelectorCache(logger, identityCache)
	// The FQDN identity is NOT added to the selector cache yet; it is
	// injected (and later removed) incrementally below.
	fqdnSel := api.FQDNSelector{MatchName: "www.example.com"}
	idExample, fqdnIdentities := addFQDNIdentity(fqdnSel, identityCache)
	tests := []struct {
		test     string
		rules    api.Rules
		expected MapStateMap
		fqdnIds  identity.IdentityMap
		adds     MapStateMap
	}{{
		test: "incremental_fqdn_deletion",
		rules: api.Rules{
			&api.Rule{
				EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("foo")),
				Egress: []api.EgressRule{
					{
						EgressCommonRule: api.EgressCommonRule{
							ToCIDR: []api.CIDR{
								"192.0.2.0/24",
								"192.0.3.0/24",
							},
						},
					},
					{
						ToFQDNs: api.FQDNSelectorSlice{fqdnSel},
					},
				},
			},
		},
		// Initial map state: only the CIDR identities are present.
		expected: MapStateMap{
			mapKeyAllowAll__:     AllowEntry,
			egressL3OnlyKey(id2): AllowEntry,
			egressL3OnlyKey(id3): AllowEntry,
		},
		fqdnIds: maps.Clone(fqdnIdentities),
		// Incremental additions expected once the FQDN identity appears.
		adds: MapStateMap{
			egressL3OnlyKey(idExample): AllowEntry,
		},
	}}
	for _, tt := range tests {
		repo := newPolicyDistillery(t, selectorCache)
		repo.MustAddList(tt.rules)
		t.Run(tt.test, func(t *testing.T) {
			logBuffer := new(bytes.Buffer)
			repo = repo.WithLogBuffer(logBuffer)
			epp, err := repo.distillEndpointPolicy(logger, DummyOwner{
				logger: logger,
			}, fooIdentity)
			if err != nil {
				t.Fatal(err)
			}
			// 1. Baseline: FQDN identity absent, only CIDR entries expected.
			mapstate := epp.policyMapState
			if equal := assert.True(t, mapstate.Equals(tt.expected), mapstate.Diff(tt.expected)); !equal {
				t.Logf("Policy Trace: \n%s\n", logBuffer.String())
				t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
			}
			// 2. Incrementally add the FQDN identity and verify the map change.
			wg := &sync.WaitGroup{}
			selectorCache.UpdateIdentities(tt.fqdnIds, nil, wg)
			wg.Wait()
			closer, changes := epp.ConsumeMapChanges()
			adds := MapStateMap{}
			for k := range changes.Adds {
				adds[k] = epp.policyMapState.entries[k].MapStateEntry
			}
			closer()
			if equal := assert.True(t, maps.Equal(adds, tt.adds), adds.Diff(tt.adds)); !equal {
				t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
			}
			// 3. Incrementally delete the FQDN identity; map state must return
			// to the baseline.
			// let fqdn ID expire
			wg = &sync.WaitGroup{}
			selectorCache.UpdateIdentities(nil, tt.fqdnIds, wg)
			wg.Wait()
			closer, changes = epp.ConsumeMapChanges()
			closer()
			if equal := assert.True(t, epp.policyMapState.Equals(tt.expected), mapstate.Diff(tt.expected)); !equal {
				t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
			}
			epp.Ready()
			epp.Detach(logger)
		})
	}
}
// TestEgressPortRangePrecedence verifies that egress deny port ranges take
// precedence over overlapping egress allow port ranges, checking concrete
// verdicts port by port.
func TestEgressPortRangePrecedence(t *testing.T) {
	// portRange is used both to describe a rule to install and a verdict
	// expectation. In both uses endPort == 0 means "just startPort",
	// mirroring api.PortProtocol where EndPort == 0 denotes a single port.
	type portRange struct {
		startPort, endPort uint16
		isAllow            bool
	}
	tests := []struct {
		name       string
		rules      []portRange
		rangeTests []portRange
	}{
		{
			name: "deny range (1-1024) covers port allow (80)",
			rules: []portRange{
				{80, 0, true},
				{1, 1024, false},
			},
			rangeTests: []portRange{
				{79, 81, false},
				{1023, 1025, false},
			},
		},
		{
			name: "deny port (80) in broader allow range (1-1024)",
			rules: []portRange{
				{80, 0, false},
				{1, 1024, true},
			},
			rangeTests: []portRange{
				{1, 2, true},
				{79, 0, true},
				{80, 0, false},
				{81, 0, true},
				{1023, 1024, true},
				{1025, 1026, false},
			},
		},
		{
			name: "wildcard deny (*) covers broad allow range (1-1024)",
			rules: []portRange{
				{0, 0, false},
				{1, 1024, true},
			},
			rangeTests: []portRange{
				{1, 2, false},
				{1023, 1025, false},
			},
		},
		{
			name: "wildcard allow (*) has an deny range hole (1-1024)",
			rules: []portRange{
				{0, 0, true},
				{1, 1024, false},
			},
			rangeTests: []portRange{
				{1, 2, false},
				{1023, 1024, false},
				{1025, 1026, true},
				{65534, 0, true},
			},
		},
		{
			name: "two allow ranges (80-90, 90-100) with overlapping deny (85-95)",
			rules: []portRange{
				{80, 90, true},
				{85, 95, false},
				{90, 100, true},
			},
			rangeTests: []portRange{
				{79, 0, false},
				{80, 84, true},
				{85, 95, false},
				{96, 100, true},
				// 101 matches no allow range, so default-deny applies
				// (consistent with {1025, 1026, false} above). This entry was
				// previously {101, 0, true} but never executed due to the
				// single-port loop bug fixed below.
				{101, 0, false},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
			tr := api.Rule{
				EndpointSelector: endpointSelectorA,
			}
			// Install one allow or deny egress rule per configured range.
			for _, rul := range tt.rules {
				pp := api.PortProtocol{
					Port:     fmt.Sprintf("%d", rul.startPort),
					EndPort:  int32(rul.endPort),
					Protocol: api.ProtoTCP,
				}
				if rul.isAllow {
					tr.Egress = append(tr.Egress, api.EgressRule{
						EgressCommonRule: api.EgressCommonRule{
							ToEndpoints: []api.EndpointSelector{endpointSelectorB},
						},
						ToPorts: []api.PortRule{{
							Ports: []api.PortProtocol{pp},
						}},
					})
				} else {
					tr.EgressDeny = append(tr.EgressDeny, api.EgressDenyRule{
						EgressCommonRule: api.EgressCommonRule{
							ToEndpoints: []api.EndpointSelector{endpointSelectorB},
						},
						ToPorts: []api.PortDenyRule{{
							Ports: []api.PortProtocol{pp},
						}},
					})
				}
			}
			td.repo.mustAdd(tr)
			// Check the verdict for every port in each expectation range.
			flow := flowAToB
			for _, rt := range tt.rangeTests {
				// Treat endPort == 0 as a single-port check. Previously the
				// loop condition `i <= rt.endPort` was false immediately for
				// such entries, silently skipping them.
				end := rt.endPort
				if end == 0 {
					end = rt.startPort
				}
				for i := rt.startPort; i <= end; i++ {
					flow.Dport = i
					verdict := api.Denied
					if rt.isAllow {
						verdict = api.Allowed
					}
					checkFlow(t, td.repo, flow, verdict)
				}
			}
		})
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"encoding/json"
"fmt"
"log/slog"
"math/bits"
"sort"
"strconv"
"strings"
"sync/atomic"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/cilium/proxy/pkg/policy/api/kafka"
k8sTypes "k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/container/bitlpm"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/endpoint/regeneration"
"github.com/cilium/cilium/pkg/iana"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/u8proto"
)
// AuthType, AuthTypes and AuthRequirement are aliases into the types package
// so that policy code can use them without an extra import.
type AuthType = types.AuthType
type AuthTypes = types.AuthTypes
type AuthRequirement = types.AuthRequirement

// authMap maps remote selectors to their needed AuthTypes, if any.
type authMap map[CachedSelector]types.AuthTypes
// TLSContext holds the secret values resolved from an 'api.TLSContext'.
type TLSContext struct {
	// TrustedCA is the resolved trusted CA value, "" if unset.
	TrustedCA string `json:"trustedCA,omitempty"`
	// CertificateChain is the resolved certificate chain value, "" if unset.
	CertificateChain string `json:"certificateChain,omitempty"`
	// PrivateKey is the resolved private key value, "" if unset.
	PrivateKey string `json:"privateKey,omitempty"`
	// Secret holds the name of the Secret that was referenced in the Policy
	Secret k8sTypes.NamespacedName
	// FromFile is true if the values in the keys above were read from the filesystem
	// and not a Kubernetes Secret
	FromFile bool
}
// Equal reports whether 'a' and 'b' are both nil, or both non-nil with
// identical contents.
func (a *TLSContext) Equal(b *TLSContext) bool {
	if a == nil || b == nil {
		return a == b // true only when both are nil
	}
	return *a == *b
}
// MarshalJSON marshals a redacted version of the TLSContext. We want to see
// which fields are present, but not reveal their values in any logs, etc.
func (t *TLSContext) MarshalJSON() ([]byte, error) {
	// Local alias prevents json.Marshal from recursing into this method.
	type tlsContext TLSContext
	redact := func(value string) string {
		if value == "" {
			return ""
		}
		return "[redacted]"
	}
	redacted := tlsContext{
		TrustedCA:        redact(t.TrustedCA),
		CertificateChain: redact(t.CertificateChain),
		PrivateKey:       redact(t.PrivateKey),
	}
	return json.Marshal(&redacted)
}
// String returns the redacted JSON form of the TLSContext for logging.
func (t *TLSContext) String() string {
	// Error deliberately ignored: marshaling plain string fields cannot fail.
	b, _ := t.MarshalJSON()
	return string(b)
}
// StringSet is a set of unique strings.
type StringSet map[string]struct{}

// Equal reports whether 'a' and 'b' contain exactly the same strings.
func (a StringSet) Equal(b StringSet) bool {
	if len(a) != len(b) {
		return false
	}
	for s := range a {
		if _, found := b[s]; !found {
			return false
		}
	}
	return true
}

// NewStringSet returns a StringSet initialized from slice of strings.
// Returns nil for an empty slice.
func NewStringSet(from []string) StringSet {
	if len(from) == 0 {
		return nil
	}
	out := make(StringSet, len(from))
	for i := range from {
		out[from[i]] = struct{}{}
	}
	return out
}

// Merge returns StringSet with strings from both a and b.
// Returns a or b, possibly with modifications: when 'a' is non-empty it is
// updated in place and returned; otherwise 'b' is returned as-is.
func (a StringSet) Merge(b StringSet) StringSet {
	if len(a) == 0 {
		return b
	}
	for s := range b {
		a[s] = struct{}{}
	}
	return a
}
// PerSelectorPolicy contains policy rules for a CachedSelector, i.e. for a
// selection of numerical identities.
type PerSelectorPolicy struct {
	// L7Parser specifies the L7 protocol parser (optional). If specified as
	// an empty string, then means that no L7 proxy redirect is performed.
	L7Parser L7ParserType `json:"-"`
	// TerminatingTLS is the TLS context for the connection terminated by
	// the L7 proxy. For egress policy this specifies the server-side TLS
	// parameters to be applied on the connections originated from the local
	// POD and terminated by the L7 proxy. For ingress policy this specifies
	// the server-side TLS parameters to be applied on the connections
	// originated from a remote source and terminated by the L7 proxy.
	TerminatingTLS *TLSContext `json:"terminatingTLS,omitempty"`
	// OriginatingTLS is the TLS context for the connections originated by
	// the L7 proxy. For egress policy this specifies the client-side TLS
	// parameters for the upstream connection originating from the L7 proxy
	// to the remote destination. For ingress policy this specifies the
	// client-side TLS parameters for the connection from the L7 proxy to
	// the local POD.
	OriginatingTLS *TLSContext `json:"originatingTLS,omitempty"`
	// ServerNames is a list of allowed TLS SNI values. If not empty, then
	// TLS must be present and one of the provided SNIs must be indicated in the
	// TLS handshake.
	ServerNames StringSet `json:"serverNames,omitempty"`
	// Listener is an optional fully qualified name of an Envoy Listener defined in a
	// CiliumEnvoyConfig CRD that should be used for this traffic instead of the default
	// listener
	Listener string `json:"listener,omitempty"`
	// Priority of the proxy redirect used when multiple proxy ports would apply to the same
	// MapStateEntry.
	// Lower numbers indicate higher priority. Except for the default 0, which indicates the
	// lowest priority. If higher priority desired, a low unique number like 1, 2, or 3 should
	// be explicitly specified here.
	Priority ListenerPriority `json:"priority,omitempty"`
	// EnvoyHTTPRules are pre-computed HTTP rules, computed after rule merging is complete.
	EnvoyHTTPRules *cilium.HttpNetworkPolicyRules `json:"-"`
	// CanShortCircuit is true if all 'EnvoyHTTPRules' may be
	// short-circuited by other matches.
	CanShortCircuit bool `json:"-"`
	api.L7Rules
	// Authentication is the kind of cryptographic authentication required for the traffic to be
	// allowed at L3, if any.
	Authentication *api.Authentication `json:"auth,omitempty"`
	// IsDeny is set if traffic matching this per-selector policy should be denied.
	IsDeny bool `json:",omitempty"`
}
// Equal reports whether 'a' and 'b' represent the same L7 rules, including
// parser type, TLS contexts, SNIs, listener, priority, auth and deny state.
func (a *PerSelectorPolicy) Equal(b *PerSelectorPolicy) bool {
	if a == nil || b == nil {
		return a == b // equal only when both are nil
	}
	return a.L7Parser == b.L7Parser &&
		a.TerminatingTLS.Equal(b.TerminatingTLS) &&
		a.OriginatingTLS.Equal(b.OriginatingTLS) &&
		a.ServerNames.Equal(b.ServerNames) &&
		a.Listener == b.Listener &&
		a.Priority == b.Priority &&
		(a.Authentication == nil && b.Authentication == nil ||
			a.Authentication != nil && a.Authentication.DeepEqual(b.Authentication)) &&
		a.IsDeny == b.IsDeny &&
		a.L7Rules.DeepEqual(&b.L7Rules)
}
// GetListener returns the listener of the PerSelectorPolicy, or "" for a nil
// policy.
func (a *PerSelectorPolicy) GetListener() string {
	if a != nil {
		return a.Listener
	}
	return ""
}
// GetPriority returns the priority of the listener of the PerSelectorPolicy,
// or the default 0 for a nil policy.
func (a *PerSelectorPolicy) GetPriority() ListenerPriority {
	if a != nil {
		return a.Priority
	}
	return 0
}
// getAuthType maps an api.Authentication mode to an AuthType. The boolean
// result is true when the mode was explicitly set to a recognized value.
func getAuthType(auth *api.Authentication) (bool, AuthType) {
	if auth == nil {
		return false, types.AuthTypeDisabled
	}
	switch auth.Mode {
	case api.AuthenticationModeRequired:
		return true, types.AuthTypeSpire
	case api.AuthenticationModeAlwaysFail:
		return true, types.AuthTypeAlwaysFail
	case api.AuthenticationModeDisabled:
		return true, types.AuthTypeDisabled
	default:
		// Unknown mode: treat as not explicitly configured.
		return false, types.AuthTypeDisabled
	}
}
// getAuthType returns the AuthType of this policy's Authentication field;
// a nil policy has authentication disabled.
func (a *PerSelectorPolicy) getAuthType() (bool, AuthType) {
	if a != nil {
		return getAuthType(a.Authentication)
	}
	return false, types.AuthTypeDisabled
}
// getAuthRequirement returns the AuthRequirement of the L4Filter, marking the
// explicit bit when the auth mode was explicitly configured.
func (a *PerSelectorPolicy) getAuthRequirement() AuthRequirement {
	if a == nil {
		return AuthRequirement(types.AuthTypeDisabled)
	}
	explicit, authType := getAuthType(a.Authentication)
	if explicit {
		return AuthRequirement(authType) | types.AuthTypeIsExplicit
	}
	return AuthRequirement(authType)
}
// IsRedirect reports whether this policy redirects traffic to an L7 proxy,
// i.e. whether an L7 parser type is configured.
func (sp *PerSelectorPolicy) IsRedirect() bool {
	if sp == nil {
		return false
	}
	return sp.L7Parser != ""
}
// HasL7Rules reports whether the `L7Rules` contains any L7 rules.
func (sp *PerSelectorPolicy) HasL7Rules() bool {
	if sp == nil {
		return false
	}
	return !sp.L7Rules.IsEmpty()
}
// GetDeny reports whether this is a deny policy; a nil policy is not deny.
func (a *PerSelectorPolicy) GetDeny() bool {
	if a == nil {
		return false
	}
	return a.IsDeny
}
// L7DataMap contains a map of L7 rules per endpoint where key is a CachedSelector.
// A nil value means the selector carries no selector-specific policy restriction.
type L7DataMap map[CachedSelector]*PerSelectorPolicy
// MarshalJSON emits the map as a JSON array of single-key objects, sorted by
// selector for deterministic output. Entries that fail to marshal are
// replaced by an inline error string; the last such error is also returned
// alongside the (complete) output.
func (l7 L7DataMap) MarshalJSON() ([]byte, error) {
	if len(l7) == 0 {
		return []byte("[]"), nil
	}
	/* First, create a sorted slice of the selectors so we can get
	 * consistent JSON output */
	selectors := make(types.CachedSelectorSlice, 0, len(l7))
	for cs := range l7 {
		selectors = append(selectors, cs)
	}
	sort.Sort(selectors)
	/* Now we can iterate the slice and generate JSON entries. */
	var err error
	buffer := bytes.NewBufferString("[")
	for _, cs := range selectors {
		buffer.WriteString("{\"")
		buffer.WriteString(cs.String())
		buffer.WriteString("\":")
		// Note: use '=' rather than ':=' so that a marshal failure is
		// propagated to the returned error instead of being shadowed.
		var b []byte
		b, err = json.Marshal(l7[cs])
		if err == nil {
			buffer.Write(b)
		} else {
			buffer.WriteString("\"L7DataMap error: ")
			buffer.WriteString(err.Error())
			buffer.WriteString("\"")
		}
		buffer.WriteString("},")
	}
	buffer.Truncate(buffer.Len() - 1) // Drop the final ","
	buffer.WriteString("]")
	return buffer.Bytes(), err
}
// L7ParserType is the type used to indicate what L7 parser to use.
// Consts are defined for all well known L7 parsers.
// Unknown string values are created for key-value pair policies, which
// are then transparently used in redirect configuration.
type L7ParserType string

// String returns the parser type as a plain string.
func (l7 L7ParserType) String() string {
	return string(l7)
}
const (
	// ParserTypeNone represents the case where no parser type is provided.
	ParserTypeNone L7ParserType = ""
	// ParserTypeTLS is used for TLS origination, termination, or SNI filtering without any L7
	// parsing. If TLS policies are used with HTTP rules, ParserTypeHTTP is used instead.
	ParserTypeTLS L7ParserType = "tls"
	// ParserTypeCRD is used with a custom CiliumEnvoyConfig redirection. Incompatible with any
	// parser type with L7 enforcement (HTTP, Kafka, proxylib), as the custom Listener generally
	// does not support them.
	ParserTypeCRD L7ParserType = "crd"
	// ParserTypeHTTP specifies a HTTP parser type
	ParserTypeHTTP L7ParserType = "http"
	// ParserTypeKafka specifies a Kafka parser type
	ParserTypeKafka L7ParserType = "kafka"
	// ParserTypeDNS specifies a DNS parser type
	ParserTypeDNS L7ParserType = "dns"
)
// ListenerPriority is aliased from the types package.
type ListenerPriority = types.ListenerPriority

// API listener priorities and corresponding defaults for L7 parser types
// 0 - default (low) priority for all proxy redirects
// 1 - highest listener priority
// ..
// 100 - lowest (non-default) listener priority
// 101 - priority for HTTP parser type
// 106 - priority for the Kafka parser type
// 111 - priority for the proxylib parsers
// 116 - priority for TLS interception parsers (can be promoted to HTTP/Kafka/proxylib)
// 121 - priority for DNS parser type
// 126 - default priority for CRD parser type
// 127 - reserved (listener priority passed as 0)
//
// MapStateEntry stores this reverted in 'ProxyPortPriority' where higher numbers have higher
// precedence
const (
	ListenerPriorityNone     ListenerPriority = 0
	ListenerPriorityHTTP     ListenerPriority = 101
	ListenerPriorityKafka    ListenerPriority = 106
	ListenerPriorityProxylib ListenerPriority = 111
	ListenerPriorityTLS      ListenerPriority = 116
	ListenerPriorityDNS      ListenerPriority = 121
	ListenerPriorityCRD      ListenerPriority = 126
)
// defaultPriority maps the parser type to an "API listener priority".
func (l7 L7ParserType) defaultPriority() ListenerPriority {
	switch l7 {
	case ParserTypeHTTP:
		return ListenerPriorityHTTP
	case ParserTypeKafka:
		return ListenerPriorityKafka
	case ParserTypeTLS:
		return ListenerPriorityTLS
	case ParserTypeDNS:
		return ListenerPriorityDNS
	case ParserTypeCRD:
		// CRD type can have an explicit higher priority in range 1-100
		return ListenerPriorityCRD
	case ParserTypeNone:
		// no proxy redirect, no priority
		return ListenerPriorityNone
	default:
		// any other value names a proxylib parser
		return ListenerPriorityProxylib
	}
}
// redirectTypes is a bitmask of redirection types of multiple filters
type redirectTypes uint16

const (
	// redirectTypeDNS bit is set when policy contains a redirection to DNS proxy
	redirectTypeDNS redirectTypes = 1 << iota
	// redirectTypeEnvoy bit is set when policy contains a redirection to Envoy
	redirectTypeEnvoy
	// redirectTypeProxylib bits are set when policy contains a redirection to Proxylib (via
	// Envoy); it therefore includes the redirectTypeEnvoy bit.
	redirectTypeProxylib redirectTypes = 1<<iota | redirectTypeEnvoy
	// redirectTypeNone represents the case where there is no proxy redirect
	redirectTypeNone redirectTypes = redirectTypes(0)
)
// canPromoteTo reports whether a filter with parser type 'from' may be
// upgraded to parser type 'to' when merging rules.
func (from L7ParserType) canPromoteTo(to L7ParserType) bool {
	if from == ParserTypeNone {
		// ParserTypeNone can be promoted to any other type.
		return true
	}
	if from == ParserTypeTLS {
		// ParserTypeTLS can be promoted to any type that keeps TLS
		// interception, i.e. anything except none, DNS, or CRD.
		switch to {
		case ParserTypeNone, ParserTypeDNS, ParserTypeCRD:
			return false
		}
		return true
	}
	return false
}
// Merge combines ParserTypes 'a' and 'b' if possible, returning an error when
// neither can be promoted to the other.
func (a L7ParserType) Merge(b L7ParserType) (L7ParserType, error) {
	switch {
	case a == b:
		return a, nil
	case a.canPromoteTo(b):
		return b, nil
	case b.canPromoteTo(a):
		return a, nil
	}
	return ParserTypeNone, fmt.Errorf("cannot merge conflicting L7 parsers (%s/%s)", a, b)
}
// hasWildcard checks if the L7Rules contains a wildcard rule for the given
// parser type. For DNS a wildcard is the "*" pattern; for HTTP an empty rule;
// for Kafka an empty topic; for custom L7 protocols an empty key-value rule.
func hasWildcard(rules *api.L7Rules, parserType L7ParserType) bool {
	if rules == nil {
		return false
	}
	switch parserType {
	case ParserTypeDNS:
		for _, r := range rules.DNS {
			if r.MatchPattern == "*" {
				return true
			}
		}
	case ParserTypeHTTP:
		for _, r := range rules.HTTP {
			if r.Path == "" && r.Method == "" && r.Host == "" &&
				len(r.Headers) == 0 && len(r.HeaderMatches) == 0 {
				return true
			}
		}
	case ParserTypeKafka:
		for _, r := range rules.Kafka {
			if r.Topic == "" {
				return true
			}
		}
	default:
		// Custom L7 rules; any other parser type is unsupported.
		if rules.L7Proto != "" {
			for _, r := range rules.L7 {
				if len(r) == 0 {
					return true
				}
			}
		}
	}
	return false
}
// addWildcard adds a wildcard rule to the L7Rules for the given parser type.
// It returns a copy of the rules with the wildcard rule added.
//
// The extended slice is re-sliced with a full slice expression
// (s[:len(s):len(s)]) so that append is forced to allocate a new backing
// array and never writes the wildcard element into the caller's slice when
// it has spare capacity.
func addWildcard(rules *api.L7Rules, parserType L7ParserType) *api.L7Rules {
	rulesCopy := *rules
	result := &rulesCopy
	switch {
	case parserType == ParserTypeDNS:
		if len(rules.DNS) > 0 {
			result.DNS = append(rules.DNS[:len(rules.DNS):len(rules.DNS)], api.PortRuleDNS{MatchPattern: "*"})
		}
	case parserType == ParserTypeHTTP:
		if len(rules.HTTP) > 0 {
			result.HTTP = append(rules.HTTP[:len(rules.HTTP):len(rules.HTTP)], api.PortRuleHTTP{})
		}
	case parserType == ParserTypeKafka:
		if len(rules.Kafka) > 0 {
			result.Kafka = append(rules.Kafka[:len(rules.Kafka):len(rules.Kafka)], kafka.PortRule{})
		}
	case rules.L7Proto != "":
		// For custom L7 rules with L7Proto
		if len(rules.L7) > 0 {
			result.L7 = append(rules.L7[:len(rules.L7):len(rules.L7)], api.PortRuleL7{})
		}
	default:
		// Unsupported parser type: return the shallow copy unchanged.
	}
	return result
}
// ensureWildcard ensures that the L7Rules contains a wildcard rule for the
// given parser type. It returns the rules unchanged when a wildcard is
// already present (or rules is nil), otherwise a copy with one added.
func ensureWildcard(rules *api.L7Rules, parserType L7ParserType) *api.L7Rules {
	switch {
	case rules == nil:
		return nil
	case hasWildcard(rules, parserType):
		return rules
	default:
		return addWildcard(rules, parserType)
	}
}
// L4Filter represents the policy (allowed remote sources / destinations of
// traffic) that applies at a specific L4 port/protocol combination (including
// all ports and protocols), at either ingress or egress. The policy here is
// specified in terms of selectors that are mapped to security identities via
// the selector cache.
type L4Filter struct {
	// Port is the destination port to allow. Port 0 indicates that all traffic
	// is allowed at L4.
	Port uint16 `json:"port"`
	// EndPort is zero for a singular port
	EndPort  uint16 `json:"endPort,omitempty"`
	PortName string `json:"port-name,omitempty"`
	// Protocol is the L4 protocol to allow or NONE
	Protocol api.L4Proto `json:"protocol"`
	// U8Proto is the Protocol in numeric format, or 0 for NONE
	U8Proto u8proto.U8proto `json:"-"`
	// wildcard is the cached selector representing a wildcard in this filter, if any.
	// This is nil if the wildcard selector is not in 'PerSelectorPolicies'.
	// When the wildcard selector is in 'PerSelectorPolicies' this is set to that
	// same selector, which can then be used as a map key to find the corresponding
	// L4-only L7 policy (which can be nil).
	wildcard CachedSelector
	// PerSelectorPolicies is a map of policies for selectors, including any L7 rules passed to
	// the L7 proxy. nil values represent cached selectors that have selector-specific policy
	// restriction (such as no L7 rules). Holds references to the cached selectors, which must
	// be released!
	PerSelectorPolicies L7DataMap `json:"l7-rules,omitempty"`
	// Ingress is true if filter applies at ingress; false if it applies at egress.
	Ingress bool `json:"-"`
	// RuleOrigin is a set of rule labels tracking which policy rules are the origin for this
	// L3/L4 filter.
	RuleOrigin map[CachedSelector]ruleOrigin `json:"-"`
	// policy is the L4Policy this filter belongs to.
	// This reference is circular, but it is cleaned up at Detach()
	policy atomic.Pointer[L4Policy]
}
// SelectsAllEndpoints returns whether the L4Filter selects all
// endpoints, which is true if the wildcard endpoint selector is present in the
// map.
func (l4 *L4Filter) SelectsAllEndpoints() bool {
	for sel := range l4.PerSelectorPolicies {
		if sel.IsWildcard() {
			return true
		}
	}
	return false
}
// GetPerSelectorPolicies returns the PerSelectorPolicies of the L4Filter.
// Note: this is a direct reference, not a copy; callers must not mutate it.
func (l4 *L4Filter) GetPerSelectorPolicies() L7DataMap {
	return l4.PerSelectorPolicies
}

// GetIngress returns whether the L4Filter applies at ingress or egress.
func (l4 *L4Filter) GetIngress() bool {
	return l4.Ingress
}

// GetPort returns the port at which the L4Filter applies as a uint16.
func (l4 *L4Filter) GetPort() uint16 {
	return l4.Port
}
// Equals returns true if two L4Filters select the same port/protocol at the
// same direction with equal per-selector policies.
func (l4 *L4Filter) Equals(bL4 *L4Filter) bool {
	// Compare the scalar fields first; bail out early on any mismatch.
	if l4.Port != bL4.Port ||
		l4.EndPort != bL4.EndPort ||
		l4.PortName != bL4.PortName ||
		l4.Protocol != bL4.Protocol ||
		l4.Ingress != bL4.Ingress ||
		l4.wildcard != bL4.wildcard {
		return false
	}
	// The per-selector maps must hold the same selectors with equal policies.
	if len(l4.PerSelectorPolicies) != len(bL4.PerSelectorPolicies) {
		return false
	}
	for sel, policy := range l4.PerSelectorPolicies {
		other, found := bL4.PerSelectorPolicies[sel]
		if !found || !other.Equal(policy) {
			return false
		}
	}
	return true
}
// ChangeState allows caller to revert changes made by (multiple) toMapState call(s)
// All fields are maps so we can pass this by value.
type ChangeState struct {
	Adds    Keys        // Added or modified keys, if not nil
	Deletes Keys        // deleted keys, if not nil
	old     mapStateMap // Old values of all modified or deleted keys, if not nil
}
// NewRevertState returns an empty ChangeState suitable for reverting MapState
// changes. The private 'old' field is initialized so that old state can be
// restored if need be. 'Deletes' is intentionally left nil, as in the
// zero value.
func NewRevertState() ChangeState {
	var c ChangeState
	c.Adds = make(Keys)
	c.old = make(mapStateMap)
	return c
}
// Empty reports whether this ChangeState records no adds, deletes, or old
// values at all.
func (c *ChangeState) Empty() bool {
	return len(c.Adds) == 0 && len(c.Deletes) == 0 && len(c.old) == 0
}
// Size returns the total number of Adds minus the total number of true
// Deletes (Deletes that are not also in Adds). The return value can be
// negative.
func (c *ChangeState) Size() int {
	size := len(c.Adds)
	for key := range c.Deletes {
		if _, added := c.Adds[key]; !added {
			size--
		}
	}
	return size
}
// generateWildcardMapStateEntry creates the map state entry for the wildcard
// selector in the filter, or an Invalid entry when the filter has no wildcard.
func (l4 *L4Filter) generateWildcardMapStateEntry(logger *slog.Logger, p *EndpointPolicy, port uint16) mapStateEntry {
	if l4.wildcard == nil {
		return mapStateEntry{MapStateEntry: MapStateEntry{Invalid: true}}
	}
	return l4.makeMapStateEntry(logger, p, port, l4.wildcard, l4.PerSelectorPolicies[l4.wildcard])
}
// makeMapStateEntry creates a mapStateEntry for the given selector and policy
// for the Endpoint. If the policy is a redirect whose proxy port cannot be
// looked up yet, an Invalid entry is returned instead.
func (l4 *L4Filter) makeMapStateEntry(logger *slog.Logger, p *EndpointPolicy, port uint16, cs CachedSelector, currentRule *PerSelectorPolicy) mapStateEntry {
	var proxyPort uint16
	if currentRule.IsRedirect() {
		var err error
		proxyPort, err = p.LookupRedirectPort(l4.Ingress, string(l4.Protocol), port, currentRule.GetListener())
		if err != nil {
			// Skip unrealized redirects; this happens routinely just
			// before new redirects are realized. Once created, we are called
			// again.
			logger.Debug(
				"Skipping unrealized redirect",
				logfields.Error, err,
				logfields.EndpointSelector, cs,
			)
			return mapStateEntry{MapStateEntry: MapStateEntry{Invalid: true}}
		}
	}
	// proxyPort remains 0 for non-redirect policies.
	return newMapStateEntry(
		l4.RuleOrigin[cs],
		proxyPort,
		currentRule.GetPriority(),
		currentRule.GetDeny(),
		currentRule.getAuthRequirement(),
	)
}
// toMapState converts a single filter into a MapState entries added to 'p.PolicyMapState'.
//
// Note: It is possible for two selectors to select the same security ID. To give priority to deny,
// AuthType, and L7 redirection (e.g., for visibility purposes), the mapstate entries are added to
// 'p.PolicyMapState' using insertWithChanges().
// Keys and old values of any added or deleted entries are added to 'changes'.
// Proxy ports for redirects are resolved via p.LookupRedirectPort (see makeMapStateEntry).
// p.SelectorCache is used as Identities interface during this call, which only has GetPrefix() that
// needs no lock.
func (l4 *L4Filter) toMapState(logger *slog.Logger, p *EndpointPolicy, features policyFeatures, changes ChangeState) {
	port := l4.Port
	proto := l4.U8Proto
	direction := trafficdirection.Egress
	if l4.Ingress {
		direction = trafficdirection.Ingress
	}
	scopedLog := logger
	if option.Config.Debug {
		scopedLog = logger.With(
			logfields.Port, port,
			logfields.PortName, l4.PortName,
			logfields.Protocol, proto,
			logfields.TrafficDirection, direction,
		)
	}
	// resolve named port
	if port == 0 && l4.PortName != "" {
		port = p.PolicyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
		if port == 0 {
			return // nothing to be done for undefined named port
		}
	}
	// Expand the port range into masked port/prefix keys, one per
	// power-of-two aligned sub-range.
	var keysToAdd []Key
	for _, mp := range PortRangeToMaskedPorts(port, l4.EndPort) {
		keysToAdd = append(keysToAdd,
			KeyForDirection(direction).WithPortProtoPrefix(proto, mp.port, uint8(bits.LeadingZeros16(^mp.mask))))
	}
	// Compute and insert the wildcard entry, if present.
	wildcardEntry := l4.generateWildcardMapStateEntry(scopedLog, p, port)
	if !wildcardEntry.Invalid {
		for _, keyToAdd := range keysToAdd {
			// Identity 0 denotes the L4-only (any identity) key.
			keyToAdd.Identity = 0
			p.policyMapState.insertWithChanges(keyToAdd, wildcardEntry, features, changes)
			if port == 0 {
				// Allow-all
				scopedLog.Debug(
					"ToMapState: allow all",
					logfields.EndpointSelector, l4.wildcard,
				)
			} else {
				// L4 allow
				scopedLog.Debug(
					"ToMapState: L4 allow all",
					logfields.EndpointSelector, l4.wildcard,
				)
			}
		}
	}
	for cs, currentRule := range l4.PerSelectorPolicies {
		// is this wildcard? If so, we already added it above
		if cs == l4.wildcard {
			continue
		}
		// create MapStateEntry
		entry := l4.makeMapStateEntry(logger, p, port, cs, currentRule)
		if entry.Invalid {
			continue
		}
		// If this entry is identical to the wildcard's entry, we can elide it.
		// Do not elide for port wildcards. TODO: This is probably too
		// conservative, determine if it's safe to elide l3 entry when no l4 specifier is present.
		if !wildcardEntry.Invalid && port != 0 && entry.MapStateEntry == wildcardEntry.MapStateEntry {
			scopedLog.Debug("ToMapState: Skipping L3/L4 key due to existing identical L4-only key", logfields.EndpointSelector, cs)
			continue
		}
		idents := cs.GetSelections(p.VersionHandle)
		if option.Config.Debug {
			if entry.IsDeny() {
				scopedLog.Debug(
					"ToMapState: Denied remote IDs",
					logfields.Version, p.VersionHandle,
					logfields.EndpointSelector, cs,
					logfields.PolicyID, idents,
				)
			} else {
				scopedLog.Debug(
					"ToMapState: Allowed remote IDs",
					logfields.Version, p.VersionHandle,
					logfields.EndpointSelector, cs,
					logfields.PolicyID, idents,
				)
			}
		}
		// Insert one entry per selected identity per masked-port key.
		for _, id := range idents {
			for _, keyToAdd := range keysToAdd {
				keyToAdd.Identity = id
				p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes)
			}
		}
	}
	if option.Config.Debug {
		scopedLog.Debug(
			"ToMapChange changes",
			logfields.PolicyKeysAdded, changes.Adds,
			logfields.PolicyKeysDeleted, changes.Deletes,
			logfields.PolicyEntriesOld, changes.old,
		)
	}
}
// IdentitySelectionUpdated implements CachedSelectionUser interface
// This call is made from a single goroutine in FIFO order to keep add
// and delete events ordered properly. No locks are held.
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
func (l4 *L4Filter) IdentitySelectionUpdated(logger *slog.Logger, cs types.CachedSelector, added, deleted []identity.NumericIdentity) {
	logger.Debug(
		"identities selected by L4Filter updated",
		logfields.EndpointSelector, cs,
		logfields.AddedPolicyID, added,
		logfields.DeletedPolicyID, deleted,
	)
	// Skip updates on wildcard selectors, as datapath and L7
	// proxies do not need enumeration of all ids for L3 wildcard.
	// This mirrors the per-selector logic in toMapState().
	if cs.IsWildcard() {
		return
	}
	// Push endpoint policy changes.
	//
	// `l4.policy` is nil when the filter is detached so
	// that we could not push updates on an unstable policy.
	l4Policy := l4.policy.Load()
	if l4Policy != nil {
		l4Policy.AccumulateMapChanges(logger, l4, cs, added, deleted)
	}
}
// IdentitySelectionCommit implements CachedSelectionUser interface; it
// triggers an incremental sync of accumulated map changes for the given
// version transaction.
func (l4 *L4Filter) IdentitySelectionCommit(logger *slog.Logger, txn *versioned.Tx) {
	logger.Debug(
		"identity selection updates done",
		logfields.NewVersion, txn,
	)
	// Push endpoint policy incremental sync.
	//
	// `l4.policy` is nil when the filter is detached so
	// that we could not push updates on an unstable policy.
	l4Policy := l4.policy.Load()
	if l4Policy != nil {
		l4Policy.SyncMapChanges(l4, txn)
	}
}
// IsPeerSelector always returns true: selectors cached on behalf of an
// L4Filter select policy peers.
func (l4 *L4Filter) IsPeerSelector() bool {
	return true
}
// cacheIdentitySelector adds 'sel' to the selector cache on behalf of this
// filter and, when newly added, records it in PerSelectorPolicies with no
// per-selector policy yet.
func (l4 *L4Filter) cacheIdentitySelector(sel api.EndpointSelector, lbls stringLabels, selectorCache *SelectorCache) CachedSelector {
	cs, newlyAdded := selectorCache.AddIdentitySelector(l4, lbls, sel)
	if newlyAdded {
		l4.PerSelectorPolicies[cs] = nil // no per-selector policy (yet)
	}
	return cs
}
// cacheIdentitySelectors caches each endpoint selector in 'selectors' for
// this filter, labeled with the rule origin's labels.
func (l4 *L4Filter) cacheIdentitySelectors(selectors api.EndpointSelectorSlice, meta ruleOrigin, selectorCache *SelectorCache) {
	labels := meta.stringLabels()
	for i := range selectors {
		l4.cacheIdentitySelector(selectors[i], labels, selectorCache)
	}
}
// cacheFQDNSelectors caches each FQDN selector in 'selectors' for this
// filter, labeled with the rule origin's labels.
func (l4 *L4Filter) cacheFQDNSelectors(selectors api.FQDNSelectorSlice, meta ruleOrigin, selectorCache *SelectorCache) {
	labels := meta.stringLabels()
	for i := range selectors {
		l4.cacheFQDNSelector(selectors[i], labels, selectorCache)
	}
}
// cacheFQDNSelector adds the FQDN selector 'sel' to the selector cache on
// behalf of this filter and, when newly added, records it in
// PerSelectorPolicies with no per-selector policy yet.
func (l4 *L4Filter) cacheFQDNSelector(sel api.FQDNSelector, lbls stringLabels, selectorCache *SelectorCache) types.CachedSelector {
	cs, newlyAdded := selectorCache.AddFQDNSelector(l4, lbls, sel)
	if newlyAdded {
		l4.PerSelectorPolicies[cs] = nil // no per-selector policy (yet)
	}
	return cs
}
// addPolicyForSelector installs the same PerSelectorPolicy — built from the
// given L7 parser, rules, TLS contexts, auth, deny flag, SNIs, listener and
// priority — for every selector already present in the map.
func (l7 L7DataMap) addPolicyForSelector(l7Parser L7ParserType, rules *api.L7Rules, terminatingTLS, originatingTLS *TLSContext, auth *api.Authentication, deny bool, sni []string, listener string, priority ListenerPriority) {
	for epsel := range l7 {
		policy := &PerSelectorPolicy{
			L7Parser:       l7Parser,
			TerminatingTLS: terminatingTLS,
			OriginatingTLS: originatingTLS,
			Authentication: auth,
			IsDeny:         deny,
			ServerNames:    NewStringSet(sni),
			Listener:       listener,
			Priority:       priority,
		}
		if rules != nil {
			policy.L7Rules = *rules
		}
		l7[epsel] = policy
	}
}
// TLSDirection indicates whether a TLS context terminates or originates
// connections at the L7 proxy.
type TLSDirection string

const (
	// TerminatingTLS is the direction for TLS terminated by the L7 proxy.
	TerminatingTLS TLSDirection = "terminating"
	// OriginatingTLS is the direction for TLS originated by the L7 proxy.
	OriginatingTLS TLSDirection = "originating"
)
// getCerts reads certificates out of the PolicyContext, reading from k8s or local files depending on config
// and puts the values into the relevant keys in the TLSContext. Note that if the returned TLSContext.FromFile is
// `false`, then this will be read from Kubernetes.
//
// Returns (nil, nil) when 'tls' is nil.
func (l4 *L4Filter) getCerts(policyCtx PolicyContext, tls *api.TLSContext, direction TLSDirection) (*TLSContext, error) {
	if tls == nil {
		return nil, nil
	}
	logger := policyCtx.GetLogger()
	ca, public, private, inlineSecrets, err := policyCtx.GetTLSContext(tls)
	if err != nil {
		logger.Warn(
			"policy: Error getting TLS Context",
			logfields.Error, err,
			logfields.TrafficDirection, direction,
		)
		return nil, err
	}
	// If the secret is not being included into NPDS inline, we're going to pass an SDS reference instead.
	if inlineSecrets {
		// Inline values must be complete for the given direction.
		switch direction {
		case TerminatingTLS:
			if public == "" || private == "" {
				return nil, fmt.Errorf("terminating TLS context is missing certs")
			}
		case OriginatingTLS:
			if ca == "" {
				return nil, fmt.Errorf("originating TLS context is missing CA certs")
			}
		default:
			return nil, fmt.Errorf("invalid TLS direction: %s", direction)
		}
	} else {
		logger.Debug("Secret being read from Kubernetes", logfields.Secret, k8sTypes.NamespacedName(*tls.Secret))
	}
	// NOTE(review): tls.Secret is dereferenced unconditionally here; presumably
	// earlier validation guarantees it is non-nil — confirm before changing callers.
	return &TLSContext{
		TrustedCA:        ca,
		CertificateChain: public,
		PrivateKey:       private,
		FromFile:         inlineSecrets,
		Secret:           k8sTypes.NamespacedName(*tls.Secret),
	}, nil
}
// createL4Filter creates a filter for L4 policy that applies to the specified
// endpoints and port/protocol, with reference to the original rules that the
// filter is derived from. This filter may be associated with a series of L7
// rules via the `rule` parameter.
// Not called with an empty peerEndpoints.
// Returns an error when TLS contexts cannot be resolved or a listener
// reference is invalid for the policy's scope.
func createL4Filter(policyCtx PolicyContext, peerEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
	protocol api.L4Proto, ingress bool, fqdns api.FQDNSelectorSlice,
) (*L4Filter, error) {
	selectorCache := policyCtx.GetSelectorCache()
	logger := policyCtx.GetLogger()
	origin := policyCtx.Origin()
	portName := ""
	p := uint64(0)
	if iana.IsSvcName(port.Port) {
		portName = port.Port
	} else {
		// already validated via PortRule.Validate()
		p, _ = strconv.ParseUint(port.Port, 0, 16)
	}
	// already validated via L4Proto.Validate(), never "ANY"
	// NOTE: "ANY" for wildcarded port/proto!
	u8p, _ := u8proto.ParseProtocol(string(protocol))
	l4 := &L4Filter{
		Port:                uint16(p),            // 0 for L3-only rules and named ports
		EndPort:             uint16(port.EndPort), // 0 for a single port, >= 'Port' for a range
		PortName:            portName,             // non-"" for named ports
		Protocol:            protocol,
		U8Proto:             u8p,
		PerSelectorPolicies: make(L7DataMap),
		RuleOrigin:          make(map[CachedSelector]ruleOrigin), // Filled in below.
		Ingress:             ingress,
	}
	// Cache either the single wildcard selector or all the specific peer
	// and FQDN selectors.
	if peerEndpoints.SelectsAllEndpoints() {
		l4.wildcard = l4.cacheIdentitySelector(api.WildcardEndpointSelector, origin.stringLabels(), selectorCache)
	} else {
		l4.cacheIdentitySelectors(peerEndpoints, origin, selectorCache)
		l4.cacheFQDNSelectors(fqdns, origin, selectorCache)
	}
	var l7Parser L7ParserType
	var terminatingTLS *TLSContext
	var originatingTLS *TLSContext
	var rules *api.L7Rules
	var sni []string
	listener := ""
	var priority ListenerPriority
	pr := rule.GetPortRule()
	if pr != nil {
		rules = pr.Rules
		sni = pr.GetServerNames()
		// Get TLS contexts, if any
		var err error
		terminatingTLS, err = l4.getCerts(policyCtx, pr.TerminatingTLS, TerminatingTLS)
		if err != nil {
			return nil, err
		}
		originatingTLS, err = l4.getCerts(policyCtx, pr.OriginatingTLS, OriginatingTLS)
		if err != nil {
			return nil, err
		}
		// Set parser type to TLS, if TLS. This will be overridden by L7 below, if rules
		// exists.
		if terminatingTLS != nil || originatingTLS != nil || len(pr.ServerNames) > 0 {
			l7Parser = ParserTypeTLS
		}
		// Determine L7ParserType from rules present. Earlier validation ensures rules
		// for multiple protocols are not present here.
		if rules != nil {
			// we need this to redirect DNS UDP (or ANY, which is more useful)
			if len(rules.DNS) > 0 {
				l7Parser = ParserTypeDNS
			} else if protocol == api.ProtoTCP { // Other than DNS only support TCP
				switch {
				case len(rules.HTTP) > 0:
					l7Parser = ParserTypeHTTP
				case len(rules.Kafka) > 0:
					l7Parser = ParserTypeKafka
				case rules.L7Proto != "":
					l7Parser = (L7ParserType)(rules.L7Proto)
				}
			}
		}
		// Override the parser type if a CRD listener is applicable.
		if pr.Listener != nil {
			l7Parser = ParserTypeCRD
		}
		// Map parser type to default priority for the given parser type
		priority = l7Parser.defaultPriority()
		// Resolve the CRD listener reference and a possible explicit priority.
		if pr.Listener != nil {
			ns := policyCtx.GetNamespace()
			resource := pr.Listener.EnvoyConfig
			switch resource.Kind {
			case "CiliumEnvoyConfig":
				if ns == "" {
					// Cluster-scoped CCNP tries to use namespaced
					// CiliumEnvoyConfig
					//
					// TODO: Catch this in rule validation once we have a
					// validation context in there so that we can differentiate
					// between CNP and CCNP at validation time.
					return nil, fmt.Errorf("Listener %q in CCNP can not use Kind CiliumEnvoyConfig", pr.Listener.Name)
				}
			case "CiliumClusterwideEnvoyConfig":
				// CNP refers to a cluster-scoped listener
				ns = ""
			default:
			}
			listener, _ = api.ResourceQualifiedName(ns, resource.Name, pr.Listener.Name, api.ForceNamespace)
			if pr.Listener.Priority != 0 {
				priority = ListenerPriority(pr.Listener.Priority)
			}
		}
	}
	if l7Parser != ParserTypeNone || auth != nil || policyCtx.IsDeny() {
		modifiedRules := rules
		// If we have L7 rules and default deny is disabled (EnableDefaultDeny=false), we should ensure those rules
		// don't cause other L7 traffic to be denied.
		// Special handling for L7 rules is applied when:
		// 1. We have L7 rules
		// 2. Default deny is disabled for this direction
		// 3. This is a positive policy (not a deny policy)
		hasL7Rules := !rules.IsEmpty()
		isDefaultDenyDisabled := (ingress && !policyCtx.DefaultDenyIngress()) || (!ingress && !policyCtx.DefaultDenyEgress())
		isAllowPolicy := !policyCtx.IsDeny()
		if hasL7Rules && isDefaultDenyDisabled && isAllowPolicy {
			logger.Debug("Adding wildcard L7 rules for default-allow policy",
				logfields.L7Parser, l7Parser,
				logfields.Ingress, ingress)
			modifiedRules = ensureWildcard(rules, l7Parser)
		}
		l4.PerSelectorPolicies.addPolicyForSelector(l7Parser, modifiedRules, terminatingTLS, originatingTLS, auth, policyCtx.IsDeny(), sni, listener, priority)
	}
	// Record the originating rule for every cached selector.
	for cs := range l4.PerSelectorPolicies {
		l4.RuleOrigin[cs] = origin
	}
	return l4, nil
}
// removeSelectors releases all cached selectors referenced by this filter
// from the given selector cache.
func (l4 *L4Filter) removeSelectors(selectorCache *SelectorCache) {
	css := make(types.CachedSelectorSlice, 0, len(l4.PerSelectorPolicies))
	for sel := range l4.PerSelectorPolicies {
		css = append(css, sel)
	}
	selectorCache.RemoveSelectors(css, l4)
}
// detach releases the references held in the L4Filter and must be called before
// the filter is left to be garbage collected.
// L4Filter may still be accessed concurrently after it has been detached.
func (l4 *L4Filter) detach(selectorCache *SelectorCache) {
	// Drop selector-cache references first, then clear the back-pointer to the
	// owning L4Policy so incremental updates are no longer delivered here.
	l4.removeSelectors(selectorCache)
	l4.policy.Store(nil)
}
// attach signifies that the L4Filter is ready and reachable for updates
// from SelectorCache. L4Filter (and L4Policy) is read-only after this is called,
// multiple goroutines will be reading the fields from that point on.
//
// Returns the set of policy features (redirect/deny/auth rules) present in
// this filter, so callers can skip feature-specific code when unused.
func (l4 *L4Filter) attach(ctx PolicyContext, l4Policy *L4Policy) policyFeatures {
	var features policyFeatures
	for cs, sp := range l4.PerSelectorPolicies {
		if sp != nil {
			// A non-empty parser type implies an L7 redirect for this selector.
			if sp.L7Parser != "" {
				features.setFeature(redirectRules)
			}
			if sp.IsDeny {
				features.setFeature(denyRules)
			}
			explicit, authType := getAuthType(sp.Authentication)
			if explicit {
				features.setFeature(authRules)
				if authType != types.AuthTypeDisabled {
					// Collect the explicitly required auth types per selector
					// into the policy-wide authMap, allocated lazily.
					if l4Policy.authMap == nil {
						l4Policy.authMap = make(authMap, 1)
					}
					authTypes := l4Policy.authMap[cs]
					if authTypes == nil {
						authTypes = make(AuthTypes, 1)
					}
					authTypes[authType] = struct{}{}
					l4Policy.authMap[cs] = authTypes
				}
			}
			// Compute Envoy policies when a policy is ready to be used
			if len(sp.L7Rules.HTTP) > 0 {
				sp.EnvoyHTTPRules, sp.CanShortCircuit = ctx.GetEnvoyHTTPRules(&sp.L7Rules)
			}
		}
	}
	l4.policy.Store(l4Policy)
	return features
}
// createL4IngressFilter creates a filter for L4 policy that applies to the
// specified endpoints and port/protocol for ingress traffic, with reference
// to the original rules that the filter is derived from. This filter may be
// associated with a series of L7 rules via the `rule` parameter.
//
// hostWildcardL7 determines if L7 traffic from Host should be
// wildcarded (in the relevant daemon mode).
func createL4IngressFilter(policyCtx PolicyContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string, rule api.Ports, port api.PortProtocol,
	protocol api.L4Proto,
) (*L4Filter, error) {
	filter, err := createL4Filter(policyCtx, fromEndpoints, auth, rule, port, protocol, true, nil)
	if err != nil {
		return nil, err
	}

	// If the filter would apply proxy redirection for the Host, and we should
	// accept everything from host, then wildcard Host at L7.
	if len(hostWildcardL7) > 0 {
		for cs, l7 := range filter.PerSelectorPolicies {
			// Only selectors that redirect to a proxy and can select the
			// reserved host identity need the L7 wildcard selectors added.
			if l7.IsRedirect() && cs.Selects(versioned.Latest(), identity.ReservedIdentityHost) {
				for _, name := range hostWildcardL7 {
					selector := api.ReservedEndpointSelectors[name]
					filter.cacheIdentitySelector(selector, policyCtx.Origin().stringLabels(), policyCtx.GetSelectorCache())
				}
			}
		}
	}
	return filter, nil
}
// createL4EgressFilter creates a filter for L4 policy that applies to the
// specified endpoints and port/protocol for egress traffic, with reference
// to the original rules that the filter is derived from. This filter may be
// associated with a series of L7 rules via the `rule` parameter.
// fqdns carries the FQDN selectors associated with the egress rule, passed
// through to createL4Filter.
func createL4EgressFilter(policyCtx PolicyContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
	protocol api.L4Proto, fqdns api.FQDNSelectorSlice,
) (*L4Filter, error) {
	return createL4Filter(policyCtx, toEndpoints, auth, rule, port, protocol, false, fqdns)
}
// redirectType maps this selector policy's L7 parser to the kind of proxy
// redirect it requires. A nil policy requires no redirect.
func (sp *PerSelectorPolicy) redirectType() redirectTypes {
	if sp == nil {
		return redirectTypeNone
	}
	switch sp.L7Parser {
	case ParserTypeDNS:
		return redirectTypeDNS
	case ParserTypeHTTP, ParserTypeTLS, ParserTypeCRD:
		return redirectTypeEnvoy
	case ParserTypeNone:
		return redirectTypeNone
	}
	// all other (non-empty) values are used for proxylib redirects
	return redirectTypeProxylib
}
// Marshal returns the `L4Filter` rendered as JSON. On marshalling failure the
// error text is returned as a quoted JSON string instead.
func (l4 *L4Filter) Marshal() string {
	b, err := json.Marshal(l4)
	if err != nil {
		return "\"L4Filter error: " + err.Error() + "\""
	}
	return string(b)
}
// String returns the `L4Filter` in a human-readable (JSON) string, or the
// marshalling error text on failure.
func (l4 *L4Filter) String() string {
	b, err := json.Marshal(l4)
	if err != nil {
		return err.Error()
	}
	return string(b)
}
// matchesLabels reports whether any selector in this filter matches the given
// labels (first return) and whether that match is a deny (second return).
// Note: Only used for policy tracing
func (l4 *L4Filter) matchesLabels(labels labels.LabelArray) (bool, bool) {
	if l4.wildcard != nil {
		// A wildcard selector matches anything; report its deny status.
		perSelectorPolicy := l4.PerSelectorPolicies[l4.wildcard]
		isDeny := perSelectorPolicy.GetDeny()
		return true, isDeny
	} else if len(labels) == 0 {
		return false, false
	}

	var selected bool
	for sel, rule := range l4.PerSelectorPolicies {
		// slow, but OK for tracing
		idSel := sel.(*identitySelector)
		if lis, ok := idSel.source.(*labelIdentitySelector); ok && lis.xxxMatches(labels) {
			selected = true
			// A single matching deny rule short-circuits the result.
			if rule.GetDeny() {
				return true, true
			}
		}
	}
	return selected, false
}
// addL4Filter adds 'filterToMerge' into the 'resMap'. Returns an error if
// the 'filterToMerge' can't be merged with an existing filter for the same
// port and proto.
func addL4Filter(policyCtx PolicyContext,
	resMap L4PolicyMap,
	p api.PortProtocol, proto api.L4Proto,
	filterToMerge *L4Filter,
) error {
	existingFilter := resMap.ExactLookup(p.Port, uint16(p.EndPort), string(proto))
	if existingFilter == nil {
		// No previous filter for this port/proto: insert as-is.
		resMap.Upsert(p.Port, uint16(p.EndPort), string(proto), filterToMerge)
		return nil
	}

	selectorCache := policyCtx.GetSelectorCache()
	if err := mergePortProto(policyCtx, existingFilter, filterToMerge, selectorCache); err != nil {
		// The failed merge leaves 'filterToMerge' unused; release its
		// selector references before bailing out.
		filterToMerge.detach(selectorCache)
		return err
	}

	// To keep the rule origin tracking correct, merge the rule label arrays for each CachedSelector
	// we know about. New CachedSelectors are added.
	for cs, newLabels := range filterToMerge.RuleOrigin {
		if existingLabels, ok := existingFilter.RuleOrigin[cs]; ok {
			existingFilter.RuleOrigin[cs] = existingLabels.Merge(newLabels)
		} else {
			existingFilter.RuleOrigin[cs] = newLabels
		}
	}

	resMap.Upsert(p.Port, uint16(p.EndPort), string(proto), existingFilter)
	return nil
}
// L4PolicyMap is a list of L4 filters indexable by port/endport/protocol
type L4PolicyMap interface {
	// Upsert adds or replaces the filter stored for the given key.
	Upsert(port string, endPort uint16, protocol string, l4 *L4Filter)
	// Delete removes the filter stored for the given key, if present.
	Delete(port string, endPort uint16, protocol string)
	// ExactLookup returns the filter stored for exactly this key, or nil.
	ExactLookup(port string, endPort uint16, protocol string) *L4Filter
	// MatchesLabels reports whether any filter covering port/protocol matches
	// the given labels, and whether that match is a deny. Tracing only.
	MatchesLabels(port, protocol string, labels labels.LabelArray) (match, isDeny bool)
	// Detach releases the selector cache references held by all filters.
	Detach(selectorCache *SelectorCache)
	// ForEach visits each filter until the callback returns false.
	ForEach(func(l4 *L4Filter) bool)
	// Len returns the number of filters in the map.
	Len() int
}
// NewL4PolicyMap creates a new, empty L4PolicyMap.
func NewL4PolicyMap() L4PolicyMap {
	m := &l4PolicyMap{
		namedPortMap:   make(map[string]*L4Filter),
		rangePortMap:   make(map[portProtoKey]*L4Filter),
		rangePortIndex: bitlpm.NewUintTrie[uint32, map[portProtoKey]struct{}](),
	}
	return m
}
// NewL4PolicyMapWithValues creates a new L4PolicyMap seeded with an initial
// set of values. Keys in initMap are "port/protocol" strings; the initMap
// argument does not support port ranges.
func NewL4PolicyMapWithValues(initMap map[string]*L4Filter) L4PolicyMap {
	m := &l4PolicyMap{
		namedPortMap:   make(map[string]*L4Filter),
		rangePortMap:   make(map[portProtoKey]*L4Filter),
		rangePortIndex: bitlpm.NewUintTrie[uint32, map[portProtoKey]struct{}](),
	}
	for key, filter := range initMap {
		parts := strings.Split(key, "/")
		// Keys lacking a protocol component are silently skipped.
		if len(parts) >= 2 {
			m.Upsert(parts[0], 0, parts[1], filter)
		}
	}
	return m
}
// portProtoKey identifies an L4Filter by its numeric port range and protocol.
type portProtoKey struct {
	port, endPort uint16
	proto         uint8
}
// l4PolicyMap is the implementation of L4PolicyMap
type l4PolicyMap struct {
	// namedPortMap represents the named ports (a Kubernetes feature)
	// that map to an L4Filter. They must be tracked at the selection
	// level, because they can only be resolved at the endpoint/identity
	// level. Named ports cannot have ranges. Keyed by "port/protocol".
	namedPortMap map[string]*L4Filter
	// rangePortMap is a map of all L4Filters indexed by their port-
	// protocol.
	rangePortMap map[portProtoKey]*L4Filter
	// rangePortIndex is an index of all L4Filters so that
	// L4Filters that have overlapping port ranges can be looked up
	// with a single port. Keys are protocol-port values as produced by
	// makePolicyMapKey.
	rangePortIndex *bitlpm.UintTrie[uint32, map[portProtoKey]struct{}]
}
// parsePortProtocol converts a numeric port string and a protocol name to
// their binary forms. The inputs have been validated many times over by this
// point, so conversion errors are deliberately ignored.
func parsePortProtocol(port, protocol string) (uint16, uint8) {
	portNum, _ := strconv.ParseUint(port, 10, 16)
	protoNum, _ := u8proto.ParseProtocol(protocol)
	return uint16(portNum), uint8(protoNum)
}
// makePolicyMapKey packs a protocol and a masked port into a single uint32,
// with the protocol in the upper 16 bits and the masked port in the lower 16.
func makePolicyMapKey(port, mask uint16, proto uint8) uint32 {
	maskedPort := uint32(port & mask)
	return uint32(proto)<<16 | maskedPort
}
// Upsert adds or replaces an L4Filter indexed by protocol/port-endPort.
// Named (non-numeric) ports are stored separately, as they can only be
// resolved to numbers per endpoint.
func (l4M *l4PolicyMap) Upsert(port string, endPort uint16, protocol string, l4 *L4Filter) {
	if iana.IsSvcName(port) {
		l4M.namedPortMap[port+"/"+protocol] = l4
		return
	}

	portU, protoU := parsePortProtocol(port, protocol)
	ppK := portProtoKey{
		port:    portU,
		endPort: endPort,
		proto:   protoU,
	}
	_, indexExists := l4M.rangePortMap[ppK]
	l4M.rangePortMap[ppK] = l4
	// We do not need to reindex a key that already exists,
	// even if the filter changed.
	if !indexExists {
		// Decompose the [port, endPort] range into masked ports and index
		// this key in the LPM trie under each resulting prefix.
		for _, mp := range PortRangeToMaskedPorts(portU, endPort) {
			k := makePolicyMapKey(mp.port, mp.mask, protoU)
			// Prefix length: 16 protocol bits plus the non-wildcarded
			// (masked) port bits.
			prefix := 32 - uint(bits.TrailingZeros16(mp.mask))
			portProtoSet, ok := l4M.rangePortIndex.ExactLookup(prefix, k)
			if !ok {
				portProtoSet = make(map[portProtoKey]struct{})
				l4M.rangePortIndex.Upsert(prefix, k, portProtoSet)
			}
			portProtoSet[ppK] = struct{}{}
		}
	}
}
// Delete removes an L4Filter and its trie index entries by
// protocol/port-endPort.
func (l4M *l4PolicyMap) Delete(port string, endPort uint16, protocol string) {
	if iana.IsSvcName(port) {
		delete(l4M.namedPortMap, port+"/"+protocol)
		return
	}

	portU, protoU := parsePortProtocol(port, protocol)
	ppK := portProtoKey{
		port:    portU,
		endPort: endPort,
		proto:   protoU,
	}
	_, indexExists := l4M.rangePortMap[ppK]
	delete(l4M.rangePortMap, ppK)
	// Only delete the index if the key exists.
	if indexExists {
		for _, mp := range PortRangeToMaskedPorts(portU, endPort) {
			k := makePolicyMapKey(mp.port, mp.mask, protoU)
			prefix := 32 - uint(bits.TrailingZeros16(mp.mask))
			portProtoSet, ok := l4M.rangePortIndex.ExactLookup(prefix, k)
			if !ok {
				// NOTE(review): a missing trie entry aborts cleanup of the
				// remaining masked ports of this range. Presumably Upsert
				// guarantees all entries exist so this is unreachable —
				// confirm; otherwise this should likely be 'continue'.
				return
			}
			delete(portProtoSet, ppK)
			// Drop the trie node once no range references it anymore.
			if len(portProtoSet) == 0 {
				l4M.rangePortIndex.Delete(prefix, k)
			}
		}
	}
}
// ExactLookup returns the L4Filter stored under exactly
// protocol/port-endPort, or nil if there is none.
func (l4M *l4PolicyMap) ExactLookup(port string, endPort uint16, protocol string) *L4Filter {
	if iana.IsSvcName(port) {
		return l4M.namedPortMap[port+"/"+protocol]
	}
	portU, protoU := parsePortProtocol(port, protocol)
	key := portProtoKey{port: portU, endPort: endPort, proto: protoU}
	return l4M.rangePortMap[key]
}
// MatchesLabels checks if a given port, protocol, and labels matches
// any Rule in the L4PolicyMap. Only used for policy tracing.
func (l4M *l4PolicyMap) MatchesLabels(port, protocol string, labels labels.LabelArray) (match, isDeny bool) {
	if iana.IsSvcName(port) {
		l4 := l4M.namedPortMap[port+"/"+protocol]
		if l4 != nil {
			return l4.matchesLabels(labels)
		}
		return
	}

	portU, protoU := parsePortProtocol(port, protocol)
	// Track which filters were already evaluated: the same portProtoKey is
	// indexed under every masked-port prefix of its range, and Ancestors
	// visits all prefixes covering the port, so duplicates are expected.
	l4PortProtoKeys := make(map[portProtoKey]struct{})
	l4M.rangePortIndex.Ancestors(32, makePolicyMapKey(portU, 0xffff, protoU),
		func(_ uint, _ uint32, portProtoSet map[portProtoKey]struct{}) bool {
			for k := range portProtoSet {
				v, ok := l4M.rangePortMap[k]
				if ok {
					if _, evaluated := l4PortProtoKeys[k]; !evaluated {
						// Fix: record the key so duplicate index entries do
						// not re-run the (idempotent) label match; previously
						// this map was checked but never populated, making
						// the dedup dead code.
						l4PortProtoKeys[k] = struct{}{}
						// NOTE(review): 'match' is overwritten for every new
						// filter, so a later non-matching filter can mask an
						// earlier match unless a deny ends the walk — confirm
						// this is the intended tracing semantics.
						match, isDeny = v.matchesLabels(labels)
						if isDeny {
							return false
						}
					}
				}
			}
			return true
		})
	return
}
// ForEach iterates over all L4Filters in the l4PolicyMap, named ports first,
// stopping early when fn returns false.
func (l4M *l4PolicyMap) ForEach(fn func(l4 *L4Filter) bool) {
	for _, flt := range l4M.namedPortMap {
		if !fn(flt) {
			return
		}
	}
	for _, flt := range l4M.rangePortMap {
		if !fn(flt) {
			return
		}
	}
}
// Len returns the number of entries in the map; a nil map has zero entries.
func (l4M *l4PolicyMap) Len() int {
	if l4M == nil {
		return 0
	}
	return len(l4M.rangePortMap) + len(l4M.namedPortMap)
}
// policyFeatures is a bitmask recording which kinds of rules are present in
// a direction's policy, so feature-specific code can be skipped when a
// feature is unused.
type policyFeatures uint8

const (
	denyRules policyFeatures = 1 << iota
	redirectRules
	authRules

	allFeatures policyFeatures = ^policyFeatures(0)
)

// setFeature marks the given feature bit(s) as present.
func (pf *policyFeatures) setFeature(feature policyFeatures) {
	*pf |= feature
}

// contains reports whether any of the given feature bits are set.
func (pf policyFeatures) contains(feature policyFeatures) bool {
	return pf&feature != 0
}
// L4DirectionPolicy is the L4 policy for one traffic direction (ingress or
// egress).
type L4DirectionPolicy struct {
	PortRules L4PolicyMap

	// features tracks properties of PortRules to skip code when features are not used
	features policyFeatures
}
// newL4DirectionPolicy returns an L4DirectionPolicy with an empty rule map
// and no features set.
func newL4DirectionPolicy() L4DirectionPolicy {
	return L4DirectionPolicy{PortRules: NewL4PolicyMap()}
}
// Detach removes the cached selectors held by L4PolicyMap from the
// selectorCache, allowing the map to be garbage collected when there
// are no more references to it.
func (l4 L4DirectionPolicy) Detach(selectorCache *SelectorCache) {
	l4.PortRules.Detach(selectorCache)
}
// Detach detaches every filter contained in the map. Used directly from
// tracing and testing functions.
func (l4M *l4PolicyMap) Detach(selectorCache *SelectorCache) {
	l4M.ForEach(func(flt *L4Filter) bool {
		flt.detach(selectorCache)
		return true
	})
}
// attach makes all the L4Filters point back to the L4Policy that contains them.
// This is done before the L4PolicyMap is exposed to concurrent access.
// Returns the bitmask of all redirect types for this policymap.
func (l4 *L4DirectionPolicy) attach(ctx PolicyContext, l4Policy *L4Policy) redirectTypes {
	var redirectTypes redirectTypes
	var features policyFeatures
	l4.PortRules.ForEach(func(f *L4Filter) bool {
		// Accumulate both the per-filter feature bits and the union of
		// redirect types required by any selector of any filter.
		features |= f.attach(ctx, l4Policy)
		for _, sp := range f.PerSelectorPolicies {
			redirectTypes |= sp.redirectType()
		}
		return true
	})
	l4.features = features
	return redirectTypes
}
// L4Policy holds the resolved L4/L7 policy for one identity, in both traffic
// directions, plus bookkeeping for delivering incremental updates to users.
type L4Policy struct {
	Ingress L4DirectionPolicy
	Egress  L4DirectionPolicy

	// authMap maps cached selectors to the set of explicitly required
	// authentication types, if any (populated in L4Filter.attach).
	authMap authMap

	// Revision is the repository revision used to generate this policy.
	Revision uint64

	// redirectTypes is a bitmap containing the types of redirect contained by this policy. It
	// is computed after the policy maps to avoid scanning them repeatedly when using the
	// L4Policy
	redirectTypes redirectTypes

	// Endpoint policies using this L4Policy
	// These are circular references, cleaned up in Detach()
	// This mutex is taken while Endpoint mutex is held, so Endpoint lock
	// MUST always be taken before this mutex.
	mutex lock.RWMutex
	users map[*EndpointPolicy]struct{}
}
// NewL4Policy creates a new L4Policy for the given repository revision.
func NewL4Policy(revision uint64) L4Policy {
	policy := L4Policy{
		Ingress:  newL4DirectionPolicy(),
		Egress:   newL4DirectionPolicy(),
		Revision: revision,
		users:    make(map[*EndpointPolicy]struct{}),
	}
	return policy
}
// insertUser adds a user to the L4Policy so that incremental
// updates of the L4Policy may be forwarded to the users of it.
// May not call into SelectorCache, as SelectorCache is locked during this call.
func (l4 *L4Policy) insertUser(user *EndpointPolicy) {
	l4.mutex.Lock()
	defer l4.mutex.Unlock()

	// 'users' is nil once the policy has been detached, which happens when
	// the policy is replaced with a new one or when the last endpoint using
	// it is removed. An endpoint may have started regeneration against the
	// old policy before the swap; its policy will be recomputed soon after,
	// so in that case we do nothing here.
	if l4.users != nil {
		l4.users[user] = struct{}{}
	}
}
// removeUser removes a user that no longer needs incremental updates
// from the L4Policy.
func (l4 *L4Policy) removeUser(user *EndpointPolicy) {
	l4.mutex.Lock()
	defer l4.mutex.Unlock()

	// 'users' is nil once the policy has been detached (policy replaced, or
	// last endpoint using it removed); nothing to remove in that case.
	if l4.users != nil {
		delete(l4.users, user)
	}
}
// AccumulateMapChanges distributes the given changes to the registered users.
//
// The caller is responsible for making sure the same identity is not
// present in both 'adds' and 'deletes'.
func (l4Policy *L4Policy) AccumulateMapChanges(logger *slog.Logger, l4 *L4Filter, cs CachedSelector, adds, deletes []identity.NumericIdentity) {
	port := uint16(l4.Port)
	proto := l4.U8Proto
	derivedFrom := l4.RuleOrigin[cs]

	direction := trafficdirection.Egress
	if l4.Ingress {
		direction = trafficdirection.Ingress
	}

	// Snapshot the per-selector policy attributes that feed the map entry.
	perSelectorPolicy := l4.PerSelectorPolicies[cs]
	redirect := perSelectorPolicy.IsRedirect()
	listener := perSelectorPolicy.GetListener()
	priority := perSelectorPolicy.GetPriority()
	authReq := perSelectorPolicy.getAuthRequirement()
	isDeny := perSelectorPolicy.GetDeny()

	// Holding the read lock is fine here, as neither GetNamedPort() nor
	// LookupRedirectPort() below takes the Endpoint lock any longer.
	// SelectorCache may not be called into while holding this lock!
	l4Policy.mutex.RLock()
	defer l4Policy.mutex.RUnlock()

	for epPolicy := range l4Policy.users {
		// resolve named port
		if port == 0 && l4.PortName != "" {
			port = epPolicy.PolicyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
			if port == 0 {
				// This endpoint does not expose the named port; skip it.
				continue
			}
		}

		var proxyPort uint16
		if redirect {
			var err error
			proxyPort, err = epPolicy.LookupRedirectPort(l4.Ingress, string(l4.Protocol), port, listener)
			if err != nil {
				logger.Warn(
					"AccumulateMapChanges: Missing redirect.",
					logfields.EndpointSelector, cs,
					logfields.Port, port,
					logfields.Protocol, proto,
					logfields.TrafficDirection, direction,
					logfields.IsRedirect, redirect,
					logfields.Listener, listener,
					logfields.ListenerPriority, priority,
				)
				continue
			}
		}

		// Expand the port range into masked-port keys for the policy map.
		var keysToAdd []Key
		for _, mp := range PortRangeToMaskedPorts(port, l4.EndPort) {
			keysToAdd = append(keysToAdd,
				KeyForDirection(direction).WithPortProtoPrefix(proto, mp.port, uint8(bits.LeadingZeros16(^mp.mask))))
		}
		value := newMapStateEntry(derivedFrom, proxyPort, priority, isDeny, authReq)

		// If the entry is identical to wildcard map entry, we can elide it.
		// See comment in L4Filter.toMapState()
		wildcardMapEntry := l4.generateWildcardMapStateEntry(logger, epPolicy, port)
		if !wildcardMapEntry.Invalid && port != 0 && value.MapStateEntry == wildcardMapEntry.MapStateEntry {
			logger.Debug(
				"AccumulateMapChanges: Skipping L3/L4 key due to existing identical L4-only key",
				logfields.EndpointSelector, cs)
			continue
		}

		if option.Config.Debug {
			authString := "default"
			if authReq.IsExplicit() {
				authString = authReq.AuthType().String()
			}
			logger.Debug(
				"AccumulateMapChanges",
				logfields.EndpointSelector, cs,
				logfields.AddedPolicyID, adds,
				logfields.DeletedPolicyID, deletes,
				logfields.Port, port,
				logfields.Protocol, proto,
				logfields.TrafficDirection, direction,
				logfields.IsRedirect, redirect,
				logfields.AuthType, authString,
				logfields.Listener, listener,
				logfields.ListenerPriority, priority,
			)
		}
		epPolicy.policyMapChanges.AccumulateMapChanges(adds, deletes, keysToAdd, value)
	}
}
// SyncMapChanges marks earlier updates as completed for every registered
// user. The l4 argument is not referenced here but is kept for signature
// compatibility with callers.
func (l4Policy *L4Policy) SyncMapChanges(l4 *L4Filter, txn *versioned.Tx) {
	// SelectorCache may not be called into while holding this lock!
	l4Policy.mutex.RLock()
	defer l4Policy.mutex.RUnlock()
	for user := range l4Policy.users {
		user.policyMapChanges.SyncMapChanges(txn)
	}
}
// detach makes the L4Policy ready for garbage collection, removing
// circular pointer references.
// The endpointID argument is only necessary if isDelete is false.
// It ensures that detach does not call a regeneration trigger on
// the same endpoint that initiated a selector policy update.
// Note that the L4Policy itself is not modified in any way, so that it may still
// be used concurrently.
func (l4 *L4Policy) detach(selectorCache *SelectorCache, isDelete bool, endpointID uint64) {
	l4.Ingress.Detach(selectorCache)
	l4.Egress.Detach(selectorCache)

	l4.mutex.Lock()
	defer l4.mutex.Unlock()

	// If this detach is a delete there is no reason to initiate
	// a regenerate.
	if !isDelete {
		// Other endpoints sharing this (now stale) policy must recompute it;
		// the initiating endpoint is skipped since it already regenerates.
		for ePolicy := range l4.users {
			if endpointID != ePolicy.PolicyOwner.GetID() {
				go ePolicy.PolicyOwner.RegenerateIfAlive(&regeneration.ExternalRegenerationMetadata{
					Reason:            "selector policy has changed because of another endpoint with the same identity",
					RegenerationLevel: regeneration.RegenerateWithoutDatapath,
				})
			}
		}
	}
	// A nil 'users' map marks this policy as detached (see insertUser).
	l4.users = nil
}
// Attach makes all the L4Filters point back to the L4Policy that contains
// them and caches the union of ingress and egress redirect types.
// This is done before the L4Policy is exposed to concurrent access.
func (l4 *L4Policy) Attach(ctx PolicyContext) {
	in := l4.Ingress.attach(ctx, l4)
	out := l4.Egress.attach(ctx, l4)
	l4.redirectTypes = in | out
}
// HasRedirect returns true if the L4 policy contains at least one port redirection
func (l4 *L4Policy) HasRedirect() bool {
	if l4 == nil {
		return false
	}
	return l4.redirectTypes != redirectTypeNone
}
// HasEnvoyRedirect returns true if the L4 policy contains at least one port redirection to Envoy
func (l4 *L4Policy) HasEnvoyRedirect() bool {
	if l4 == nil {
		return false
	}
	return l4.redirectTypes&redirectTypeEnvoy == redirectTypeEnvoy
}
// HasProxylibRedirect returns true if the L4 policy contains at least one port redirection to Proxylib
func (l4 *L4Policy) HasProxylibRedirect() bool {
	if l4 == nil {
		return false
	}
	return l4.redirectTypes&redirectTypeProxylib == redirectTypeProxylib
}
// GetModel returns the API model representation of this L4Policy, or nil for
// a nil policy.
func (l4 *L4Policy) GetModel() *models.L4Policy {
	if l4 == nil {
		return nil
	}

	ingress := []*models.PolicyRule{}
	l4.Ingress.PortRules.ForEach(func(v *L4Filter) bool {
		// Collect the origin rule labels both as a flat merged list and
		// grouped per selector.
		rulesBySelector := map[string][][]string{}
		derivedFrom := labels.LabelArrayList{}
		for sel, rules := range v.RuleOrigin {
			lal := rules.GetLabelArrayList()
			derivedFrom.MergeSorted(lal)
			rulesBySelector[sel.String()] = lal.GetModel()
		}
		ingress = append(ingress, &models.PolicyRule{
			Rule:             v.Marshal(),
			DerivedFromRules: derivedFrom.GetModel(),
			RulesBySelector:  rulesBySelector,
		})
		return true
	})

	egress := []*models.PolicyRule{}
	l4.Egress.PortRules.ForEach(func(v *L4Filter) bool {
		// TODO: Add RulesBySelector field like for ingress above?
		derivedFrom := labels.LabelArrayList{}
		for _, rules := range v.RuleOrigin {
			lal := rules.GetLabelArrayList()
			derivedFrom.MergeSorted(lal)
		}
		egress = append(egress, &models.PolicyRule{
			Rule:             v.Marshal(),
			DerivedFromRules: derivedFrom.GetModel(),
		})
		return true
	})

	return &models.L4Policy{
		Ingress: ingress,
		Egress:  egress,
	}
}
// ProxyPolicy is any type which encodes state needed to redirect to an L7
// proxy.
type ProxyPolicy interface {
	// GetPerSelectorPolicies returns the per-selector L7 policies.
	GetPerSelectorPolicies() L7DataMap
	// GetL7Parser returns the L7 parser type of this policy.
	GetL7Parser() L7ParserType
	// GetIngress reports whether the policy applies to ingress traffic.
	GetIngress() bool
	// GetPort returns the port this policy applies to.
	GetPort() uint16
	// GetProtocol returns the L4 protocol this policy applies to.
	GetProtocol() u8proto.U8proto
	// GetListener returns the name of the proxy listener to redirect to.
	GetListener() string
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"context"
"fmt"
"log/slog"
"maps"
"slices"
"sync"
"testing"
"github.com/cilium/hive/hivetest"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/pkg/container/set"
"github.com/cilium/cilium/pkg/crypto/certificatemanager"
envoypolicy "github.com/cilium/cilium/pkg/envoy/policy"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
testpolicy "github.com/cilium/cilium/pkg/testutils/policy"
)
// Selectors and values shared across the policy tests.
var (
	hostSelector           = api.ReservedEndpointSelectors[labels.IDNameHost]
	dummySelectorCacheUser = &testpolicy.DummySelectorCacheUser{}
	fooSelector            = api.NewESFromLabels(labels.ParseSelectLabel("foo"))
	bazSelector            = api.NewESFromLabels(labels.ParseSelectLabel("baz"))

	selBar1 = api.NewESFromLabels(labels.ParseSelectLabel("id=bar1"))
	selBar2 = api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))

	falseValue = false
)
// testData bundles a selector cache, a policy repository and a set of
// pre-built cached selectors shared by the policy tests.
type testData struct {
	sc   *SelectorCache
	repo *Repository

	// idSet tracks the identities currently loaded into the selector cache.
	idSet set.Set[identity.NumericIdentity]

	testPolicyContext *testPolicyContextType

	cachedSelectorA        CachedSelector
	cachedSelectorB        CachedSelector
	cachedSelectorC        CachedSelector
	cachedSelectorHost     CachedSelector
	wildcardCachedSelector CachedSelector

	cachedFooSelector CachedSelector
	cachedBazSelector CachedSelector

	cachedSelectorBar1 CachedSelector
	cachedSelectorBar2 CachedSelector

	cachedSelectorWorld   CachedSelector
	cachedSelectorWorldV4 CachedSelector
	cachedSelectorWorldV6 CachedSelector
}
// newTestData builds a fully wired testData: a selector cache, a repository
// using that cache, and the commonly used cached selectors, each pre-seeded
// with the identities they are expected to select in the tests.
func newTestData(logger *slog.Logger) *testData {
	td := &testData{
		sc:                testNewSelectorCache(logger, nil),
		repo:              NewPolicyRepository(logger, nil, &fakeCertificateManager{}, envoypolicy.NewEnvoyL7RulesTranslator(logger, certificatemanager.NewMockSecretManagerInline()), nil, testpolicy.NewPolicyMetricsNoop()),
		idSet:             set.NewSet[identity.NumericIdentity](),
		testPolicyContext: &testPolicyContextType{logger: logger},
	}
	td.testPolicyContext.sc = td.sc
	td.repo.selectorCache = td.sc

	// The wildcard selector goes through the real selector cache; the rest
	// are bare selectors built directly with fixed selections.
	td.wildcardCachedSelector, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, api.WildcardEndpointSelector)

	td.cachedSelectorA = td.getCachedSelectorForTest(endpointSelectorA, idA.ID)
	td.cachedSelectorB = td.getCachedSelectorForTest(endpointSelectorB, idB.ID)
	td.cachedSelectorC = td.getCachedSelectorForTest(endpointSelectorC, idC.ID)
	td.cachedSelectorHost = td.getCachedSelectorForTest(hostSelector, identity.ReservedIdentityHost)

	td.cachedFooSelector = td.getCachedSelectorForTest(fooSelector)
	td.cachedBazSelector = td.getCachedSelectorForTest(bazSelector)

	td.cachedSelectorBar1 = td.getCachedSelectorForTest(selBar1)
	td.cachedSelectorBar2 = td.getCachedSelectorForTest(selBar2)

	td.cachedSelectorWorld = td.getCachedSelectorForTest(api.EntitySelectorMapping[api.EntityWorld][0], identity.ReservedIdentityWorld)
	td.cachedSelectorWorldV4 = td.getCachedSelectorForTest(api.EntitySelectorMapping[api.EntityWorldIPv4][0], identity.ReservedIdentityWorldIPv4)
	td.cachedSelectorWorldV6 = td.getCachedSelectorForTest(api.EntitySelectorMapping[api.EntityWorldIPv6][0], identity.ReservedIdentityWorldIPv6)

	return td
}
// getCachedSelectorForTest builds a bare identitySelector for es whose cached
// selections are exactly the given identities, bypassing the selector cache.
func (td *testData) getCachedSelectorForTest(es api.EndpointSelector, selections ...identity.NumericIdentity) CachedSelector {
	sel := &identitySelector{
		logger:           td.sc.logger,
		key:              es.CachedString(),
		users:            make(map[CachedSelectionUser]struct{}),
		cachedSelections: make(map[identity.NumericIdentity]struct{}),
	}
	for _, id := range selections {
		sel.cachedSelections[id] = struct{}{}
	}
	return sel
}
// withIDs loads the set of IDs into the SelectorCache and records them in the
// test's ID set. Returns the same testData for easy chaining.
func (td *testData) withIDs(initIDs ...identity.IdentityMap) *testData {
	merged := identity.IdentityMap{}
	for _, im := range initIDs {
		maps.Copy(merged, im)
	}

	var wg sync.WaitGroup
	td.sc.UpdateIdentities(merged, nil, &wg)
	wg.Wait()

	for id := range merged {
		td.idSet.Insert(id)
	}
	return td
}
// addIdentity inserts a single identity into the selector cache, waits for
// the update to be applied, and records it in the test's ID set.
func (td *testData) addIdentity(id *identity.Identity) {
	var wg sync.WaitGroup
	td.sc.UpdateIdentities(identity.IdentityMap{id.ID: id.LabelArray}, nil, &wg)
	wg.Wait()
	td.idSet.Insert(id.ID)
}
// removeIdentity deletes a single identity from the selector cache, waits for
// the update to be applied, and drops it from the test's ID set.
func (td *testData) removeIdentity(id *identity.Identity) {
	var wg sync.WaitGroup
	td.sc.UpdateIdentities(nil, identity.IdentityMap{id.ID: id.LabelArray}, &wg)
	wg.Wait()
	td.idSet.Remove(id.ID)
}
// addIdentitySelector registers sel with the selector cache, reporting
// whether the selector was newly added.
func (td *testData) addIdentitySelector(sel api.EndpointSelector) bool {
	_, newlyAdded := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, sel)
	return newlyAdded
}
// verifyL4PolicyMapEqual asserts that 'actual' contains the same filters as
// 'expected'. When availableIDs is empty, expected and actual are assumed to
// share cached-selector pointers and filters are compared with Equals;
// otherwise each filter's fields and per-selector policies are compared
// field-by-field, with the expected cached selections restricted to the
// identities in availableIDs.
func (td *testData) verifyL4PolicyMapEqual(t *testing.T, expected, actual L4PolicyMap, availableIDs ...identity.NumericIdentity) {
	t.Helper()

	require.Equal(t, expected.Len(), actual.Len())
	expected.ForEach(func(l4 *L4Filter) bool {
		// Look up the corresponding actual filter by named or numeric port.
		port := l4.PortName
		if len(port) == 0 {
			port = fmt.Sprintf("%d", l4.Port)
		}
		l4B := actual.ExactLookup(port, l4.EndPort, string(l4.Protocol))

		require.NotNil(t, l4B, "Port Protocol lookup failed: [Port: %s, EndPort: %d, Protocol: %s]", port, l4.EndPort, string(l4.Protocol))

		// If no available IDs are provided, we assume the same pointer for
		// cached selector is used for both expected and actual L4PolicyMap,
		// just make sure L4 filter is equal
		if len(availableIDs) == 0 {
			require.True(t, l4.Equals(l4B), "Expected: %s\nActual: %s", l4.String(), l4B.String())
			return true
		}

		require.Equal(t, l4.Port, l4B.Port)
		require.Equal(t, l4.EndPort, l4B.EndPort)
		require.Equal(t, l4.PortName, l4B.PortName)
		require.Equal(t, l4.Protocol, l4B.Protocol)
		require.Equal(t, l4.Ingress, l4B.Ingress)
		require.Equal(t, l4.wildcard, l4B.wildcard)
		require.Len(t, l4B.PerSelectorPolicies, len(l4.PerSelectorPolicies))
		// Match per-selector policies by selector string, since pointers
		// differ between the expected and actual maps.
		for k, v := range l4.PerSelectorPolicies {
			found := false
			for bK, bV := range l4B.PerSelectorPolicies {
				if k.String() == bK.String() {
					require.True(t, v.Equal(bV), "Expected: %s\nActual: %s", perSelectorPolicyToString(v), perSelectorPolicyToString(bV))
					// The actual cached selections must equal the expected
					// ones filtered down to the available identities.
					selActual := bK.(*identitySelector).cachedSelections
					selExpected := make(map[identity.NumericIdentity]struct{})
					for id := range k.(*identitySelector).cachedSelections {
						if slices.Contains(availableIDs, id) {
							selExpected[id] = struct{}{}
						}
					}
					require.True(t, maps.Equal(selExpected, selActual), "Expected: %v\nActual: %v", selExpected, selActual)
					found = true
				}
			}
			require.True(t, found, "Failed to find expected cached selector in PerSelectorPolicy: %s", k.String())
		}
		return true
	})
}
// validateResolvedPolicy compares the selector policy's per-direction rules
// against the expected maps (skipping a direction when its expectation is
// nil), then re-resolves the policy from scratch and checks that the
// incrementally updated endpoint policy map state matches the freshly
// distilled one.
func (td *testData) validateResolvedPolicy(t *testing.T, selPolicy *selectorPolicy, epPolicy *EndpointPolicy, expectedIn, expectedOut L4PolicyMap) {
	t.Helper()
	logger := hivetest.Logger(t)

	if expectedIn != nil {
		td.verifyL4PolicyMapEqual(t, expectedIn, selPolicy.L4Policy.Ingress.PortRules, td.idSet.AsSlice()...)
	}

	if expectedOut != nil {
		td.verifyL4PolicyMapEqual(t, expectedOut, selPolicy.L4Policy.Egress.PortRules, td.idSet.AsSlice()...)
	}

	// Resolve the policy again and compare against the inputs to verify incremental updates
	// are applied properly.
	sp, err := td.repo.resolvePolicyLocked(idA)
	require.NoError(t, err)
	epp := sp.DistillPolicy(logger, DummyOwner{logger: logger}, nil)
	require.NotNil(t, epp)
	epp.Ready()

	// Apply pending incremental changes to the original endpoint policy
	// before comparing its map state with the freshly distilled one.
	closer, _ := epPolicy.ConsumeMapChanges()
	closer()
	epPolicy.Ready()

	require.True(t, epPolicy.policyMapState.Equal(&epp.policyMapState), epPolicy.policyMapState.diff(&epp.policyMapState))
	epp.Detach(logger)
}
// policyMapEquals takes a set of policies and an expected L4PolicyMap. The policies are assumed to
// select identity A.
//
// The repository is cleared when called.
func (td *testData) policyMapEquals(t *testing.T, expectedIn, expectedOut L4PolicyMap, rules ...*api.Rule) {
	t.Helper()
	logger := hivetest.Logger(t)

	// Initialize with test identity
	td.addIdentity(idA)
	defer td.removeIdentity(idA)

	// Add the rules to policy repository.
	for _, r := range rules {
		// Rules without an explicit endpoint selector default to selecting
		// identity A.
		if r.EndpointSelector.LabelSelector == nil {
			r.EndpointSelector = endpointSelectorA
		}
		require.NoError(t, r.Sanitize())
	}
	td.repo.ReplaceByLabels(rules, []labels.LabelArray{{}})

	// Resolve the Selector policy for test identity
	td.repo.mutex.RLock()
	defer td.repo.mutex.RUnlock()
	selPolicy, err := td.repo.resolvePolicyLocked(idA)
	require.NoError(t, err)
	defer selPolicy.detach(true, 0)

	// Distill Selector policy to Endpoint Policy
	epPolicy := selPolicy.DistillPolicy(logger, DummyOwner{logger: logger}, nil)
	epPolicy.Ready()

	td.validateResolvedPolicy(t, selPolicy, epPolicy, expectedIn, expectedOut)

	// Incrementally add identities
	td.addIdentity(idB)
	td.addIdentity(idC)

	td.validateResolvedPolicy(t, selPolicy, epPolicy, expectedIn, expectedOut)

	// Incrementally delete identities
	td.removeIdentity(idB)
	td.removeIdentity(idC)

	td.validateResolvedPolicy(t, selPolicy, epPolicy, expectedIn, expectedOut)
}
// policyInvalid asserts that resolving the given rules for identity A fails
// with an error whose message contains errStr. Rules lacking an explicit
// endpoint selector default to selecting identity A.
func (td *testData) policyInvalid(t *testing.T, errStr string, rules ...*api.Rule) {
	t.Helper()
	td.withIDs(ruleTestIDs)
	for i := range rules {
		rule := rules[i]
		if rule.EndpointSelector.LabelSelector == nil {
			rule.EndpointSelector = endpointSelectorA
		}
		require.NoError(t, rule.Sanitize())
	}
	td.repo.ReplaceByLabels(rules, []labels.LabelArray{{}})
	_, err := td.repo.resolvePolicyLocked(idA)
	require.Error(t, err)
	require.ErrorContains(t, err, errStr)
}
// policyValid asserts that the given set of rules resolves for identity A
// without error. Rules lacking an explicit endpoint selector default to
// selecting identity A.
func (td *testData) policyValid(t *testing.T, rules ...*api.Rule) {
	t.Helper()
	td.withIDs(ruleTestIDs)
	for i := range rules {
		rule := rules[i]
		if rule.EndpointSelector.LabelSelector == nil {
			rule.EndpointSelector = endpointSelectorA
		}
		require.NoError(t, rule.Sanitize())
	}
	td.repo.ReplaceByLabels(rules, []labels.LabelArray{{}})
	_, err := td.repo.resolvePolicyLocked(idA)
	require.NoError(t, err)
}
// testPolicyContextType is a dummy context used when evaluating rules.
type testPolicyContextType struct {
	isDeny             bool // current deny/allow evaluation mode, toggled via SetDeny
	ns                 string
	sc                 *SelectorCache
	fromFile           bool // reported by GetTLSContext as the TLS material's FromFile flag
	defaultDenyIngress bool
	defaultDenyEgress  bool
	logger             *slog.Logger
}
// GetNamespace returns the namespace configured for this test context.
func (p *testPolicyContextType) GetNamespace() string {
	return p.ns
}
// GetSelectorCache returns the selector cache configured for this test context.
func (p *testPolicyContextType) GetSelectorCache() *SelectorCache {
	return p.sc
}
// GetTLSContext returns fake TLS material for the two secret names known to
// the test context: "tls-cert" yields a public cert and private key, and
// "tls-ca-certs" yields CA certs. Any other secret name produces an error.
// The returned fromFile flag mirrors the context's configured fromFile field.
func (p *testPolicyContextType) GetTLSContext(tls *api.TLSContext) (ca, public, private string, fromFile bool, err error) {
	switch tls.Secret.Name {
	case "tls-cert":
		return "", "fake public cert", "fake private key", p.fromFile, nil
	case "tls-ca-certs":
		return "fake CA certs", "", "", p.fromFile, nil
	}
	// Go convention: error strings are lowercase; %q quotes the name safely.
	return "", "", "", p.fromFile, fmt.Errorf("unknown test secret %q", tls.Secret.Name)
}
// GetEnvoyHTTPRules returns no translated rules; the 'true' result signals
// the caller that the (nil) translation should be used as-is in tests.
func (p *testPolicyContextType) GetEnvoyHTTPRules(*api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool) {
	return nil, true
}
// SetDeny replaces the context's deny flag with isDeny and reports the value
// the flag had before the update.
func (p *testPolicyContextType) SetDeny(isDeny bool) bool {
	prev := p.isDeny
	p.isDeny = isDeny
	return prev
}
// IsDeny reports whether the context is currently in deny evaluation mode.
func (p *testPolicyContextType) IsDeny() bool {
	return p.isDeny
}
// DefaultDenyIngress reports whether default-deny applies to ingress in this test context.
func (p *testPolicyContextType) DefaultDenyIngress() bool {
	return p.defaultDenyIngress
}
// DefaultDenyEgress reports whether default-deny applies to egress in this test context.
func (p *testPolicyContextType) DefaultDenyEgress() bool {
	return p.defaultDenyEgress
}
// GetLogger returns the logger configured for this test context.
func (p *testPolicyContextType) GetLogger() *slog.Logger {
	return p.logger
}
// Origin always returns NilRuleOrigin; the test context does not track rule origins.
func (p *testPolicyContextType) Origin() ruleOrigin {
	return NilRuleOrigin
}
// SetOrigin is not supported by the test context and panics if called,
// making any unexpected use immediately visible in tests.
func (p *testPolicyContextType) SetOrigin(ruleOrigin) {
	panic("SetOrigin not implemented")
}
// PolicyTrace formats the trace message and emits it at Info level on the
// context's logger.
func (p *testPolicyContextType) PolicyTrace(format string, a ...any) {
	p.logger.Info(fmt.Sprintf(format, a...))
}
// Tests in this file:
//
// How to read this table:
// Case: The test / subtest number.
// L3: Matches at L3 for rule 1, followed by rule 2.
// L4: Matches at L4.
// L7: Rules at L7 for rule 1, followed by rule 2.
// Notes: Extra information about the test.
//
// +-----+-----------------+----------+-----------------+------------------------------------------------------+
// |Case | L3 (1, 2) match | L4 match | L7 match (1, 2) | Notes |
// +=====+=================+==========+=================+======================================================+
// | 1A | *, * | 80/TCP | *, * | Allow all communication on the specified port |
// | 1B | -, - | 80/TCP | *, * | Deny all with an empty FromEndpoints slice |
// | 2A | *, * | 80/TCP | *, "GET /" | Rule 1 shadows rule 2 |
// | 2B | *, * | 80/TCP | "GET /", * | Same as 2A, but import in reverse order |
// | 3 | *, * | 80/TCP | "GET /","GET /" | Exactly duplicate rules (HTTP) |
// | 4 | *, * | 9092/TCP | "foo","foo" | Exactly duplicate rules (Kafka) |
// | 5A | *, * | 80/TCP | "foo","GET /" | Rules with conflicting L7 parser |
// | 5B | *, * | 80/TCP | "GET /","foo" | Same as 5A, but import in reverse order |
// | 6A | "id=a", * | 80/TCP | *, * | Rule 2 is a superset of rule 1 |
// | 6B | *, "id=a" | 80/TCP | *, * | Same as 6A, but import in reverse order |
// | 7A | "id=a", * | 80/TCP | "GET /", * | All traffic is allowed; traffic to A goes via proxy |
// | 7B | *, "id=a" | 80/TCP | *, "GET /" | Same as 7A, but import in reverse order |
// | 8A | "id=a", * | 80/TCP | "GET /","GET /" | Rule 2 is the same as rule 1, except matching all L3 |
// | 8B | *, "id=a" | 80/TCP | "GET /","GET /" | Same as 8A, but import in reverse order |
// | 9A | "id=a", * | 80/TCP | "foo","GET /" | Rules with conflicting L7 parser (+L3 match) |
// | 9B | *, "id=a" | 80/TCP | "GET /","foo" | Same as 9A, but import in reverse order |
// | 10 | "id=a", "id=c" | 80/TCP | "GET /","GET /" | Allow at L7 for two distinct labels (disjoint set) |
// | 11 | "id=a", "id=c" | 80/TCP | *, * | Allow at L4 for two distinct labels (disjoint set) |
// | 12 | "id=a", | 80/TCP | "GET /" | Configure to allow localhost traffic always |
// | 13 | -, - | 80/TCP | *, * | Deny all with an empty ToEndpoints slice |
// | 14 | *, * | 53/UDP | "example.com" | DNS L7 rules with default-allow adds wildcard |
// | 15 | *, * | 80/TCP | "GET /" | HTTP L7 rules with default-allow adds empty rule |
// | 16 | *, * | 9092/TCP | "topic" | Kafka L7 rules with default-allow adds empty topic |
// | 17 | "id=a", * | 53/UDP | "example.com" | DNS L7 + L3 filter with default-allow adds wildcard |
// | 18 | *, * | 80/TCP | "GET /", deny | Default-allow doesn't add wildcard to deny rules |
// +-----+-----------------+----------+-----------------+------------------------------------------------------+
// TestMergeAllowAllL3AndAllowAllL7 covers table cases 1A and 1B: two rules
// that both wildcard L3 and L7 on 80/TCP merge into a single wildcard map
// entry, while an empty (non-nil) FromEndpoints slice selects no identities
// and yields an empty policy map.
func TestMergeAllowAllL3AndAllowAllL7(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	// Case 1A: Specify WildcardEndpointSelector explicitly.
	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	// Both identical wildcard rules collapse into one L4Filter entry with a
	// nil per-selector policy (no L7 redirect).
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		Ingress: true, wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.wildcardCachedSelector: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &rule1)
	// Case1B: an empty non-nil FromEndpoints does not select any identity.
	rule2 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	expected = NewL4PolicyMap()
	td.policyMapEquals(t, expected, nil, &rule2)
}
// Case 2: allow all at L3 in both rules. Allow all in one L7 rule, but second
// rule restricts at L7. Because one L7 rule allows at L7, all traffic is allowed
// at L7, but still redirected at the proxy.
// Should resolve to one rule.
func TestMergeAllowAllL3AndShadowedL7(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	// Case 2A: L7-wildcard rule first, HTTP-restricted rule second.
	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	// The trailing empty PortRuleHTTP ({}) is the L7 wildcard contributed by
	// the allow-all rule; traffic still goes through the proxy.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
				},
			},
		},
		Ingress:    true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	}})
	td.policyMapEquals(t, expected, nil, &rule1)
	// Case 2B: Flip order of case 2A so that rule being merged with is different
	// than rule being consumed.
	rule2 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	// Result must be identical to case 2A regardless of rule import order.
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.wildcardCachedSelector: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &rule2)
}
// Case 3: allow all at L3 in both rules. Both rules have same parser type and
// same API resource specified at L7 for HTTP.
func TestMergeIdenticalAllowAllL3AndRestrictedL7HTTP(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	identicalHTTPRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	// Duplicate L7 rules are deduplicated: only one "GET /" entry remains.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress:    true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	}})
	td.policyMapEquals(t, expected, nil, &identicalHTTPRule)
}
// Case 4: identical allow all at L3 with identical restrictions on Kafka.
func TestMergeIdenticalAllowAllL3AndRestrictedL7Kafka(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	identicalKafkaRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
		},
	}
	// Duplicate Kafka topic rules are deduplicated into a single entry.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"9092/TCP": {
		Port:     9092,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeKafka,
				Priority: ListenerPriorityKafka,
				L7Rules: api.L7Rules{
					Kafka: []kafka.PortRule{{Topic: "foo"}},
				},
			},
		},
		Ingress:    true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	}})
	td.policyMapEquals(t, expected, nil, &identicalKafkaRule)
}
// Case 5: use conflicting protocols on the same port in different rules. This
// is not supported, so return an error.
func TestMergeIdenticalAllowAllL3AndMismatchingParsers(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	// Case 5A: Kafka first, HTTP second.
	conflictingParsersRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	td.policyInvalid(t, "cannot merge conflicting L7 parsers", &conflictingParsersRule)
	// Case 5B: HTTP first, Kafka second.
	conflictingParsersRule = api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
		},
	}
	td.policyInvalid(t, "cannot merge conflicting L7 parsers", &conflictingParsersRule)
	// Case 5B+: HTTP first, generic L7 second.
	conflictingParsersIngressRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						L7Proto: "testing",
						L7: []api.PortRuleL7{
							{"method": "PUT", "path": "/Foo"},
						},
					},
				}},
			},
		},
	}
	td.policyInvalid(t, "cannot merge conflicting L7 parsers", &conflictingParsersIngressRule)
	// Case 5B++: generic L7 without rules first, HTTP second.
	conflictingParsersEgressRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						L7Proto: "testing",
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	td.policyInvalid(t, "cannot merge conflicting L7 parsers", &conflictingParsersEgressRule)
}
// TLS policies with and without interception
// TLS policy without L7 rules does not inspect L7, uses L7ParserType "tls"
func TestMergeTLSTCPPolicy(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	egressRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					TerminatingTLS: &api.TLSContext{
						Secret: &api.Secret{
							Name: "tls-cert",
						},
					},
					OriginatingTLS: &api.TLSContext{
						Secret: &api.Secret{
							Name: "tls-ca-certs",
						},
					},
				}},
			},
		},
	}
	// Since cachedSelectorB's map entry is 'nil', it will not be redirected to the proxy.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
		Port:     443,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: nil,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: nil, // no proxy redirect
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeTLS,
				Priority: ListenerPriorityTLS,
				TerminatingTLS: &TLSContext{
					FromFile:         true,
					TrustedCA:        "fake ca tls-cert",
					CertificateChain: "fake public key tls-cert",
					PrivateKey:       "fake private key tls-cert",
					Secret: types.NamespacedName{
						Name: "tls-cert",
					},
				},
				OriginatingTLS: &TLSContext{
					FromFile:         true,
					TrustedCA:        "fake ca tls-ca-certs",
					CertificateChain: "fake public key tls-ca-certs",
					PrivateKey:       "fake private key tls-ca-certs",
					Secret: types.NamespacedName{
						Name: "tls-ca-certs",
					},
				},
			},
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, nil, expected, &egressRule)
}
// TestMergeTLSHTTPPolicy verifies that a TLS-interception rule combined with
// HTTP L7 rules selects the HTTP parser (rather than "tls") for the
// TLS-configured selector, while plain L4 peers are not redirected.
func TestMergeTLSHTTPPolicy(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	egressRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					TerminatingTLS: &api.TLSContext{
						Secret: &api.Secret{
							Name: "tls-cert",
						},
					},
					OriginatingTLS: &api.TLSContext{
						Secret: &api.Secret{
							Name: "tls-ca-certs",
						},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{{}},
					},
				}},
			},
		},
	}
	// Since cachedSelectorB's map entry is 'nil', it will not be redirected to the proxy.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
		Port:     443,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: nil,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: nil, // no proxy redirect
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				TerminatingTLS: &TLSContext{
					FromFile:         true,
					TrustedCA:        "fake ca tls-cert",
					CertificateChain: "fake public key tls-cert",
					PrivateKey:       "fake private key tls-cert",
					Secret: types.NamespacedName{
						Name: "tls-cert",
					},
				},
				OriginatingTLS: &TLSContext{
					FromFile:         true,
					TrustedCA:        "fake ca tls-ca-certs",
					CertificateChain: "fake public key tls-ca-certs",
					PrivateKey:       "fake private key tls-ca-certs",
					Secret: types.NamespacedName{
						Name: "tls-ca-certs",
					},
				},
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{}},
				},
			},
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, nil, expected, &egressRule)
}
// TestMergeTLSSNIPolicy verifies that ServerNames (SNI) from multiple rules
// for the same selector and port are merged into one set, and that adding
// HTTP L7 rules upgrades the parser to HTTP.
func TestMergeTLSSNIPolicy(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	egressRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					TerminatingTLS: &api.TLSContext{
						Secret: &api.Secret{
							Name: "tls-cert",
						},
					},
					OriginatingTLS: &api.TLSContext{
						Secret: &api.Secret{
							Name: "tls-ca-certs",
						},
					},
					ServerNames: []api.ServerName{"www.foo.com"},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					ServerNames: []api.ServerName{"www.bar.com"},
				}, {
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{{}},
					},
				}},
			},
		},
	}
	// Since cachedSelectorB's map entry is 'nil', it will not be redirected to the proxy.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
		Port:     443,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: nil,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: nil, // no proxy redirect
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				TerminatingTLS: &TLSContext{
					FromFile:         true,
					TrustedCA:        "fake ca tls-cert",
					CertificateChain: "fake public key tls-cert",
					PrivateKey:       "fake private key tls-cert",
					Secret: types.NamespacedName{
						Name: "tls-cert",
					},
				},
				OriginatingTLS: &TLSContext{
					FromFile:         true,
					TrustedCA:        "fake ca tls-ca-certs",
					CertificateChain: "fake public key tls-ca-certs",
					PrivateKey:       "fake private key tls-ca-certs",
					Secret: types.NamespacedName{
						Name: "tls-ca-certs",
					},
				},
				// SNI names from both rules are merged into one set.
				ServerNames: StringSet{"www.foo.com": {}, "www.bar.com": {}},
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{}},
				},
			},
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, nil, expected, &egressRule)
}
// TestMergeListenerPolicy checks how Listener references resolve for the
// three policy scopes: node-level (CCNP/NodeFirewall) policies may not refer
// to a namespaced CiliumEnvoyConfig, cluster-wide policies must use
// CiliumClusterwideEnvoyConfig, and namespaced policies may use either.
func TestMergeListenerPolicy(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	//
	// no namespace (NodeFirewall policy): Can not refer to EnvoyConfig
	//
	egressRule := api.Rule{
		NodeSelector: hostSelector,
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					Listener: &api.Listener{
						EnvoyConfig: &api.EnvoyConfig{
							Kind: "CiliumEnvoyConfig",
							Name: "test-cec",
						},
						Name: "test",
					},
				}},
			},
		},
	}
	// Host firewall must be enabled for a NodeSelector rule to resolve;
	// restore the global option after the test.
	old := option.Config.EnableHostFirewall
	defer func() {
		option.Config.EnableHostFirewall = old
	}()
	option.Config.EnableHostFirewall = true
	idHost := identity.NewIdentity(identity.ReservedIdentityHost, labels.NewFrom(labels.LabelHost))
	td.withIDs(identity.IdentityMap{idHost.ID: idHost.LabelArray})
	td.repo.mustAdd(egressRule)
	_, err := td.repo.resolvePolicyLocked(idHost)
	require.ErrorContains(t, err, `Listener "test" in CCNP can not use Kind CiliumEnvoyConfig`)
	//
	// no namespace in policyContext (Clusterwide policy): Must refer to
	// CiliumClusterwideEnvoyConfig
	//
	egressRule = api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					Listener: &api.Listener{
						EnvoyConfig: &api.EnvoyConfig{
							Kind: "CiliumClusterwideEnvoyConfig",
							Name: "shared-cec",
						},
						Name: "test",
					},
				}},
			},
		},
	}
	// Since cachedSelectorB's map entry is 'nil', it will not be redirected to the proxy.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
		Port:     443,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: nil,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: nil, // no proxy redirect
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeCRD,
				Priority: ListenerPriorityCRD,
				Listener: "/shared-cec/test",
			},
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, nil, expected, &egressRule)
	//
	// namespace in policyContext (Namespaced policy): Can refer to EnvoyConfig
	//
	egressRule = api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					Listener: &api.Listener{
						EnvoyConfig: &api.EnvoyConfig{
							Kind: "CiliumEnvoyConfig",
							Name: "test-cec",
						},
						Name: "test",
					},
				}},
			},
		},
	}
	// Since cachedSelectorB's map entry is 'nil', it will not be redirected to the proxy.
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
		Port:     443,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: nil,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: nil, // no proxy redirect
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeCRD,
				Priority: ListenerPriorityCRD,
				Listener: "default/test-cec/test",
			},
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, nil, expected, &egressRule)
	//
	// namespace in policyContext (Namespaced policy): Can refer to Cluster-scoped
	// CiliumClusterwideEnvoyConfig
	//
	egressRule = api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "443", Protocol: api.ProtoTCP},
					},
					Listener: &api.Listener{
						EnvoyConfig: &api.EnvoyConfig{
							Kind: "CiliumClusterwideEnvoyConfig",
							Name: "shared-cec",
						},
						Name: "test",
					},
				}},
			},
		},
	}
	// Since cachedSelectorB's map entry is 'nil', it will not be redirected to the proxy.
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
		Port:     443,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: nil,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: nil, // no proxy redirect
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeCRD,
				Priority: ListenerPriorityCRD,
				Listener: "/shared-cec/test",
			},
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, nil, expected, &egressRule)
}
// Case 6: allow all at L3/L7 in one rule, and select an endpoint and allow all on L7
// in another rule. Should resolve to just allowing all on L3/L7 (first rule
// shadows the second).
func TestL3RuleShadowedByL3AllowAll(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	// Case 6A: Specify WildcardEndpointSelector explicitly.
	shadowRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	// Both selectors appear in the map; neither carries an L7 policy, so no
	// proxy redirect occurs.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB:        nil,
			td.wildcardCachedSelector: nil,
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &shadowRule)
	// Case 6B: Reverse the ordering of the rules. Result should be the same.
	shadowRule = api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
			td.cachedSelectorB:        nil,
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &shadowRule)
}
// Case 7: allow all at L3/L7 in one rule, and in another rule, select an endpoint
// which restricts on L7. Should resolve to just allowing all on L3/L7 (first rule
// shadows the second), but setting traffic to the HTTP proxy.
func TestL3RuleWithL7RulePartiallyShadowedByL3AllowAll(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	// Case 7A: selects specific endpoint with L7 restrictions rule first, then
	// rule which selects all endpoints and allows all on L7. Net result sets
	// parser type to whatever is in first rule, but without the restriction
	// on L7.
	shadowRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorA},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	// Wildcard entry stays nil (no redirect); only selector A's traffic goes
	// through the HTTP proxy with the "GET /" restriction.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
			td.cachedSelectorA: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorA:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &shadowRule)
	// Case 7B: selects all endpoints and allows all on L7, then selects specific
	// endpoint with L7 restrictions rule. Net result sets parser type to whatever
	// is in first rule, but without the restriction on L7.
	shadowRule = api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorA},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
			td.cachedSelectorA: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.wildcardCachedSelector: {nil},
			td.cachedSelectorA:        {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &shadowRule)
}
// Case 8: allow all at L3 and restricts on L7 in one rule, and in another rule,
// select an endpoint which restricts the same as the first rule on L7.
// Should resolve to just allowing all on L3, but restricting on L7 for both
// wildcard and the specified endpoint.
func TestL3RuleWithL7RuleShadowedByL3AllowAll(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// Case 8A: selects specific endpoint with L7 restrictions rule first, then
	// rule which selects all endpoints and restricts on the same resource on L7.
	// PerSelectorPolicies contains entries for both endpoints selected in each rule
	// on L7 restriction.
	case8Rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorA},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	// Expected: unlike Case 7, the wildcard selector also carries the HTTP
	// restriction, so both entries get an identical PerSelectorPolicy.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
			td.cachedSelectorA: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorA:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &case8Rule)

	// Case 8B: first insert rule which selects all endpoints and restricts on
	// the same resource on L7. Then, insert rule which selects specific endpoint
	// with L7 restrictions rule. PerSelectorPolicies contains entries for both
	// endpoints selected in each rule on L7 restriction.
	case8Rule = api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorA},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	// Expected result is identical to 8A: rule order must not matter.
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
			td.cachedSelectorA: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorA:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &case8Rule)
}
// Case 9: allow all at L3 and restricts on L7 in one rule, and in another rule,
// on the same selector restricts on different L7 protocol.
// Should fail as cannot have conflicting parsers on same port.
func TestL3SelectingEndpointAndL3AllowAllMergeConflictingL7(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// Case 9A: Kafka first, then HTTP.
	// Both rules select the same endpoint (B) on the same port, so the two
	// different L7 parsers cannot be merged and the policy must be rejected.
	conflictingL7Rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	td.policyInvalid(t, "cannot merge conflicting L7 parsers", &conflictingL7Rule)

	// Case 9B: HTTP first, then Kafka. The rejection must not depend on rule
	// order.
	conflictingL7Rule = api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
		},
	}
	td.policyInvalid(t, "cannot merge conflicting L7 parsers", &conflictingL7Rule)
}
// Case 9b: allow all at L3 and restricts on L7 in one rule, and in another rule,
// select an endpoint which restricts on different L7 protocol, but on different selectors,
// which is now possible.
func TestL3SelectingEndpointAndL3AllowAllMergeDifferentL7(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// Case 9bA: Kafka on a specific selector first, then HTTP on the wildcard
	// selector. Because the two L7 parsers are attached to *different*
	// selectors, the rules are mergeable and the policy is valid.
	differentL7Rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	td.policyValid(t, &differentL7Rule)

	// Case 9bB: HTTP on the wildcard selector first, then Kafka on a specific
	// selector. Validity must not depend on rule order.
	differentL7Rule = api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorA},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
		},
	}
	td.policyValid(t, &differentL7Rule)
}
// Case 10: restrict same path / method on L7 in both rules,
// but select different endpoints in each rule.
func TestMergingWithDifferentEndpointsSelectedAllowSameL7(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	selectDifferentEndpointsRestrictL7 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorA},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	// Expected: no wildcard entry (neither rule selected all endpoints);
	// each selector gets its own copy of the same HTTP restriction.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: nil,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
			td.cachedSelectorA: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorA: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &selectDifferentEndpointsRestrictL7)
}
// Case 11: allow all on L7 in both rules, but select different endpoints in each rule.
func TestMergingWithDifferentEndpointSelectedAllowAllL7(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	selectDifferentEndpointsAllowAllL7 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorA},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	// Expected: both selectors map to nil policies (no L7 restriction and
	// hence no proxy redirect), and no wildcard entry is created.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: nil,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorA: nil,
			td.cachedSelectorC: nil,
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorA: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, expected, nil, &selectDifferentEndpointsAllowAllL7)
}
// Case 12: allow all at L3 in one rule with restrictions at L7. Determine that
// the host should always be allowed. From Host should go to proxy allow all;
// other L3 should restrict at L7 in a separate filter.
func TestAllowingLocalhostShadowsL7(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// This test checks that when the AllowLocalhost=always option is
	// enabled, we always wildcard the host at L7. That means we need to
	// set the option in the config, and of course clean up afterwards so
	// that this test doesn't affect subsequent tests.
	// XXX: Does this affect other tests being run concurrently?
	oldLocalhostOpt := option.Config.AllowLocalhost
	option.Config.AllowLocalhost = option.AllowLocalhostAlways
	defer func() { option.Config.AllowLocalhost = oldLocalhostOpt }()

	rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}
	// Expected: the host selector is injected with a nil policy so host
	// traffic bypasses the proxy, while all other peers remain subject to
	// the HTTP restriction on the wildcard selector.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
			td.cachedSelectorHost: nil, // no proxy redirect
		},
		Ingress:    true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	}})
	td.policyMapEquals(t, expected, nil, &rule)
}
// TestEntitiesL3 verifies that an egress rule using ToEntities with
// EntityAll resolves to a wildcard 0/ANY egress filter (allow all peers,
// all ports/protocols).
func TestEntitiesL3(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	allowWorldRule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEntities: api.EntitySlice{api.EntityAll},
				},
			},
		},
	}
	// Port 0 / ProtoAny represents "any port, any protocol".
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"0/ANY": {
		Port:     0,
		Protocol: api.ProtoAny,
		U8Proto:  0,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		Ingress:    false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	}})
	td.policyMapEquals(t, nil, expected, &allowWorldRule)
}
// Case 13: deny all at L3 in case of an empty non-nil toEndpoints slice.
func TestEgressEmptyToEndpoints(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					// Empty (but non-nil) slice: selects no peers at all,
					// which is distinct from a nil slice (no L3 filter).
					ToEndpoints: []api.EndpointSelector{},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	// No selectable peers means no filters are generated at all.
	expected := NewL4PolicyMap()
	td.policyMapEquals(t, nil, expected, &rule)
}
// Case 14: Test that DNS L7 rules in default-allow mode add a wildcard
func TestDNSWildcardInDefaultAllow(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		// Set EnableDefaultDeny.Egress to false to ensure default-allow mode
		EnableDefaultDeny: api.DefaultDenyConfig{Egress: &falseValue},
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "53", Protocol: api.ProtoUDP},
					},
					Rules: &api.L7Rules{
						DNS: []api.PortRuleDNS{{
							MatchPattern: "example.com",
						}},
					},
				}},
			},
		},
	}
	// In default-allow mode the L7 restriction must not block unrelated DNS
	// traffic, so a "*" match pattern is appended alongside the user rule.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"53/UDP": {
			Port:     53,
			Protocol: api.ProtoUDP,
			U8Proto:  17,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: &PerSelectorPolicy{
					L7Rules: api.L7Rules{
						DNS: []api.PortRuleDNS{{
							MatchPattern: "example.com",
						}, {
							// Wildcard rule should be added
							MatchPattern: "*",
						}},
					},
					L7Parser: ParserTypeDNS,
					Priority: ListenerPriorityDNS,
				},
			},
			Ingress:    false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
		// L3 wildcard rule is also added
		"0/ANY": {
			Port:     0,
			Protocol: api.ProtoAny,
			U8Proto:  0,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: nil,
			},
			Ingress:    false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
	})
	td.policyMapEquals(t, nil, expected, &rule)
}
// Case 15: Test that HTTP L7 rules in default-allow mode add an empty rule
func TestHTTPWildcardInDefaultAllow(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		// Set EnableDefaultDeny.Ingress to false to ensure default-allow mode
		EnableDefaultDeny: api.DefaultDenyConfig{Ingress: &falseValue},
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{{
							Path:   "/api",
							Method: "GET",
						}},
					},
				}},
			},
		},
	}
	// For HTTP the "allow anything else" escape hatch is an empty
	// PortRuleHTTP (matches any request), appended after the user rule.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  6,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: &PerSelectorPolicy{
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{{
							Path:   "/api",
							Method: "GET",
						}, {
							// Empty HTTP rule should be added
						}},
					},
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
				},
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
		// L3 wildcard rule is also added
		"0/ANY": {
			Port:     0,
			Protocol: api.ProtoAny,
			U8Proto:  0,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: nil,
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
	})
	td.policyMapEquals(t, expected, nil, &rule)
}
// Case 16: Test that Kafka L7 rules in default-allow mode add an empty topic rule
func TestKafkaWildcardInDefaultAllow(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		// Set EnableDefaultDeny.Ingress to false to ensure default-allow mode
		EnableDefaultDeny: api.DefaultDenyConfig{Ingress: &falseValue},
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{{
							Topic: "important-topic",
						}},
					},
				}},
			},
		},
	}
	// For Kafka the escape hatch is a rule with an empty topic, which
	// matches all topics.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"9092/TCP": {
			Port:     9092,
			Protocol: api.ProtoTCP,
			U8Proto:  6,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: &PerSelectorPolicy{
					L7Rules: api.L7Rules{
						Kafka: []kafka.PortRule{{
							Topic: "important-topic",
						}, {
							// Empty topic rule should be added
							Topic: "",
						}},
					},
					L7Parser: ParserTypeKafka,
					Priority: ListenerPriorityKafka,
				},
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
		// L3 wildcard rule is also added
		"0/ANY": {
			Port:     0,
			Protocol: api.ProtoAny,
			U8Proto:  0,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: nil,
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
	})
	td.policyMapEquals(t, expected, nil, &rule)
}
// Case 17: Test that DNS L7 rules with L3 filtering in default-allow mode add a wildcard
func TestDNSWildcardWithL3FilterInDefaultAllow(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		// Set EnableDefaultDeny.Egress to false to ensure default-allow mode
		EnableDefaultDeny: api.DefaultDenyConfig{Egress: &falseValue},
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					// Specific L3 endpoint selection
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "53", Protocol: api.ProtoUDP},
					},
					Rules: &api.L7Rules{
						DNS: []api.PortRuleDNS{{
							MatchPattern: "example.com",
						}},
					},
				}},
			},
		},
	}
	// Even with a specific L3 selector, default-allow mode appends the "*"
	// DNS pattern to the selector's policy (no `wildcard` field on 53/UDP,
	// since only selector B is covered there).
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"53/UDP": {
			Port:     53,
			Protocol: api.ProtoUDP,
			U8Proto:  17,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorB: &PerSelectorPolicy{
					L7Rules: api.L7Rules{
						DNS: []api.PortRuleDNS{{
							MatchPattern: "example.com",
						}, {
							// Wildcard rule should be added
							MatchPattern: "*",
						}},
					},
					L7Parser: ParserTypeDNS,
					Priority: ListenerPriorityDNS,
				},
			},
			Ingress:    false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorB: {nil}}),
		},
		// L3 wildcard rule is also added
		"0/ANY": {
			Port:     0,
			Protocol: api.ProtoAny,
			U8Proto:  0,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: nil,
			},
			Ingress:    false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
	})
	td.policyMapEquals(t, nil, expected, &rule)
}
// Case 18: Test that deny rules in default-allow mode don't add wildcards
func TestDenyRuleNoWildcardInDefaultAllow(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)

	// Set policy as a deny rule; restore the previous flag on exit so other
	// tests using the shared context are unaffected.
	origIsDeny := td.testPolicyContext.isDeny
	td.testPolicyContext.isDeny = true
	defer func() {
		td.testPolicyContext.isDeny = origIsDeny
	}()

	rule := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{{
							Path:   "/api",
							Method: "GET",
						}},
					},
				}},
			},
		},
	}
	// Widening a deny rule would deny more traffic than the user asked for,
	// so no wildcard L7 entry (and no 0/ANY filter) may be appended here.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  6,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: &PerSelectorPolicy{
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{{
							Path:   "/api",
							Method: "GET",
							// No wildcard rule should be added
						}},
					},
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
				},
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
	})
	td.policyMapEquals(t, expected, nil, &rule)
}
// TestDefaultAllowL7Rules tests that when EnableDefaultDeny=false, L7 wildcard rules of various
// types are added and don't accidentally block other traffic of the same type.
func TestDefaultAllowL7Rules(t *testing.T) {
	// Each case supplies one L7 rule type plus a verifyWildcard callback that
	// checks the protocol-specific "allow everything else" entry was appended.
	testCases := []struct {
		name           string
		l7Rules        *api.L7Rules
		l7Parser       L7ParserType
		port           string
		proto          api.L4Proto
		verifyWildcard func(t *testing.T, policy *PerSelectorPolicy)
	}{
		{
			name: "DNS rules with default-allow",
			l7Rules: &api.L7Rules{
				DNS: []api.PortRuleDNS{{
					MatchPattern: "example.com",
				}},
			},
			l7Parser: ParserTypeDNS,
			port:     "53",
			proto:    api.ProtoUDP,
			verifyWildcard: func(t *testing.T, policy *PerSelectorPolicy) {
				// DNS wildcard: MatchPattern "*".
				found := false
				for _, dnsRule := range policy.L7Rules.DNS {
					if dnsRule.MatchPattern == "*" {
						found = true
						break
					}
				}
				require.True(t, found, "DNS wildcard rule should be added in default-allow mode")
			},
		},
		{
			name: "HTTP rules with default-allow",
			l7Rules: &api.L7Rules{
				HTTP: []api.PortRuleHTTP{{
					Path:   "/api",
					Method: "GET",
				}},
			},
			l7Parser: ParserTypeHTTP,
			port:     "80",
			proto:    api.ProtoTCP,
			verifyWildcard: func(t *testing.T, policy *PerSelectorPolicy) {
				// HTTP wildcard: a completely empty PortRuleHTTP.
				found := false
				for _, httpRule := range policy.L7Rules.HTTP {
					if httpRule.Path == "" && httpRule.Method == "" && httpRule.Host == "" &&
						len(httpRule.Headers) == 0 && len(httpRule.HeaderMatches) == 0 {
						found = true
						break
					}
				}
				require.True(t, found, "HTTP wildcard rule should be added in default-allow mode")
			},
		},
		{
			name: "Kafka rules with default-allow",
			l7Rules: &api.L7Rules{
				Kafka: []kafka.PortRule{{
					Topic: "important-topic",
				}},
			},
			l7Parser: ParserTypeKafka,
			port:     "9092",
			proto:    api.ProtoTCP,
			verifyWildcard: func(t *testing.T, policy *PerSelectorPolicy) {
				// Kafka wildcard: an empty Topic.
				found := false
				for _, kafkaRule := range policy.L7Rules.Kafka {
					if kafkaRule.Topic == "" {
						found = true
						break
					}
				}
				require.True(t, found, "Kafka wildcard rule should be added in default-allow mode")
			},
		},
		{
			name: "Custom L7 rules with default-allow",
			l7Rules: &api.L7Rules{
				L7Proto: "envoy.filter.protocol.dubbo",
				L7: []api.PortRuleL7{{
					"method": "Login",
				}},
			},
			l7Parser: "envoy.filter.protocol.dubbo",
			port:     "8080",
			proto:    api.ProtoTCP,
			verifyWildcard: func(t *testing.T, policy *PerSelectorPolicy) {
				// Custom L7 wildcard: an empty key/value rule map.
				found := false
				for _, l7Rule := range policy.L7Rules.L7 {
					if len(l7Rule) == 0 {
						found = true
						break
					}
				}
				require.True(t, found, "Custom L7 wildcard rule should be added in default-allow mode")
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			logger := hivetest.Logger(t)
			td := newTestData(logger)
			ctx := &testPolicyContextType{
				logger:            logger,
				sc:                td.sc,
				defaultDenyEgress: false, // EnableDefaultDeny=false
			}
			egressRule := &api.PortRule{
				Ports: []api.PortProtocol{{
					Port:     tc.port,
					Protocol: tc.proto,
				}},
				Rules: tc.l7Rules,
			}
			portProto := api.PortProtocol{
				Port:     tc.port,
				Protocol: tc.proto,
			}
			toEndpoints := api.EndpointSelectorSlice{api.NewESFromLabels(labels.ParseSelectLabel("foo"))}
			l4Filter, err := createL4EgressFilter(ctx, toEndpoints, nil, egressRule, portProto, tc.proto, nil)
			require.NoError(t, err)
			require.NotNil(t, l4Filter)
			// At least one selector must carry a non-nil L7 policy, and each
			// such policy must contain the wildcard entry for its protocol.
			anyPerSelectorPolicy := false
			for _, policy := range l4Filter.PerSelectorPolicies {
				if policy != nil {
					anyPerSelectorPolicy = true
					require.Equal(t, tc.l7Parser, policy.L7Parser, "L7Parser should match")
					tc.verifyWildcard(t, policy)
				}
			}
			require.True(t, anyPerSelectorPolicy, "Should have at least one PerSelectorPolicy")
		})
	}
}
// fakeCertificateManager is a stub certificate source for TLS-related tests;
// it fabricates deterministic secret material instead of reading real
// certificates.
type fakeCertificateManager struct{}

// Fixed prefixes for the fabricated TLS material; GetTLSContext appends the
// secret name so tests can tell which secret a value came from.
const (
	fakeCA         = "fake ca"
	fakePublicKey  = "fake public key"
	fakePrivateKey = "fake private key"
)
// GetTLSContext fabricates TLS material for the secret named in tlsCtx by
// suffixing the fake prefixes with the secret name. It always reports that
// the secrets may be inlined and never fails.
func (*fakeCertificateManager) GetTLSContext(ctx context.Context, tlsCtx *api.TLSContext, ns string) (ca, public, private string, inlineSecrets bool, err error) {
	secretName := tlsCtx.Secret.Name
	return fakeCA + " " + secretName,
		fakePublicKey + " " + secretName,
		fakePrivateKey + " " + secretName,
		true,
		nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"encoding/json"
"fmt"
"math/rand/v2"
"slices"
"sort"
"strconv"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/u8proto"
)
// perSelectorPolicyToString renders psp as its JSON encoding; if marshalling
// fails, the error text is returned instead so callers always get a string.
func perSelectorPolicyToString(psp *PerSelectorPolicy) string {
	encoded, marshalErr := json.Marshal(psp)
	if marshalErr != nil {
		return marshalErr.Error()
	}
	return string(encoded)
}
func TestRedirectType(t *testing.T) {
require.Equal(t, redirectTypeNone, redirectTypes(0))
require.Equal(t, redirectTypeDNS, redirectTypes(0x1))
require.Equal(t, redirectTypeEnvoy, redirectTypes(0x2))
require.Equal(t, redirectTypes(0x4)|redirectTypeEnvoy, redirectTypeProxylib)
require.Equal(t, redirectTypeEnvoy, redirectTypeProxylib&redirectTypeEnvoy)
}
// TestParserTypeMerge exhaustively checks L7ParserType.Merge: for each pair
// (a, b) it asserts the merged result c and whether the merge succeeds.
func TestParserTypeMerge(t *testing.T) {
	for _, tt := range []struct {
		a, b, c L7ParserType
		success bool
	}{
		// trivially true
		{ParserTypeNone, ParserTypeNone, ParserTypeNone, true},
		{ParserTypeDNS, ParserTypeDNS, ParserTypeDNS, true},
		{ParserTypeHTTP, ParserTypeHTTP, ParserTypeHTTP, true},
		{ParserTypeKafka, ParserTypeKafka, ParserTypeKafka, true},
		{L7ParserType("foo"), L7ParserType("foo"), L7ParserType("foo"), true},
		{ParserTypeTLS, ParserTypeTLS, ParserTypeTLS, true},

		// None can be promoted to any other type
		{ParserTypeNone, ParserTypeDNS, ParserTypeDNS, true},
		{ParserTypeDNS, ParserTypeNone, ParserTypeDNS, true},
		{ParserTypeNone, ParserTypeHTTP, ParserTypeHTTP, true},
		{ParserTypeHTTP, ParserTypeNone, ParserTypeHTTP, true},
		{ParserTypeNone, ParserTypeKafka, ParserTypeKafka, true},
		{ParserTypeKafka, ParserTypeNone, ParserTypeKafka, true},
		{ParserTypeNone, L7ParserType("foo"), L7ParserType("foo"), true},
		{L7ParserType("foo"), ParserTypeNone, L7ParserType("foo"), true},
		{ParserTypeNone, ParserTypeTLS, ParserTypeTLS, true},
		{ParserTypeTLS, ParserTypeNone, ParserTypeTLS, true},
		{ParserTypeNone, ParserTypeCRD, ParserTypeCRD, true},
		{ParserTypeCRD, ParserTypeNone, ParserTypeCRD, true},

		// None of the actual parser types can be promoted to CRD
		{ParserTypeHTTP, ParserTypeCRD, ParserTypeNone, false},
		{ParserTypeCRD, ParserTypeHTTP, ParserTypeNone, false},
		{ParserTypeTLS, ParserTypeCRD, ParserTypeNone, false},
		{ParserTypeCRD, ParserTypeTLS, ParserTypeNone, false},
		{ParserTypeKafka, ParserTypeCRD, ParserTypeNone, false},
		{ParserTypeCRD, ParserTypeKafka, ParserTypeNone, false},
		{L7ParserType("foo"), ParserTypeCRD, ParserTypeNone, false},
		{ParserTypeCRD, L7ParserType("foo"), ParserTypeNone, false},

		// TLS can also be promoted to any other type except for DNS (but not demoted to
		// None)
		{ParserTypeTLS, ParserTypeHTTP, ParserTypeHTTP, true},
		{ParserTypeHTTP, ParserTypeTLS, ParserTypeHTTP, true},
		{ParserTypeTLS, ParserTypeKafka, ParserTypeKafka, true},
		{ParserTypeKafka, ParserTypeTLS, ParserTypeKafka, true},
		{ParserTypeTLS, L7ParserType("foo"), L7ParserType("foo"), true},
		{L7ParserType("foo"), ParserTypeTLS, L7ParserType("foo"), true},

		// DNS does not merge with anything else
		{ParserTypeCRD, ParserTypeDNS, ParserTypeNone, false},
		{ParserTypeDNS, ParserTypeCRD, ParserTypeNone, false},
		{ParserTypeTLS, ParserTypeDNS, ParserTypeNone, false},
		{ParserTypeDNS, ParserTypeTLS, ParserTypeNone, false},
		{ParserTypeDNS, ParserTypeHTTP, ParserTypeNone, false},
		{ParserTypeHTTP, ParserTypeDNS, ParserTypeNone, false},
		{ParserTypeDNS, ParserTypeKafka, ParserTypeNone, false},
		{ParserTypeKafka, ParserTypeDNS, ParserTypeNone, false},
		{ParserTypeDNS, L7ParserType("foo"), ParserTypeNone, false},
		{L7ParserType("foo"), ParserTypeDNS, ParserTypeNone, false},

		// Proxylib parsers do not merge with other proxylib parsers nor with HTTP
		{ParserTypeKafka, ParserTypeHTTP, ParserTypeNone, false},
		{ParserTypeHTTP, ParserTypeKafka, ParserTypeNone, false},
		{L7ParserType("bar"), L7ParserType("foo"), ParserTypeNone, false},
		{L7ParserType("foo"), L7ParserType("bar"), ParserTypeNone, false},
		{L7ParserType("bar"), ParserTypeHTTP, ParserTypeNone, false},
		{ParserTypeHTTP, L7ParserType("bar"), ParserTypeNone, false},
	} {
		res, err := tt.a.Merge(tt.b)
		if tt.success {
			require.NoError(t, err)
		} else {
			require.Error(t, err)
		}
		// Log the failing combination before the assertion so the table row
		// is identifiable in the test output.
		if res != tt.c {
			t.Logf("Merge %s with %s, expecting %s\n", tt.a, tt.b, tt.c)
		}
		require.Equal(t, tt.c, res)
	}
}
// TestCreateL4Filter verifies that both ingress and egress filter creation
// produce exactly one per-selector L7 policy with default (disabled, implicit)
// auth and an Envoy redirect, for wildcard and label-based selectors alike.
func TestCreateL4Filter(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	tuple := api.PortProtocol{Port: "80", Protocol: api.ProtoTCP}
	portrule := &api.PortRule{
		Ports: []api.PortProtocol{tuple},
		Rules: &api.L7Rules{
			HTTP: []api.PortRuleHTTP{
				{Path: "/public", Method: "GET"},
			},
		},
	}
	selectors := []api.EndpointSelector{
		api.NewESFromLabels(), // wildcard selector
		api.NewESFromLabels(labels.ParseSelectLabel("bar")),
	}

	for _, selector := range selectors {
		eps := []api.EndpointSelector{selector}
		// Regardless of ingress/egress, we should end up with
		// a single L7 rule whether the selector is wildcarded
		// or if it is based on specific labels.
		filter, err := createL4IngressFilter(td.testPolicyContext, eps, nil, nil, portrule, tuple, tuple.Protocol)
		require.NoError(t, err)
		require.Len(t, filter.PerSelectorPolicies, 1)
		for _, sp := range filter.PerSelectorPolicies {
			// No Authentication was supplied, so auth is implicit (not
			// explicit) and disabled.
			explicit, authType := sp.getAuthType()
			require.False(t, explicit)
			require.Equal(t, AuthTypeDisabled, authType)
			require.Equal(t, redirectTypeEnvoy, sp.redirectType())
		}

		filter, err = createL4EgressFilter(td.testPolicyContext, eps, nil, portrule, tuple, tuple.Protocol, nil)
		require.NoError(t, err)
		require.Len(t, filter.PerSelectorPolicies, 1)
		for _, sp := range filter.PerSelectorPolicies {
			explicit, authType := sp.getAuthType()
			require.False(t, explicit)
			require.Equal(t, AuthTypeDisabled, authType)
			require.Equal(t, redirectTypeEnvoy, sp.redirectType())
		}
	}
}
// TestCreateL4FilterAuthRequired verifies that an explicit Authentication
// block on the rule is reflected as an explicit (but still disabled) auth
// type on every per-selector policy, for ingress and egress alike.
func TestCreateL4FilterAuthRequired(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	pp := api.PortProtocol{Port: "80", Protocol: api.ProtoTCP}
	rule := &api.PortRule{
		Ports: []api.PortProtocol{pp},
		Rules: &api.L7Rules{
			HTTP: []api.PortRuleHTTP{
				{Path: "/public", Method: "GET"},
			},
		},
	}
	auth := &api.Authentication{Mode: api.AuthenticationModeDisabled}
	for _, sel := range []api.EndpointSelector{
		api.NewESFromLabels(),
		api.NewESFromLabels(labels.ParseSelectLabel("bar")),
	} {
		eps := []api.EndpointSelector{sel}
		// Regardless of ingress/egress, a single L7 rule is expected
		// whether the selector is wildcarded or based on specific labels.
		ingressFilter, err := createL4IngressFilter(td.testPolicyContext, eps, auth, nil, rule, pp, pp.Protocol)
		require.NoError(t, err)
		require.Len(t, ingressFilter.PerSelectorPolicies, 1)
		for _, sp := range ingressFilter.PerSelectorPolicies {
			explicit, authType := sp.getAuthType()
			require.True(t, explicit)
			require.Equal(t, AuthTypeDisabled, authType)
			require.Equal(t, redirectTypeEnvoy, sp.redirectType())
		}
		egressFilter, err := createL4EgressFilter(td.testPolicyContext, eps, auth, rule, pp, pp.Protocol, nil)
		require.NoError(t, err)
		require.Len(t, egressFilter.PerSelectorPolicies, 1)
		for _, sp := range egressFilter.PerSelectorPolicies {
			explicit, authType := sp.getAuthType()
			require.True(t, explicit)
			require.Equal(t, AuthTypeDisabled, authType)
			require.Equal(t, redirectTypeEnvoy, sp.redirectType())
		}
	}
}
// TestCreateL4FilterMissingSecret verifies that filter creation fails, for
// both ingress and egress, when the rule's TerminatingTLS references a
// secret that does not exist.
func TestCreateL4FilterMissingSecret(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	pp := api.PortProtocol{Port: "80", Protocol: api.ProtoTCP}
	rule := &api.PortRule{
		Ports: []api.PortProtocol{pp},
		// TLS termination with a secret that is not present anywhere.
		TerminatingTLS: &api.TLSContext{
			Secret: &api.Secret{
				Name: "notExisting",
			},
		},
		Rules: &api.L7Rules{
			HTTP: []api.PortRuleHTTP{
				{Path: "/public", Method: "GET"},
			},
		},
	}
	for _, sel := range []api.EndpointSelector{
		api.NewESFromLabels(),
		api.NewESFromLabels(labels.ParseSelectLabel("bar")),
	} {
		eps := []api.EndpointSelector{sel}
		// The missing secret must be rejected no matter the direction or
		// whether the selector is wildcarded or label-based.
		_, err := createL4IngressFilter(td.testPolicyContext, eps, nil, nil, rule, pp, pp.Protocol)
		require.Error(t, err)
		_, err = createL4EgressFilter(td.testPolicyContext, eps, nil, rule, pp, pp.Protocol, nil)
		require.Error(t, err)
	}
}
// SortablePolicyRules implements sort.Interface over model policy rules,
// ordering them lexicographically by their serialized rule text.
type SortablePolicyRules []*models.PolicyRule

// Len reports the number of rules.
func (a SortablePolicyRules) Len() int {
	return len(a)
}

// Swap exchanges the rules at indices i and j.
func (a SortablePolicyRules) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

// Less orders rules by their JSON rule string.
func (a SortablePolicyRules) Less(i, j int) bool {
	return a[i].Rule < a[j].Rule
}
// TestJSONMarshal verifies that L4Policy.GetModel renders the expected API
// model: each egress/ingress filter becomes one models.PolicyRule whose Rule
// field is compacted JSON, including per-selector L7 rules.
func TestJSONMarshal(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	// A fresh model must render empty (non-nil) rule slices.
	model := &models.L4Policy{}
	require.Equal(t, "[]", fmt.Sprintf("%+v", model.Egress))
	require.Equal(t, "[]", fmt.Sprintf("%+v", model.Ingress))
	// One plain egress filter and three ingress filters: HTTP on 80, a
	// custom proxylib "tester" parser on 9090, and HTTP on 8080 with both a
	// label-based and a wildcard selector.
	policy := L4Policy{
		Egress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
			"8080/TCP": {
				Port:     8080,
				Protocol: api.ProtoTCP,
				Ingress:  false,
			},
		})},
		Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
			"80/TCP": {
				Port: 80, Protocol: api.ProtoTCP,
				PerSelectorPolicies: L7DataMap{
					td.cachedFooSelector: &PerSelectorPolicy{
						L7Parser: ParserTypeHTTP,
						L7Rules: api.L7Rules{
							HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
						},
					},
				},
				Ingress: true,
			},
			"9090/TCP": {
				Port: 9090, Protocol: api.ProtoTCP,
				PerSelectorPolicies: L7DataMap{
					td.cachedFooSelector: &PerSelectorPolicy{
						L7Parser: "tester",
						L7Rules: api.L7Rules{
							L7Proto: "tester",
							L7: []api.PortRuleL7{
								map[string]string{
									"method": "PUT",
									"path":   "/"},
								map[string]string{
									"method": "GET",
									"path":   "/"},
							},
						},
					},
				},
				Ingress: true,
			},
			"8080/TCP": {
				Port: 8080, Protocol: api.ProtoTCP,
				PerSelectorPolicies: L7DataMap{
					td.cachedFooSelector: &PerSelectorPolicy{
						L7Parser: ParserTypeHTTP,
						L7Rules: api.L7Rules{
							HTTP: []api.PortRuleHTTP{
								{Path: "/", Method: "GET"},
								{Path: "/bar", Method: "GET"},
							},
						},
					},
					td.wildcardCachedSelector: &PerSelectorPolicy{
						L7Rules: api.L7Rules{
							HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
						},
					},
				},
				Ingress: true,
			},
		})},
	}
	policy.Attach(td.testPolicyContext)
	model = policy.GetModel()
	require.NotNil(t, model)
	// Expected rule strings are written as readable JSON here and compacted
	// before comparison; both sides are sorted for a stable order.
	expectedEgress := []string{`{
  "port": 8080,
  "protocol": "TCP"
}`}
	sort.StringSlice(expectedEgress).Sort()
	sort.Sort(SortablePolicyRules(model.Egress))
	require.Len(t, model.Egress, len(expectedEgress))
	for i := range expectedEgress {
		expected := new(bytes.Buffer)
		err := json.Compact(expected, []byte(expectedEgress[i]))
		require.NoError(t, err, "Could not compact expected json")
		require.Equal(t, expected.String(), model.Egress[i].Rule)
	}
	expectedIngress := []string{`{
  "port": 80,
  "protocol": "TCP",
  "l7-rules": [
    {
      "\u0026LabelSelector{MatchLabels:map[string]string{any.foo: ,},MatchExpressions:[]LabelSelectorRequirement{},}": {
        "http": [
          {
            "path": "/",
            "method": "GET"
          }
        ]
      }
    }
  ]
}`,
		`{
  "port": 9090,
  "protocol": "TCP",
  "l7-rules": [
    {
      "\u0026LabelSelector{MatchLabels:map[string]string{any.foo: ,},MatchExpressions:[]LabelSelectorRequirement{},}": {
        "l7proto": "tester",
        "l7": [
          {
            "method": "PUT",
            "path": "/"
          },
          {
            "method": "GET",
            "path": "/"
          }
        ]
      }
    }
  ]
}`,
		`{
  "port": 8080,
  "protocol": "TCP",
  "l7-rules": [
    {
      "\u0026LabelSelector{MatchLabels:map[string]string{any.foo: ,},MatchExpressions:[]LabelSelectorRequirement{},}": {
        "http": [
          {
            "path": "/",
            "method": "GET"
          },
          {
            "path": "/bar",
            "method": "GET"
          }
        ]
      }
    },
    {
      "\u0026LabelSelector{MatchLabels:map[string]string{},MatchExpressions:[]LabelSelectorRequirement{},}": {
        "http": [
          {
            "path": "/",
            "method": "GET"
          }
        ]
      }
    }
  ]
}`}
	sort.StringSlice(expectedIngress).Sort()
	sort.Sort(SortablePolicyRules(model.Ingress))
	require.Len(t, model.Ingress, len(expectedIngress))
	for i := range expectedIngress {
		expected := new(bytes.Buffer)
		err := json.Compact(expected, []byte(expectedIngress[i]))
		require.NoError(t, err, "Could not compact expected json")
		require.Equal(t, expected.String(), model.Ingress[i].Rule)
	}
	// HTTP rules imply an Envoy redirect; the "tester" parser implies a
	// proxylib redirect.
	require.True(t, policy.HasEnvoyRedirect())
	require.True(t, policy.HasProxylibRedirect())
}
// TestL4PolicyMapPortRangeOverlaps tests the Upsert, ExactLookup,
// and Delete methods with L4Filters that have overlapping ports.
//
// For every port range it inserts that range, then for each *other*
// (overlapping) range verifies: absence before insert, presence and identity
// after insert, that the main range is untouched, and the same again after a
// delete.
func TestL4PolicyMapPortRangeOverlaps(t *testing.T) {
	portRanges := []struct {
		startPort, endPort uint16
	}{
		{1, 65534}, {1, 1023}, {0, 65535}, {1024, 65535},
	}
	for i, portRange := range portRanges {
		// The subtest's own *testing.T deliberately shadows the outer one so
		// that logs and require failures are attributed to the subtest
		// (the previous version named it 'tt' but kept asserting on the
		// outer 't').
		t.Run(fmt.Sprintf("%d-%d", portRange.startPort, portRange.endPort), func(t *testing.T) {
			l4Map := NewL4PolicyMap()
			startFilter := &L4Filter{
				U8Proto:  u8proto.TCP,
				Protocol: api.ProtoTCP,
				Port:     portRange.startPort,
				EndPort:  portRange.endPort,
			}
			startPort := fmt.Sprintf("%d", portRange.startPort)
			l4Map.Upsert(startPort, portRange.endPort, "TCP", startFilter)
			// We need to make a copy of portRanges to splice.
			pRs := make([]struct{ startPort, endPort uint16 }, len(portRanges))
			copy(pRs, portRanges)
			// Iterate over every port range except the one being tested.
			for _, altPR := range slices.Delete(pRs, i, i+1) {
				t.Logf("Checking for port range %d-%d on main port range %d-%d", altPR.startPort, altPR.endPort, portRange.startPort, portRange.endPort)
				altStartPort := fmt.Sprintf("%d", altPR.startPort)
				// This range should not exist yet.
				altL4 := l4Map.ExactLookup(altStartPort, altPR.endPort, "TCP")
				require.Nilf(t, altL4, "%d-%d range found and it should not have been", altPR.startPort, altPR.endPort)
				altFilter := &L4Filter{
					U8Proto:  u8proto.TCP,
					Protocol: api.ProtoTCP,
					Port:     altPR.startPort,
					EndPort:  altPR.endPort,
				}
				// Upsert overlapping port range.
				l4Map.Upsert(altStartPort, altPR.endPort, "TCP", altFilter)
				altL4 = l4Map.ExactLookup(altStartPort, altPR.endPort, "TCP")
				require.NotNilf(t, altL4, "%d-%d range not found and it should have been", altPR.startPort, altPR.endPort)
				require.Truef(t, altL4.Equals(altFilter), "%d-%d range lookup returned a range of %d-%d",
					altPR.startPort, altPR.endPort, altL4.Port, altL4.EndPort)
				// The main range must be unaffected by the overlapping upsert.
				gotMainFilter := l4Map.ExactLookup(startPort, portRange.endPort, "TCP")
				require.Truef(t, gotMainFilter.Equals(startFilter), "main range look up failed after %d-%d range upsert", altPR.startPort, altPR.endPort)
				// Delete overlapping port range, and make sure it's not there.
				l4Map.Delete(altStartPort, altPR.endPort, "TCP")
				altL4 = l4Map.ExactLookup(altStartPort, altPR.endPort, "TCP")
				require.Nilf(t, altL4, "%d-%d range found after a delete and it should not have been", altPR.startPort, altPR.endPort)
				// ... and the main range must still be intact.
				gotMainFilter = l4Map.ExactLookup(startPort, portRange.endPort, "TCP")
				require.Truef(t, gotMainFilter.Equals(startFilter), "main range look up failed after %d-%d range delete", altPR.startPort, altPR.endPort)
				// Put it back for the next iteration.
				l4Map.Upsert(altStartPort, altPR.endPort, "TCP", altFilter)
			}
		})
	}
}
func BenchmarkContainsAllL3L4(b *testing.B) {
id := uint16(rand.IntN(65535))
port := uint16(rand.IntN(65535))
b.ReportAllocs()
for range 1000 {
b.StartTimer()
proxyID := ProxyID(id, true, "TCP", port, "")
if proxyID != strconv.FormatInt(int64(id), 10)+"ingress:TCP:8080:" {
b.Failed()
}
_, _, _, _, _, err := ParseProxyID(proxyID)
if err != nil {
b.Failed()
}
b.StopTimer()
}
}
// BenchmarkEvaluateL4PolicyMapState measures policy map computation for a
// set of L4 filters in two ways: computing the whole map at once
// (toMapState) versus applying the selectors incrementally
// (AccumulateMapChanges / ConsumeMapChanges).
func BenchmarkEvaluateL4PolicyMapState(b *testing.B) {
	logger := hivetest.Logger(b)
	owner := DummyOwner{logger: logger}
	// Each iteration needs a fresh endpoint policy so state from the
	// previous round does not skew the measurement.
	newEmptyEndpointPolicy := func() *EndpointPolicy {
		return &EndpointPolicy{
			selectorPolicy:   &selectorPolicy{},
			PolicyOwner:      owner,
			policyMapState:   emptyMapState(logger),
			policyMapChanges: MapChanges{logger: logger},
		}
	}
	// One wildcard selector and three selectors with disjoint identity sets.
	ws := newTestCachedSelector("wildcard", true)
	testSelA := newTestCachedSelector("test-selector-a", false, 101, 102, 103)
	testSelB := newTestCachedSelector("test-selector-b", false, 201, 202, 203)
	testSelC := newTestCachedSelector("test-selector-c", false, 301, 302, 303)
	testL4Filters := []*L4Filter{
		// L4 wildcard selector.
		{
			Port:     9000,
			Protocol: api.ProtoTCP,
			wildcard: ws,
			PerSelectorPolicies: L7DataMap{
				ws: nil,
			},
			Ingress: true,
		},
		// L4 with multiple selectors.
		{
			Port:     9001,
			Protocol: api.ProtoTCP,
			PerSelectorPolicies: L7DataMap{
				testSelA: nil,
				testSelB: nil,
				testSelC: nil,
			},
			Ingress: true,
		},
		// L4 with multiple selectors and wildcard.
		{
			Port:     9002,
			Protocol: api.ProtoTCP,
			wildcard: ws,
			PerSelectorPolicies: L7DataMap{
				ws: nil,
				testSelA: nil,
				testSelB: nil,
				testSelC: nil,
			},
			Ingress: true,
		},
	}
	b.ReportAllocs()
	b.Run("ToMapState", func(b *testing.B) {
		for b.Loop() {
			// Setup is excluded from the timed region.
			b.StopTimer()
			epPolicy := newEmptyEndpointPolicy()
			b.StartTimer()
			for _, filter := range testL4Filters {
				filter.toMapState(logger, epPolicy, 0, ChangeState{})
			}
		}
	})
	b.Run("IncrementalToMapState", func(b *testing.B) {
		for b.Loop() {
			b.StopTimer()
			epPolicy := newEmptyEndpointPolicy()
			l4Policy := L4Policy{
				users: map[*EndpointPolicy]struct{}{
					epPolicy: {},
				},
			}
			// Compute initial policy with just the wildcard selectors.
			// The full PerSelectorPolicies map is temporarily swapped out
			// and restored afterwards so the timed phase adds the rest.
			for _, filter := range testL4Filters {
				if filter.wildcard != nil {
					psp := filter.PerSelectorPolicies
					filter.PerSelectorPolicies = L7DataMap{ws: nil}
					filter.toMapState(logger, epPolicy, 0, ChangeState{})
					filter.PerSelectorPolicies = psp
				}
			}
			b.StartTimer()
			// Timed phase: feed each selector's identities in as an
			// incremental change and consume the resulting map changes.
			for _, filter := range testL4Filters {
				for cs := range filter.PerSelectorPolicies {
					testSel, ok := cs.(*testCachedSelector)
					if !ok {
						b.FailNow()
					}
					l4Policy.AccumulateMapChanges(logger, filter, cs, testSel.selections, nil)
					l4Policy.SyncMapChanges(filter, versioned.LatestTx)
					closer, _ := epPolicy.ConsumeMapChanges()
					closer()
				}
			}
		}
	})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
// This includes some utilities for simulating policy verdicts.
//
// It is only used for tests, but is used by multiple packages.
import (
"fmt"
"log/slog"
"github.com/cilium/cilium/pkg/endpoint/regeneration"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/spanstat"
"github.com/cilium/cilium/pkg/u8proto"
)
// Flow describes a single connection attempt between two identities, used
// as the input of a simulated policy lookup.
type Flow struct {
	// From and To are the source and destination security identities.
	From, To *identity.Identity
	// Proto is the L4 protocol of the flow.
	Proto u8proto.U8proto
	// Dport is the destination port number.
	Dport uint16
}
// EndpointInfo carries the minimal per-endpoint data needed to simulate a
// policy lookup: an endpoint ID and its named-port mappings.
type EndpointInfo struct {
	ID uint64

	// TCPNamedPorts and UDPNamedPorts map port names to port numbers,
	// per protocol.
	TCPNamedPorts map[string]uint16
	UDPNamedPorts map[string]uint16

	Logger *slog.Logger

	// Used when determining peer named ports
	remoteEndpoint *EndpointInfo
}
// LookupFlow determines the policy verdict for a given flow.
//
// The flow's identities must have been loaded in to the repository's SelectorCache,
// or policy will not be correctly computed.
//
// This function is only used for testing, but in multiple packages.
//
// The 'egress' result holds the rule metadata of the source endpoint's
// egress lookup; 'ingress' holds that of the destination's ingress lookup.
// (Previously the two were stored in each other's variables and swapped
// back at every return site; the behavior is unchanged, only the
// assignments are now named consistently.)
//
// TODO: add support for redirects
func LookupFlow(logger *slog.Logger, repo PolicyRepository, flow Flow, srcEP, dstEP *EndpointInfo) (verdict api.Decision, egress, ingress RuleMeta, err error) {
	// Validate the flow: numeric identities, known to the SelectorCache,
	// and a concrete port/protocol.
	if flow.From.ID == 0 || flow.To.ID == 0 {
		return api.Undecided, egress, ingress, fmt.Errorf("cannot lookup flow: numeric IDs missing")
	}
	if _, exists := repo.GetSelectorCache().idCache[flow.From.ID]; !exists {
		return api.Undecided, egress, ingress, fmt.Errorf("From.ID not in SelectorCache!")
	}
	if _, exists := repo.GetSelectorCache().idCache[flow.To.ID]; !exists {
		return api.Undecided, egress, ingress, fmt.Errorf("To.ID not in SelectorCache!")
	}
	if flow.Dport == 0 {
		return api.Undecided, egress, ingress, fmt.Errorf("cannot lookup flow: port number missing")
	}
	if flow.Proto == 0 {
		return api.Undecided, egress, ingress, fmt.Errorf("cannot lookup flow: protocol missing")
	}
	// Default endpoint infos carry just the identity's numeric ID.
	if srcEP == nil {
		srcEP = &EndpointInfo{
			ID: uint64(flow.From.ID),
		}
	}
	if dstEP == nil {
		dstEP = &EndpointInfo{
			ID: uint64(flow.To.ID),
		}
	}
	// Link the endpoints so named-port lookups can resolve the peer.
	srcEP.remoteEndpoint = dstEP
	dstEP.remoteEndpoint = srcEP
	// Resolve and look up the flow as egress from the source
	selPolSrc, _, err := repo.GetSelectorPolicy(flow.From, 0, &dummyPolicyStats{}, srcEP.ID)
	if err != nil {
		return api.Undecided, egress, ingress, fmt.Errorf("GetSelectorPolicy(from) failed: %w", err)
	}
	epp := selPolSrc.DistillPolicy(logger, srcEP, nil)
	epp.Ready()
	epp.Detach(logger)
	key := EgressKey().WithIdentity(flow.To.ID).WithPortProto(flow.Proto, flow.Dport)
	entry, egress, _ := epp.Lookup(key)
	if entry.IsDeny() {
		return api.Denied, egress, ingress, nil
	}
	// Resolve ingress policy for destination
	selPolDst, _, err := repo.GetSelectorPolicy(flow.To, 0, &dummyPolicyStats{}, dstEP.ID)
	if err != nil {
		return api.Undecided, egress, ingress, fmt.Errorf("GetSelectorPolicy(to) failed: %w", err)
	}
	epp = selPolDst.DistillPolicy(logger, dstEP, nil)
	epp.Ready()
	epp.Detach(logger)
	key = IngressKey().WithIdentity(flow.From.ID).WithPortProto(flow.Proto, flow.Dport)
	entry, ingress, _ = epp.Lookup(key)
	if entry.IsDeny() {
		return api.Denied, egress, ingress, nil
	}
	return api.Allowed, egress, ingress, nil
}
var _ PolicyOwner = &EndpointInfo{}
// GetID returns the endpoint's numeric ID.
func (ei *EndpointInfo) GetID() uint64 {
	return ei.ID
}
// GetNamedPort determines the named port of the *destination*. So, if ingress
// is false, then this looks up the peer endpoint instead. Returns 0 when the
// name cannot be resolved.
func (ei *EndpointInfo) GetNamedPort(ingress bool, name string, proto u8proto.U8proto) uint16 {
	if !ingress && ei.remoteEndpoint != nil {
		return ei.remoteEndpoint.GetNamedPort(true, name, proto)
	}
	// Select the per-protocol name->port table; a nil map lookup below
	// simply yields 0.
	var ports map[string]uint16
	switch proto {
	case u8proto.TCP:
		ports = ei.TCPNamedPorts
	case u8proto.UDP:
		ports = ei.UDPNamedPorts
	}
	return ports[name]
}
// PolicyDebug emits a debug log entry when a logger has been configured;
// otherwise it is a no-op.
func (ei *EndpointInfo) PolicyDebug(msg string, attrs ...any) {
	if ei.Logger == nil {
		return
	}
	ei.Logger.Debug(msg, attrs...)
}
// IsHost always reports false: the simulated endpoint never represents the
// host endpoint.
func (ei *EndpointInfo) IsHost() bool {
	return false
}
// MapStateSize returns the size of the current desired policy map, used for preallocation of the
// new map. Return 0 here as this is only used for testing.
func (ei *EndpointInfo) MapStateSize() int {
	return 0
}
// RegenerateIfAlive returns an already-closed channel, signaling immediate
// completion: there is nothing to regenerate for a simulated endpoint.
func (ei *EndpointInfo) RegenerateIfAlive(_ *regeneration.ExternalRegenerationMetadata) <-chan bool {
	done := make(chan bool)
	close(done)
	return done
}
// dummyPolicyStats is a throwaway stats sink for simulated policy lookups.
type dummyPolicyStats struct {
	// waitingForPolicyRepository records time spent waiting for the repository.
	waitingForPolicyRepository spanstat.SpanStat
	// policyCalculation records time spent computing the selector policy.
	policyCalculation spanstat.SpanStat
}

// WaitingForPolicyRepository returns the span tracking repository wait time.
func (s *dummyPolicyStats) WaitingForPolicyRepository() *spanstat.SpanStat {
	return &s.waitingForPolicyRepository
}

// SelectorPolicyCalculation returns the span tracking policy calculation time.
func (s *dummyPolicyStats) SelectorPolicyCalculation() *spanstat.SpanStat {
	return &s.policyCalculation
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"iter"
"log/slog"
"github.com/hashicorp/go-hclog"
"github.com/cilium/cilium/pkg/container/bitlpm"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/policy/types"
)
// Key and Keys are types used both internally and externally.
// The types have been lifted out, but an alias is being used
// so we don't have to change all the code everywhere.
//
// Do not use these types outside of pkg/policy or pkg/endpoint,
// lest ye find yourself with hundreds of unnecessary imports.
type Key = types.Key
type Keys = types.Keys
type MapStateEntry = types.MapStateEntry
type MapStateMap = types.MapStateMap

const NoAuthRequirement = types.NoAuthRequirement

// mapStateMap is the internal entry map, keyed by the full policy Key
// (including identity); values carry extra bookkeeping (mapStateEntry).
type mapStateMap map[Key]mapStateEntry
// EgressKey returns a Key for the egress traffic direction.
func EgressKey() types.Key {
	return types.EgressKey()
}

// IngressKey returns a Key for the ingress traffic direction.
func IngressKey() types.Key {
	return types.IngressKey()
}

// KeyForDirection returns a Key for the given traffic direction.
func KeyForDirection(direction trafficdirection.TrafficDirection) Key {
	return types.KeyForDirection(direction)
}
var (
	// localHostKey represents an ingress L3 allow from the local host.
	localHostKey = IngressKey().WithIdentity(identity.ReservedIdentityHost)

	// allKey represents a key for unknown traffic, i.e., all traffic.
	// We have one for each traffic direction (indexed by the direction).
	allKey = [2]Key{
		IngressKey(),
		EgressKey(),
	}
)
const (
	// LabelKeyPolicyDerivedFrom is the label key recording which implicit
	// rule a derived policy entry originates from.
	LabelKeyPolicyDerivedFrom = "io.cilium.policy.derived-from"
	// LabelAllowLocalHostIngress marks the implicit allow of ingress from the local host.
	LabelAllowLocalHostIngress = "allow-localhost-ingress"
	// LabelAllowAnyIngress marks the implicit allow-all ingress entry.
	LabelAllowAnyIngress = "allow-any-ingress"
	// LabelAllowAnyEgress marks the implicit allow-all egress entry.
	LabelAllowAnyEgress = "allow-any-egress"
)
// Pre-built label arrays attached to the implicitly derived policy entries
// above, so the origin of each entry is visible in the policy model.
var (
	LabelsAllowAnyIngress = labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved)}
	LabelsAllowAnyEgress = labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved)}
	LabelsLocalHostIngress = labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowLocalHostIngress, labels.LabelSourceReserved)}
)
// mapState is an indexed container for policymap keys and entries.
//
// The `bitlpm.Trie` indexes the TrafficDirection, Protocol, and Port of
// a policy Key but does **not** index the identity. Instead identities
// that share TrafficDirection, Protocol, and Port are indexed in a builtin
// map type that is the associated value of the key-prefix of TrafficDirection,
// Protocol, and Port. This is done so that Identity does not explode
// the size of the Trie. Consider the case of a policy that selects
// many identities. In this case, if Identity was indexed then every
// identity associated with the policy would create at least one
// intermediate node in the Trie with its own sub node associated with
// TrafficDirection, Protocol, and Port. When identity is not indexed
// then one policy will map to one key-prefix with a builtin map type
// that associates each identity with a mapStateEntry. This strategy
// greatly enhances the usefulness of the Trie and improves lookup,
// deletion, and insertion times.
type mapState struct {
	logger *slog.Logger
	// entries is the map containing the MapStateEntries
	entries mapStateMap
	// trie is a Trie that indexes policy Keys without their identity
	// and stores the identities in an associated builtin map.
	trie bitlpm.Trie[types.LPMKey, IDSet]
}
type IDSet map[identity.NumericIdentity]struct{}
// Valid returns true if the entries map has been initialized,
// i.e. the mapState was created via newMapState rather than zero-valued.
func (ms *mapState) Valid() bool {
	return ms.entries != nil
}
// Empty returns true when the map holds no entries.
func (ms *mapState) Empty() bool {
	return len(ms.entries) == 0
}
// upsert stores entry 'e' under key 'k'. When 'k' was not present before,
// its identity is also registered in the trie's per-prefix ID set.
func (ms *mapState) upsert(k Key, e mapStateEntry) {
	_, existed := ms.entries[k]
	ms.entries[k] = e
	if existed {
		// Indices are already up to date for a pre-existing key.
		return
	}
	// New key: add its identity to the ID set of its prefix, creating
	// the set (and the trie node) on first use.
	idSet, found := ms.trie.ExactLookup(k.PrefixLength(), k.LPMKey)
	if !found {
		idSet = make(IDSet)
		ms.trie.Upsert(k.PrefixLength(), k.LPMKey, idSet)
	}
	idSet[k.Identity] = struct{}{}
}
// delete removes the entry for 'k', unregistering its identity from the
// trie and dropping the trie node when its ID set becomes empty.
func (ms *mapState) delete(k Key) {
	if _, found := ms.entries[k]; !found {
		return
	}
	delete(ms.entries, k)
	idSet, ok := ms.trie.ExactLookup(k.PrefixLength(), k.LPMKey)
	if !ok {
		return
	}
	delete(idSet, k.Identity)
	if len(idSet) == 0 {
		ms.trie.Delete(k.PrefixLength(), k.LPMKey)
	}
}
// ForEach calls 'f' for each key and (exported) entry in the map, in
// unspecified order. Iteration stops early when 'f' returns false;
// the return value is false in that case.
func (ms *mapState) ForEach(f func(Key, MapStateEntry) bool) bool {
	return ms.forEach(func(k Key, e mapStateEntry) bool {
		return f(k, e.MapStateEntry)
	})
}
// forEach calls 'f' for each key and internal entry in the map, in
// unspecified order. Iteration stops early when 'f' returns false;
// the return value is false in that case.
func (ms *mapState) forEach(f func(Key, mapStateEntry) bool) bool {
	for k, e := range ms.entries {
		if !f(k, e) {
			return false
		}
	}
	return true
}
// forKey invokes 'f' with the entry stored for 'k', when one exists.
// A missing entry means the trie index and the entries map are out of
// sync; this is logged as an error and iteration continues (returns true).
func (ms *mapState) forKey(k Key, f func(Key, mapStateEntry) bool) bool {
	e, found := ms.entries[k]
	if !found {
		ms.logger.Error(
			"Missing MapStateEntry",
			logfields.Stacktrace, hclog.Stacktrace(),
			logfields.PolicyKey, k,
		)
		return true
	}
	return f(k, e)
}
// forIDs calls 'f' for each ID in 'idSet' with port/proto from 'k'.
// Iteration stops early (returning false) when 'f' returns false.
func (ms *mapState) forIDs(k Key, idSet IDSet, f func(Key, mapStateEntry) bool) bool {
	for id := range idSet {
		// Reuse 'k' (a value copy), substituting each identity in turn.
		k.Identity = id
		if !ms.forKey(k, f) {
			return false
		}
	}
	return true
}
// forID calls 'f' for 'k' only when 'k.Identity' is a member of 'idSet'.
// Returns false only when 'f' was called and returned false.
func (ms *mapState) forID(k Key, idSet IDSet, f func(Key, mapStateEntry) bool) bool {
	if _, member := idSet[k.Identity]; !member {
		return true
	}
	return ms.forKey(k, f)
}
// BroaderOrEqualKeys iterates over keys that are broader or equal to 'key'
// (broader or equal port/proto and the same or wildcard ID) in the trie.
func (ms *mapState) BroaderOrEqualKeys(key Key) iter.Seq2[Key, mapStateEntry] {
	return func(yield func(Key, mapStateEntry) bool) {
		// 'it' (not 'iter') avoids shadowing the imported iter package.
		it := ms.trie.AncestorIterator(key.PrefixLength(), key.LPMKey)
		for {
			ok, lpmKey, idSet := it.Next()
			if !ok {
				return
			}
			k := Key{LPMKey: lpmKey}
			// The ANY (zero) identity is broader or equal to every
			// identity, so visit it first when present.
			if !ms.forID(k.WithIdentity(0), idSet, yield) {
				return
			}
			// Then the key with exactly the same identity, when present;
			// the ANY identity was already covered above.
			if key.Identity != 0 && !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
				return
			}
		}
	}
}
// NarrowerOrEqualKeys iterates over narrower or equal keys in the trie.
// Iterated keys can be safely deleted during iteration due to DescendantIterator holding enough
// state that allows iteration to be continued even if the current trie node is removed.
func (ms *mapState) NarrowerOrEqualKeys(key Key) iter.Seq2[Key, mapStateEntry] {
	return func(yield func(Key, mapStateEntry) bool) {
		it := ms.trie.DescendantIterator(key.PrefixLength(), key.LPMKey)
		for {
			ok, lpmKey, idSet := it.Next()
			if !ok {
				return
			}
			k := Key{LPMKey: lpmKey}
			if key.Identity == 0 {
				// A wildcard (ANY) search identity covers every identity
				// stored at this prefix.
				if !ms.forIDs(k, idSet, yield) {
					return
				}
				continue
			}
			// Specific search identity: only the exact same identity can
			// be narrower or equal.
			if !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
				return
			}
		}
	}
}
// CoveringKeysWithSameID iterates over broader or equal port/proto entries in the trie in LPM
// order, with the most specific match carrying the same ID as 'key' returned first.
func (ms *mapState) CoveringKeysWithSameID(key Key) iter.Seq2[Key, mapStateEntry] {
	return func(yield func(Key, mapStateEntry) bool) {
		it := ms.trie.AncestorLongestPrefixFirstIterator(key.PrefixLength(), key.LPMKey)
		for {
			ok, lpmKey, idSet := it.Next()
			if !ok {
				return
			}
			// Only the entry with the identical identity is relevant here.
			k := Key{LPMKey: lpmKey}
			if !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
				return
			}
		}
	}
}
// SubsetKeysWithSameID iterates over narrower or equal port/proto entries in the trie in an LPM
// order (least specific match first), visiting only entries carrying the same ID as 'key'.
func (ms *mapState) SubsetKeysWithSameID(key Key) iter.Seq2[Key, mapStateEntry] {
	return func(yield func(Key, mapStateEntry) bool) {
		it := ms.trie.DescendantShortestPrefixFirstIterator(key.PrefixLength(), key.LPMKey)
		for {
			ok, lpmKey, idSet := it.Next()
			if !ok {
				return
			}
			// Only the entry with the identical identity is relevant here.
			k := Key{LPMKey: lpmKey}
			if !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
				return
			}
		}
	}
}
// LPMAncestors iterates over broader or equal port/proto entries in the trie
// in LPM order (most specific prefix first). At each prefix it yields the
// entry with the same identity as 'key' first, then the wildcard (zero)
// identity entry, whenever they exist.
func (ms *mapState) LPMAncestors(key Key) iter.Seq2[Key, mapStateEntry] {
	return func(yield func(Key, mapStateEntry) bool) {
		it := ms.trie.AncestorLongestPrefixFirstIterator(key.PrefixLength(), key.LPMKey)
		for {
			ok, lpmKey, idSet := it.Next()
			if !ok {
				return
			}
			k := Key{LPMKey: lpmKey}
			// Same identity first, when present.
			if !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
				return
			}
			// Then the zero identity, unless it was already visited above.
			if key.Identity != 0 && !ms.forID(k.WithIdentity(0), idSet, yield) {
				return
			}
		}
	}
}
// lookup finds the policy verdict applicable to the given 'key' using the same precedence logic
// between L3 and L4-only policies as the bpf datapath when both match the given 'key'.
// To be used in testing in place of the bpf datapath when full integration testing is not desired.
// Returns the closest matching covering policy entry and 'true' if found.
// 'key' must not have a wildcard identity or port.
func (ms *mapState) lookup(key Key) (mapStateEntry, bool) {
	// Validate that the search key has no wildcards
	if key.Identity == 0 || key.Nexthdr == 0 || key.DestPort == 0 || key.EndPort() != key.DestPort {
		panic("invalid key for Lookup")
	}
	// Find the most specific covering match for the key's own identity
	// ("L3") and for the zero identity ("L4-only"). LPMAncestors yields
	// most-specific-first, so the first hit of each kind wins.
	var l3key, l4key Key
	var l3entry, l4entry mapStateEntry
	var haveL3, haveL4 bool
	for k, v := range ms.LPMAncestors(key) {
		if !haveL3 && k.Identity != 0 {
			l3key, l3entry = k, v
			haveL3 = true
		}
		if !haveL4 && k.Identity == 0 {
			l4key, l4entry = k, v
			haveL4 = true
		}
		if haveL3 && haveL4 {
			break
		}
	}
	authOverride := func(entry, other mapStateEntry) mapStateEntry {
		// This logic needs to be the same as in authPreferredInsert() where the newEntry's
		// auth type may be overridden by a covering key.
		// This also needs to reflect the logic in bpf/lib/policy.h __account_and_check().
		if !entry.AuthRequirement.IsExplicit() &&
			other.AuthRequirement.AuthType() > entry.AuthRequirement.AuthType() {
			entry.AuthRequirement = other.AuthRequirement.AsDerived()
		}
		return entry
	}
	// only one entry found
	if haveL3 != haveL4 {
		if haveL3 {
			return l3entry, true
		}
		return l4entry, true
	}
	// both L3 and L4 matches found
	if haveL3 && haveL4 {
		// Precedence rules of the bpf datapath between two policy entries:
		// 1. Deny is selected, if any
		// 2. Higher proxy port priority wins
		// 3. If both entries are allows at the same proxy port priority, the one with more
		//    specific L4 is selected
		// 4. If the two allows on the same proxy port priority have equal port/proto, then
		//    the policy for a specific L3 is selected (rather than the L4-only entry)
		//
		// If the selected entry has non-explicit auth type, it gets the auth type from the
		// other entry, if the other entry's auth type is numerically higher.
		// 1. Deny wins
		// Check for the L3 deny first to match the datapath behavior
		if l3entry.IsDeny() {
			return l3entry, true
		}
		if l4entry.IsDeny() {
			return l4entry, true
		}
		// 2. Entry with higher proxy port priority is selected.
		// Auth requirement does not propagate from a lower proxy port priority rule to a
		// higher proxy port priority rule!
		if l3entry.ProxyPortPriority > l4entry.ProxyPortPriority {
			return l3entry, true
		}
		if l4entry.ProxyPortPriority > l3entry.ProxyPortPriority {
			return l4entry, true
		}
		// 3. Two allow entries, select the one with more specific L4
		// L3-entry must be selected if prefix lengths are the same!
		if l4key.PrefixLength() > l3key.PrefixLength() {
			return authOverride(l4entry, l3entry), true
		}
		// 4. Two allow entries are equally specific port/proto or L3-entry is more specific
		return authOverride(l3entry, l4entry), true
	}
	// Deny by default if no matches are found
	return mapStateEntry{MapStateEntry: types.DenyEntry(), derivedFromRules: NilRuleOrigin}, false
}
// Len returns the number of entries in the map.
func (ms *mapState) Len() int {
	return len(ms.entries)
}
// mapStateEntry is the entry type with additional internal bookkeeping of the relation between
// explicitly and implicitly added entries.
type mapStateEntry struct {
	MapStateEntry

	// derivedFromRules tracks the policy rules this entry derives from.
	// Must be initialized explicitly, zero-initialization does not work with unique.Handle[].
	derivedFromRules ruleOrigin
}
// newMapStateEntry creates a map state entry with the given origin,
// proxy redirect (port/priority), deny flag, and auth requirement.
func newMapStateEntry(derivedFrom ruleOrigin, proxyPort uint16, priority ListenerPriority, deny bool, authReq AuthRequirement) mapStateEntry {
	return mapStateEntry{
		MapStateEntry:    types.NewMapStateEntry(deny, proxyPort, priority, authReq),
		derivedFromRules: derivedFrom,
	}
}
// newAllowEntryWithLabels creates an allow entry with the specified labels.
// Used for adding allow-all entries when policy enforcement is not wanted.
func newAllowEntryWithLabels(lbls labels.LabelArray) mapStateEntry {
	return newMapStateEntry(makeSingleRuleOrigin(lbls, ""), 0, 0, false, NoAuthRequirement)
}
// NewMapStateEntry wraps an exported MapStateEntry into the internal entry
// type with an explicitly nil rule origin.
func NewMapStateEntry(e MapStateEntry) mapStateEntry {
	return mapStateEntry{
		MapStateEntry:    e,
		derivedFromRules: NilRuleOrigin,
	}
}
// emptyMapState returns an initialized mapState with no preallocated capacity.
func emptyMapState(logger *slog.Logger) mapState {
	return newMapState(logger, 0)
}
// newMapState returns an initialized mapState, preallocating the entries
// map for 'size' entries.
func newMapState(logger *slog.Logger, size int) mapState {
	return mapState{
		logger:  logger,
		entries: make(mapStateMap, size),
		trie:    bitlpm.NewTrie[types.LPMKey, IDSet](types.MapStatePrefixLen),
	}
}
// Get returns the exported MapStateEntry that matches the Key, and whether
// one was found.
func (ms *mapState) Get(k Key) (MapStateEntry, bool) {
	e, found := ms.get(k)
	if !found {
		return MapStateEntry{}, false
	}
	return e.MapStateEntry, true
}
// get returns the internal mapStateEntry that matches the Key, and whether
// one was found.
func (ms *mapState) get(k Key) (mapStateEntry, bool) {
	// A wildcard port (0) must not carry a port prefix; log misuse loudly.
	if k.DestPort == 0 && k.PortPrefixLen() > 0 {
		ms.logger.Error(
			"mapState.Get: invalid port prefix length for wildcard port",
			logfields.Stacktrace, hclog.Stacktrace(),
			logfields.PolicyKey, k,
		)
	}
	e, found := ms.entries[k]
	return e, found
}
// insert adds the Key and mapStateEntry to the mapState, keeping the trie
// index in sync.
func (ms *mapState) insert(k Key, v mapStateEntry) {
	// A wildcard port (0) must not carry a port prefix; log misuse loudly.
	if k.DestPort == 0 && k.PortPrefixLen() > 0 {
		ms.logger.Error(
			"mapState.insert: invalid port prefix length for wildcard port",
			logfields.Stacktrace, hclog.Stacktrace(),
			logfields.PolicyKey, k,
		)
	}
	ms.upsert(k, v)
}
// updateExisting re-inserts an existing entry to its map, to be used to persist changes in the
// entry. Indices are not updated (the key must already be present).
func (ms *mapState) updateExisting(k Key, v mapStateEntry) {
	ms.entries[k] = v
}
// Equals determines if this mapState is equal to the argument (exported)
// MapStateMap. Internal bookkeeping (rule origins) is not compared.
// Only used for testing from other packages.
func (msA *mapState) Equals(msB MapStateMap) bool {
	if msA.Len() != len(msB) {
		return false
	}
	for k, vA := range msA.entries {
		vB, found := msB[k]
		if !found || vB != vA.MapStateEntry {
			return false
		}
	}
	return true
}
// Equal determines if this mapState is equal to the argument mapState,
// including internal bookkeeping (rule origins).
// Only used for testing.
func (msA *mapState) Equal(msB *mapState) bool {
	if msA.Len() != msB.Len() {
		return false
	}
	for k, vA := range msA.entries {
		vB, found := msB.get(k)
		if !found || !(&vB).Equal(&vA) {
			return false
		}
	}
	return true
}
// Diff returns the string of differences between 'obtained' and 'expected' prefixed with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging from other packages.
func (obtained *mapState) Diff(expected MapStateMap) (res string) {
	res = "Missing (-), Unexpected (+):\n"
	for kE, vE := range expected {
		vO, found := obtained.get(kE)
		switch {
		case !found:
			// Expected key absent entirely.
			res += "- " + kE.String() + ": " + vE.String() + "\n"
		case vO.MapStateEntry != vE:
			// Present but with a different entry.
			res += "- " + kE.String() + ": " + vE.String() + "\n"
			res += "+ " + kE.String() + ": " + vO.MapStateEntry.String() + "\n"
		}
	}
	// Anything obtained that was never expected.
	for kO, vO := range obtained.entries {
		if _, found := expected[kO]; !found {
			res += "+ " + kO.String() + ": " + vO.MapStateEntry.String() + "\n"
		}
	}
	return res
}
// diff returns the string of differences between 'obtained' and 'expected' prefixed with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging.
func (obtained *mapState) diff(expected *mapState) (res string) {
	res = "Missing (-), Unexpected (+):\n"
	expected.forEach(func(key Key, want mapStateEntry) bool {
		got, found := obtained.get(key)
		switch {
		case !found:
			res += "- " + key.String() + ": " + want.String() + "\n"
		case !(&got).Equal(&want):
			res += "- " + key.String() + ": " + want.String() + "\n"
			res += "+ " + key.String() + ": " + got.String() + "\n"
		}
		return true
	})
	obtained.forEach(func(key Key, got mapStateEntry) bool {
		if _, found := expected.get(key); !found {
			res += "+ " + key.String() + ": " + got.String() + "\n"
		}
		return true
	})
	return res
}
// String renders every entry of the map as one "key: entry" line.
func (ms mapState) String() (res string) {
	ms.forEach(func(key Key, entry mapStateEntry) bool {
		res += key.String() + ": " + entry.String() + "\n"
		return true
	})
	return res
}
// Equal returns true if two entries are equal.
// This is used for testing only via mapState.Equal and mapState.Diff.
//
// Both the datapath-visible MapStateEntry and the rule-origin metadata
// (derivedFromRules) must match. Nil pointers are equal only to each other.
func (e *mapStateEntry) Equal(o *mapStateEntry) bool {
	if e == nil || o == nil {
		return e == o
	}
	return e.MapStateEntry == o.MapStateEntry && e.derivedFromRules == o.derivedFromRules
}
// String returns a string representation of the MapStateEntry:
// datapath fields first, followed by the origin-rule labels and log string.
func (e mapStateEntry) String() string {
	return e.MapStateEntry.String() + ",derivedFromRules=" + string(e.derivedFromRules.LabelsString()) + ",log=" + e.derivedFromRules.LogString()
}
// addKeyWithChanges adds a 'key' with value 'entry' to 'keys' keeping track of incremental changes
// in 'adds' and 'deletes', and any changed or removed old values in 'old', if not nil.
//
// Returns 'true' if the map was changed (entry inserted or merged into an
// existing one), 'false' if nothing was done.
func (ms *mapState) addKeyWithChanges(key Key, entry mapStateEntry, changes ChangeState) bool {
	var datapathEqual bool
	oldEntry, exists := ms.get(key)
	// Only merge if both old and new are allows or denies
	if exists && oldEntry.IsDeny() == entry.IsDeny() {
		// Do nothing if entries are equal
		if entry.Equal(&oldEntry) {
			return false // nothing to do
		}
		// Save old value before any changes, if desired
		changes.insertOldIfNotExists(key, oldEntry)
		// Compare for datapath equalness before merging, as the old entry is updated in
		// place!
		datapathEqual = oldEntry.MapStateEntry == entry.MapStateEntry
		oldEntry.MapStateEntry.Merge(entry.MapStateEntry)
		oldEntry.derivedFromRules = oldEntry.derivedFromRules.Merge(entry.derivedFromRules)
		ms.updateExisting(key, oldEntry)
	} else if !exists || entry.IsDeny() {
		// Insert a new entry if one did not exist or a deny entry is overwriting an allow
		// entry
		// Save old value before any changes, if any
		if exists {
			changes.insertOldIfNotExists(key, oldEntry)
		}
		// Callers already have cloned the containers, no need to do it again here
		ms.insert(key, entry)
	} else {
		// Do not record an incremental add if nothing was done
		// (an allow entry never overwrites an existing deny here)
		return false
	}
	// Record an incremental Add if desired and entry is new or changed
	if changes.Adds != nil && (!exists || !datapathEqual) {
		changes.Adds[key] = struct{}{}
		// Key add overrides any previous delete of the same key
		if changes.Deletes != nil {
			delete(changes.Deletes, key)
		}
	}
	return true
}
// deleteKeyWithChanges deletes a 'key' from 'ms' keeping track of incremental changes in 'changes'.
// No-op if 'key' is not present.
func (ms *mapState) deleteKeyWithChanges(key Key, changes ChangeState) {
	if entry, exists := ms.get(key); exists {
		// Only record as a delete if the entry was not added on the same round of changes
		// (insertOldIfNotExists returns false for keys present in changes.Adds)
		if changes.insertOldIfNotExists(key, entry) && changes.Deletes != nil {
			changes.Deletes[key] = struct{}{}
		}
		// Remove a potential previously added key
		if changes.Adds != nil {
			delete(changes.Adds, key)
		}
		ms.delete(key)
	}
}
// revertChanges undoes changes to 'ms' as indicated by 'changes.Adds' and 'changes.old' collected
// via insertWithChanges().
func (ms *mapState) revertChanges(changes ChangeState) {
	// Remove all keys that were added during this round of changes.
	for added := range changes.Adds {
		ms.delete(added)
	}
	// 'old' contains all the original values of both modified and deleted
	// entries; restore them.
	for key, original := range changes.old {
		ms.insert(key, original)
	}
}
// insertWithChanges contains the most important business logic for policy insertions. It inserts a
// key and entry into the map only if not covered by a deny entry.
//
// Whenever the bpf datapath finds both L4-only and L3/L4 matching policy entries for a given
// packet, it uses the following logic to choose the policy entry:
// 1. Deny is selected, if any
// 2. Among two allows the one with higher proxy port priority is selected
// 3. Otherwise, the L4-only entry is chosen if it has more specific port/proto than
//    the L3/L4 entry
// 4. Otherwise the L3/L4 entry is chosen
//
// This selects the higher precedence rule either by the deny status, or by the more
// specific L4, and for the L3/L4 entry otherwise. This means that it suffices to manage
// deny precedence among the keys with the same ID here, the datapath takes care of the precedence
// between different IDs (that is, between a specific ID and the wildcard ID (==0)).
//
// Note on bailed or deleted entries:
//
// It would seem like that when we bail out due to being covered by an existing entry, or delete an
// entry due to being covered by the new one, we would want this action reversed if the existing
// entry or this new one is incrementally removed, respectively.
//
// Consider these facts:
//  1. Whenever a key covers another, the covering key has broader or equal
//     protocol/port, and the keys have the same identity, or the covering key has wildcard identity
//     (ID == 0).
//  2. Only keys with a specific identity (ID != 0) can be incrementally added or deleted.
//  3. Due to the selector cache being transactional, when an identity is removed, all keys
//     with that identity are incrementally deleted.
//
// Hence, if a covering key is incrementally deleted, it is a key with a specific identity (2), and
// all keys covered by it will be deleted as well (3), so there is no situation where this
// bailed-out or deleted key should be reinstated due to the covering key being incrementally
// deleted.
//
// Incremental changes performed are recorded in 'changes'.
func (ms *mapState) insertWithChanges(newKey Key, newEntry mapStateEntry, features policyFeatures, changes ChangeState) {
	if newEntry.IsDeny() {
		// Bail if covered by another (different) deny key
		for k, v := range ms.BroaderOrEqualKeys(newKey) {
			if v.IsDeny() && k != newKey {
				return
			}
		}
		// Delete covered allows and denies with a different key
		for k, v := range ms.NarrowerOrEqualKeys(newKey) {
			if !v.IsDeny() || k != newKey {
				ms.deleteKeyWithChanges(k, changes)
			}
		}
	} else {
		// authPreferredInsert takes care for precedence and auth
		if features.contains(authRules) {
			ms.authPreferredInsert(newKey, newEntry, features, changes)
			return
		}
		// Bail if covered by a deny key or a key with a higher proxy port priority.
		//
		// This can be skipped if no rules have denies or proxy redirects
		if features.contains(denyRules | redirectRules) {
			for _, v := range ms.BroaderOrEqualKeys(newKey) {
				if v.IsDeny() || v.ProxyPortPriority > newEntry.ProxyPortPriority {
					return
				}
			}
		}
		// Delete covered allow entries with lower proxy port priority.
		//
		// This is only needed if the newEntry has a proxy port priority greater than zero.
		if newEntry.ProxyPortPriority > 0 {
			for k, v := range ms.NarrowerOrEqualKeys(newKey) {
				if !v.IsDeny() && v.ProxyPortPriority < newEntry.ProxyPortPriority {
					ms.deleteKeyWithChanges(k, changes)
				}
			}
		}
	}
	ms.addKeyWithChanges(newKey, newEntry, changes)
}
// overrideProxyPortForAuth sets the proxy port and priority of 'v' to that of 'newEntry', saving
// the old entry in 'changes'. Only an entry with an explicit auth requirement is overridden; for
// any other entry nothing is done and 'false' is returned (the caller then deletes it instead).
// Returns 'true' if changes were made.
func (ms *mapState) overrideProxyPortForAuth(newEntry mapStateEntry, k Key, v mapStateEntry, changes ChangeState) bool {
	if v.AuthRequirement.IsExplicit() {
		// Save the old value first
		changes.insertOldIfNotExists(k, v)
		// Proxy port can be changed in-place, trie is not affected
		v.ProxyPort = newEntry.ProxyPort
		v.ProxyPortPriority = newEntry.ProxyPortPriority
		ms.entries[k] = v
		return true
	}
	return false
}
// overrideAuthRequirement sets the AuthRequirement of 'v' to that of 'newEntry' (as a derived
// requirement), saving the old entry in 'changes'. No-op if the auth types already match.
func (ms *mapState) overrideAuthRequirement(newEntry mapStateEntry, k Key, v mapStateEntry, changes ChangeState) {
	if v.AuthRequirement.AuthType() != newEntry.AuthRequirement.AuthType() {
		// Save the old value first
		changes.insertOldIfNotExists(k, v)
		// Auth type can be changed in-place, trie is not affected
		// Only derived auth type is ever overridden, so the explicit flag is not copied
		v.AuthRequirement = newEntry.AuthRequirement.AsDerived()
		ms.entries[k] = v
	}
}
// authPreferredInsert applies AuthRequirement of a more generic entry to more specific entries, if
// not explicitly specified.
//
// This function is expected to be called for a map insertion after deny
// entry evaluation. If there is a covering map key for 'newKey'
// which denies traffic matching 'newKey', then this function should not be called.
func (ms *mapState) authPreferredInsert(newKey Key, newEntry mapStateEntry, features policyFeatures, changes ChangeState) {
	// Bail if covered by a deny key or a key with a higher proxy port priority and current
	// entry has no explicit auth.
	var derived bool
	newEntryHasExplicitAuth := newEntry.AuthRequirement.IsExplicit()
	for k, v := range ms.CoveringKeysWithSameID(newKey) {
		if v.IsDeny() {
			return // bail if covered by deny
		}
		if v.ProxyPortPriority > newEntry.ProxyPortPriority {
			if !newEntryHasExplicitAuth {
				// Covering entry has higher proxy port priority and newEntry has a
				// default auth type => can bail out
				return
			}
			// newEntry has a different explicit auth requirement, must propagate
			// proxy port and priority and keep it
			newEntry.ProxyPort = v.ProxyPort
			newEntry.ProxyPortPriority = v.ProxyPortPriority
			// Can break out:
			// - if there were covering denies the allow 'v' would
			//   not have existed, and
			// - since the new entry has explicit auth it does not need to be
			//   derived.
			break
		}
		// Fill in the AuthType from the most specific covering key with the same ID and an
		// explicit auth type
		if !derived && !newEntryHasExplicitAuth && !k.PortProtoIsEqual(newKey) && v.AuthRequirement.IsExplicit() {
			// AuthType from the most specific covering key is applied to 'newEntry' as
			// derived auth type.
			newEntry.AuthRequirement = v.AuthRequirement.AsDerived()
			derived = true
		}
	}
	// Delete covered allow entries with lower proxy port priority, but keep
	// entries with different "auth" and propagate proxy port and priority to them.
	//
	// Check if the new key is the most specific covering key of any other key
	// with the same ID and default auth type, and propagate the auth type from the new
	// entry to such entries.
	var propagated bool
	for k, v := range ms.SubsetKeysWithSameID(newKey) {
		if !v.IsDeny() && v.ProxyPortPriority < newEntry.ProxyPortPriority {
			if !ms.overrideProxyPortForAuth(newEntry, k, v, changes) {
				ms.deleteKeyWithChanges(k, changes)
				continue
			}
		}
		if !propagated && newEntryHasExplicitAuth && !k.PortProtoIsEqual(newKey) {
			// New entry has an explicit auth type
			if v.IsDeny() || v.AuthRequirement.IsExplicit() {
				// Stop if a subset entry is deny or also has an explicit auth type, as
				// that is the more specific covering key for all remaining subset
				// keys
				propagated = true
				continue
			}
			ms.overrideAuthRequirement(newEntry, k, v, changes)
		}
	}
	ms.addKeyWithChanges(newKey, newEntry, changes)
}
// insertOldIfNotExists only inserts an entry in 'changes.old' if 'key' does not exist in there
// already and 'key' does not already exist in 'changes.Adds'. This prevents recording "old" values
// for newly added keys. When an entry is updated, we are called before the key is added to
// 'changes.Adds' so we'll record the old value as expected.
// Returns 'true' if an old entry was added.
func (changes *ChangeState) insertOldIfNotExists(key Key, entry mapStateEntry) bool {
	if changes == nil || changes.old == nil {
		return false
	}
	if _, haveOld := changes.old[key]; haveOld {
		return false
	}
	// Only insert the old entry if the entry was not first added on this round
	// of changes.
	if _, addedThisRound := changes.Adds[key]; addedThisRound {
		return false
	}
	changes.old[key] = entry
	return true
}
// determineAllowLocalhostIngress determines whether communication should be allowed
// from the localhost. It inserts the Key corresponding to the localhost in
// the desiredPolicyKeys if the localhost is allowed to communicate with the
// endpoint. Authentication for localhost traffic is not required.
func (ms *mapState) determineAllowLocalhostIngress() {
	if !option.Config.AlwaysAllowLocalhost() {
		return
	}
	allowLocalhost := newAllowEntryWithLabels(LabelsLocalHostIngress)
	ms.insertWithChanges(localHostKey, allowLocalhost, allFeatures, ChangeState{})
}
// allowAllIdentities installs wildcard-identity allow keys for the requested
// directions (ingress, egress), allowing all traffic at L3.
// Note that this is used when policy is not enforced, so authentication is explicitly not required.
func (ms *mapState) allowAllIdentities(ingress, egress bool) {
	if ingress {
		entry := newAllowEntryWithLabels(LabelsAllowAnyIngress)
		ms.upsert(allKey[trafficdirection.Ingress], entry)
	}
	if egress {
		entry := newAllowEntryWithLabels(LabelsAllowAnyEgress)
		ms.upsert(allKey[trafficdirection.Egress], entry)
	}
}
// MapChanges collects updates to the endpoint policy on the
// granularity of individual mapstate key-value pairs for both adds
// and deletes. 'mutex' must be held for any access.
type MapChanges struct {
	logger *slog.Logger
	// firstVersion: changes are only applied for transactions after this version
	// (see SyncMapChanges).
	firstVersion versioned.KeepVersion
	mutex        lock.Mutex
	// changes accumulated via AccumulateMapChanges, not yet synced
	changes []mapChange
	// synced holds batches of changes moved over by SyncMapChanges, consumed as a unit
	synced []mapChange
	// version is the handle on the version the 'synced' changes are for
	version *versioned.VersionHandle
}
// mapChange is a single queued policy map update (internal entry type).
type mapChange struct {
	Add   bool // false deletes
	Key   Key
	Value mapStateEntry
}
// MapChange is the exported counterpart of mapChange, carrying the
// datapath-visible MapStateEntry value.
type MapChange struct {
	Add   bool // false deletes
	Key   Key
	Value MapStateEntry
}
// AccumulateMapChanges accumulates the given changes to the
// MapChanges.
//
// The caller is responsible for making sure the same identity is not
// present in both 'adds' and 'deletes'.
func (mc *MapChanges) AccumulateMapChanges(adds, deletes []identity.NumericIdentity, keys []Key, value mapStateEntry) {
	mc.mutex.Lock()
	defer mc.mutex.Unlock()
	// Queue one change per (identity, key) pair: adds first, then deletes.
	for _, batch := range []struct {
		add bool
		ids []identity.NumericIdentity
	}{
		{true, adds},
		{false, deletes},
	} {
		for _, id := range batch.ids {
			for _, k := range keys {
				k.Identity = id
				mc.changes = append(mc.changes, mapChange{
					Add:   batch.add,
					Key:   k,
					Value: value,
				})
			}
		}
	}
}
// SyncMapChanges moves the current batch of changes to 'synced' to be consumed as a unit.
// Changes for transactions at or before 'firstVersion' are discarded as already applied.
// On a successful sync the previously held version handle is released and replaced by a
// handle on the new version.
func (mc *MapChanges) SyncMapChanges(txn *versioned.Tx) {
	mc.mutex.Lock()
	defer mc.mutex.Unlock()
	if len(mc.changes) > 0 {
		// Only apply changes after the initial version
		if txn.After(mc.firstVersion) {
			mc.synced = append(mc.synced, mc.changes...)
			mc.version.Close()
			mc.version = txn.GetVersionHandle()
			mc.logger.Debug(
				"SyncMapChanges: Got handle on the new version",
				logfields.NewVersion, mc.version,
			)
		} else {
			mc.logger.Debug(
				"SyncMapChanges: Discarding already applied changes",
				logfields.Version, mc.firstVersion,
				logfields.OldVersion, txn,
			)
		}
	}
	mc.changes = nil
}
// detach releases any version handle we may hold.
func (mc *MapChanges) detach() {
	mc.mutex.Lock()
	defer mc.mutex.Unlock()
	mc.version.Close()
}
// consumeMapChanges transfers the incremental changes from MapChanges to the caller,
// while applying the changes to PolicyMapState.
//
// The returned version handle is moved to the caller (mc.version is reset to nil),
// so the caller becomes responsible for releasing it. The returned ChangeState
// contains the accumulated incremental adds, deletes, and old values.
func (mc *MapChanges) consumeMapChanges(p *EndpointPolicy, features policyFeatures) (*versioned.VersionHandle, ChangeState) {
	mc.mutex.Lock()
	defer mc.mutex.Unlock()
	changes := ChangeState{
		Adds:    make(Keys, len(mc.synced)),
		Deletes: make(Keys, len(mc.synced)),
		old:     make(mapStateMap, len(mc.synced)),
	}
	for i := range mc.synced {
		key := mc.synced[i].Key
		entry := mc.synced[i].Value
		if mc.synced[i].Add {
			// Insert the key to and collect the incremental changes to the overall
			// state in 'changes'
			p.policyMapState.insertWithChanges(key, entry, features, changes)
		} else {
			// Delete the contribution of this cs to the key and collect incremental
			// changes
			p.policyMapState.deleteKeyWithChanges(key, changes)
		}
	}
	// move version to the caller
	version := mc.version
	mc.version = nil
	mc.synced = nil
	return version, changes
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"testing"
"github.com/cilium/hive/hivetest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/u8proto"
)
// withLabels returns a copy of the entry with its rule origin replaced by one
// built from 'lbls' (test helper).
func (e mapStateEntry) withLabels(lbls labels.LabelArrayList) mapStateEntry {
	e.derivedFromRules = makeRuleOrigin(lbls, nil)
	return e
}
// withExplicitAuth returns a copy of the entry with an explicit auth
// requirement of the given type (test helper).
func (e mapStateEntry) withExplicitAuth(authType AuthType) mapStateEntry {
	e.AuthRequirement = authType.AsExplicitRequirement()
	return e
}
// withDerivedAuth returns a copy of the entry with a derived auth requirement
// of the given type (test helper).
func (e mapStateEntry) withDerivedAuth(authType AuthType) mapStateEntry {
	e.AuthRequirement = authType.AsDerivedRequirement()
	return e
}
// WithProxyPort returns a copy of the entry with the embedded MapStateEntry's
// proxy port set (test helper).
func (e mapStateEntry) WithProxyPort(proxyPort uint16) mapStateEntry {
	e.MapStateEntry = e.MapStateEntry.WithProxyPort(proxyPort)
	return e
}
// withState inserts every entry of 'initMap' into 'ms' and returns the
// resulting map state (test helper).
func (ms mapState) withState(initMap mapStateMap) mapState {
	for key, entry := range initMap {
		ms.insert(key, entry)
	}
	return ms
}
// ingressKey builds an ingress policy key for the given identity, protocol,
// port, and port prefix length (test helper).
func ingressKey(identity identity.NumericIdentity, proto u8proto.U8proto, port uint16, prefixLen uint8) Key {
	return IngressKey().WithIdentity(identity).WithPortProtoPrefix(proto, port, prefixLen)
}
// ingressL3OnlyKey builds an ingress key matching the identity at L3 only,
// with wildcarded protocol and port (test helper).
func ingressL3OnlyKey(identity identity.NumericIdentity) Key {
	return IngressKey().WithIdentity(identity)
}
// egressKey builds an egress policy key for the given identity, protocol,
// port, and port prefix length (test helper).
func egressKey(identity identity.NumericIdentity, proto u8proto.U8proto, port uint16, prefixLen uint8) Key {
	return EgressKey().WithIdentity(identity).WithPortProtoPrefix(proto, port, prefixLen)
}
// egressL3OnlyKey builds an egress key matching the identity at L3 only,
// with wildcarded protocol and port (test helper).
func egressL3OnlyKey(identity identity.NumericIdentity) Key {
	return EgressKey().WithIdentity(identity)
}
// TestPolicyKeyTrafficDirection verifies that ingress and egress key
// constructors report the correct traffic direction.
func TestPolicyKeyTrafficDirection(t *testing.T) {
	ingress := IngressKey()
	require.True(t, ingress.IsIngress())
	require.False(t, ingress.IsEgress())

	egress := EgressKey()
	require.False(t, egress.IsIngress())
	require.True(t, egress.IsEgress())
}
// validatePortProto makes sure each Key in MapState abides by the contract that protocol/nexthdr
// can only be wildcarded if the destination port is also wildcarded.
func (ms *mapState) validatePortProto(t *testing.T) {
	ms.forEach(func(key Key, _ mapStateEntry) bool {
		if key.Nexthdr != 0 {
			return true
		}
		// Wildcard protocol requires a wildcard port.
		require.Equal(t, uint16(0), key.DestPort)
		return true
	})
}
// withHTTPProxyPort returns a copy of the entry redirected to the given proxy
// port at the HTTP listener priority (test helper).
func (e mapStateEntry) withHTTPProxyPort(proxyPort uint16) mapStateEntry {
	e.MapStateEntry = e.MapStateEntry.WithProxyPort(proxyPort).WithListenerPriority(ListenerPriorityHTTP)
	return e
}
// withProxyPortPriority returns a copy of the entry redirected to the given
// proxy port at the given listener priority (test helper).
func (e mapStateEntry) withProxyPortPriority(proxyPort uint16, priority ListenerPriority) mapStateEntry {
	e.MapStateEntry = e.MapStateEntry.WithProxyPort(proxyPort).WithListenerPriority(priority)
	return e
}
func TestMapState_insertWithChanges(t *testing.T) {
allowEntry := NewMapStateEntry(AllowEntry).withLabels(labels.LabelArrayList{nil})
denyEntry := NewMapStateEntry(DenyEntry).withLabels(labels.LabelArrayList{nil})
type args struct {
key Key
entry MapStateEntry
}
tests := []struct {
name string
ms, want mapState
wantAdds, wantDeletes Keys
wantOld mapStateMap
args args
}{
{
name: "test-1 - no KV added, map should remain the same",
ms: testMapState(t, mapStateMap{
IngressKey(): allowEntry,
}),
args: args{
key: IngressKey(),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
IngressKey(): allowEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-2a - L3 allow KV should not overwrite deny entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressL3OnlyKey(1),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): allowEntry,
ingressKey(1, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-2b - L3 port-range allow KV should not overwrite deny entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(1, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-3a - L3-L4 allow KV should not overwrite deny entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-3b - L3-L4 port-range allow KV should not overwrite deny entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-4a - L3-L4 deny KV should overwrite allow entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
},
},
{
name: "test-4b - L3-L4 port-range deny KV should overwrite allow entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
},
},
{
name: "test-5a - L3 deny KV should overwrite all L3-L4 allow and L3 allow entries for the same L3",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressL3OnlyKey(1): allowEntry,
ingressKey(2, 3, 80, 0): allowEntry,
ingressL3OnlyKey(2): allowEntry,
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
ingressKey(2, 3, 80, 0): allowEntry,
ingressL3OnlyKey(2): allowEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressL3OnlyKey(1): allowEntry,
ingressKey(1, 3, 80, 0): allowEntry,
},
},
{
name: "test-5b - L3 port-range deny KV should overwrite all L3-L4 allow and L3 allow entries for the same L3",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(2, 3, 80, 0): allowEntry,
ingressKey(2, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
ingressKey(2, 3, 80, 0): allowEntry,
ingressKey(2, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(1, 3, 80, 0): allowEntry,
},
},
{
name: "test-6a - L3 egress deny KV should not overwrite any existing ingress allow",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressL3OnlyKey(1): allowEntry,
ingressKey(2, 3, 80, 0): allowEntry,
ingressL3OnlyKey(2): allowEntry,
}),
args: args{
key: egressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressL3OnlyKey(1): allowEntry,
egressL3OnlyKey(1): denyEntry,
ingressKey(2, 3, 80, 0): allowEntry,
ingressL3OnlyKey(2): allowEntry,
}),
wantAdds: Keys{
egressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-6b - L3 egress port-range deny KV should not overwrite any existing ingress allow",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(2, 3, 80, 0): allowEntry,
ingressKey(2, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
args: args{
key: egressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
egressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
ingressKey(2, 3, 80, 0): allowEntry,
ingressKey(2, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{
egressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-7a - L3 ingress deny KV should not be overwritten by a L3-L4 ingress allow",
ms: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-7b - L3 ingress deny KV should not be overwritten by a L3-L4 port-range ingress allow",
ms: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-8a - L3 ingress deny KV should not be overwritten by a L3-L4-L7 ingress allow",
ms: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry.WithProxyPort(8080),
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-8b - L3 ingress deny KV should not be overwritten by a L3-L4-L7 port-range ingress allow",
ms: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry.WithProxyPort(8080),
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-9a - L3 ingress deny KV should overwrite a L3-L4-L7 ingress allow",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
},
},
{
name: "test-9b - L3 ingress deny KV should overwrite a L3-L4-L7 port-range ingress allow",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080), // port range 64-127 (64/10)
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080), // port range 64-127 (64/10)
},
},
{
name: "test-10a - L3 ingress deny KV should overwrite a L3-L4-L7 ingress allow and a L3-L4 deny",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
ingressKey(1, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
ingressKey(1, 3, 80, 0): denyEntry,
},
},
{
name: "test-10b - L3 ingress deny KV should overwrite a L3-L4-L7 port-range ingress allow and a L3-L4 port-range deny",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080), // port range 64-127 (64/10)
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
ingressKey(1, 3, 64, 10): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080), // port range 64-127 (64/10)
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
},
},
{
name: "test-11a - L3 ingress allow should not be allowed if there is a L3 'all' deny",
ms: testMapState(t, mapStateMap{
egressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
IngressKey(): denyEntry,
}),
args: args{
key: ingressL3OnlyKey(100),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
egressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
IngressKey(): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-11b - L3 ingress allow should not be allowed if there is a L3 'all' deny",
ms: testMapState(t, mapStateMap{
egressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080), // port range 64-127 (64/10)
IngressKey(): denyEntry,
}),
args: args{
key: ingressKey(100, 0, 0, 0),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
egressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080), // port range 64-127 (64/10)
IngressKey(): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-12a - inserting a L3 'all' deny should delete all entries for that direction",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
ingressKey(1, 3, 5, 0): allowEntry.withHTTPProxyPort(8080),
egressKey(100, 3, 5, 0): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: IngressKey(),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
IngressKey(): denyEntry,
egressKey(100, 3, 5, 0): allowEntry.withHTTPProxyPort(8080),
}),
wantAdds: Keys{
IngressKey(): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
ingressKey(1, 3, 5, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
ingressKey(1, 3, 5, 0): allowEntry.withHTTPProxyPort(8080),
},
},
{
name: "test-12b - inserting a L3 'all' deny should delete all entries for that direction (including port ranges)",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080), // port range 64-127 (64/10)
ingressKey(1, 3, 4, 14): allowEntry.withHTTPProxyPort(8080),
egressKey(100, 3, 4, 14): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: IngressKey(),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
IngressKey(): denyEntry,
egressKey(100, 3, 4, 14): allowEntry.withHTTPProxyPort(8080),
}),
wantAdds: Keys{
IngressKey(): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
ingressKey(1, 3, 4, 14): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080), // port range 64-127 (64/10)
ingressKey(1, 3, 4, 14): allowEntry.withHTTPProxyPort(8080),
},
},
{
name: "test-13a - L3-L4-L7 ingress allow should overwrite a L3-L4-L7 ingress allow due to lower priority",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry.WithProxyPort(9090).WithListenerPriority(1),
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPortPriority(9090, 1),
}),
wantAdds: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
},
},
{
name: "test-13b - L3-L4-L7 port-range ingress allow should overwrite a L3-L4-L7 port-range ingress allow due to lower priority",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 64, 10),
entry: AllowEntry.WithProxyPort(9090).WithListenerPriority(1),
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPortPriority(9090, 1),
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080),
},
},
{
name: "test-14a - L3-L4-L7 ingress allow should overwrite a L3-L4-L7 ingress allow due to higher priority on the same port",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry.WithProxyPort(8080).WithListenerPriority(1),
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPortPriority(8080, 1),
}),
wantAdds: Keys{
ingressKey(1, 3, 80, 0): struct{}{}, // precedence changed
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
},
},
{
name: "test-14b - L3-L4-L7 port-range ingress allow should overwrite a L3-L4-L7 port-range ingress allow due to higher priority on the same port",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 64, 10),
entry: AllowEntry.WithProxyPort(8080).WithListenerPriority(1),
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPortPriority(8080, 1),
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{}, // precedence changed
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080),
},
},
{
name: "test-14c - L3-L4 ingress allow should not overwrite a L3-L4-L7 port-range ingress allow on overlapping port",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 80, 16),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withHTTPProxyPort(8080),
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-15a - L3 port-range allow KV should not overwrite a wildcard deny entry",
ms: testMapState(t, mapStateMap{
ingressKey(0, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(0, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-15b-reverse - L3 port-range allow KV should not overwrite a wildcard deny entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
args: args{
key: ingressKey(0, 3, 80, 0),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(0, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(0, 3, 80, 16): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-16a - No added entry for L3 port-range allow + wildcard allow entry",
ms: testMapState(t, mapStateMap{
ingressKey(0, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(0, 3, 80, 0): allowEntry.withHTTPProxyPort(8080),
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-17 - Added entry for wildcarded port for the specified protocol",
ms: testMapState(t, mapStateMap{}),
args: args{
key: ingressKey(1, 2, 0, 0),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 2, 0, 0): allowEntry,
}),
wantAdds: Keys{
ingressKey(1, 2, 0, 0): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-18 - Wildcard port entry should not overwrite deny entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 2, 0, 0): denyEntry,
}),
args: args{
key: ingressKey(1, 2, 0, 0),
entry: AllowEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 2, 0, 0): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-18 - Deny entry overwrites allow wildcard port entry",
ms: testMapState(t, mapStateMap{
ingressKey(1, 2, 0, 0): allowEntry,
}),
args: args{
key: ingressKey(1, 2, 0, 0),
entry: DenyEntry,
},
want: testMapState(t, mapStateMap{
ingressKey(1, 2, 0, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(1, 2, 0, 0): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 2, 0, 0): allowEntry,
},
},
}
for _, tt := range tests {
t.Log(tt.name)
changes := ChangeState{
Adds: make(Keys),
Deletes: make(Keys),
old: make(mapStateMap),
}
// copy the starting point
ms := testMapState(t, make(mapStateMap, tt.ms.Len()))
tt.ms.forEach(func(k Key, v mapStateEntry) bool {
ms.insert(k, v)
return true
})
entry := NewMapStateEntry(tt.args.entry).withLabels(labels.LabelArrayList{nil})
ms.insertWithChanges(tt.args.key, entry, denyRules, changes)
ms.validatePortProto(t)
require.Truef(t, ms.Equal(&tt.want), "%s: MapState mismatch:\n%s", tt.name, ms.diff(&tt.want))
require.Equalf(t, tt.wantAdds, changes.Adds, "%s: Adds mismatch", tt.name)
require.Equalf(t, tt.wantDeletes, changes.Deletes, "%s: Deletes mismatch", tt.name)
require.Equalf(t, tt.wantOld, changes.old, "%s: OldValues mismatch allows", tt.name)
// Revert changes and check that we get the original mapstate
ms.revertChanges(changes)
require.Truef(t, ms.Equal(&tt.ms), "%s: MapState mismatch:\n%s", tt.name, ms.diff(&tt.ms))
}
}
// DNSUDPEgressKey builds an egress map key for DNS over UDP (port 53)
// scoped to the given numeric identity.
func DNSUDPEgressKey(id identity.NumericIdentity) Key {
	k := EgressKey().WithIdentity(id)
	return k.WithUDPPort(53)
}
// DNSTCPEgressKey builds an egress map key for DNS over TCP (port 53)
// scoped to the given numeric identity.
func DNSTCPEgressKey(id identity.NumericIdentity) Key {
	k := EgressKey().WithIdentity(id)
	return k.WithTCPPort(53)
}
// HostIngressKey builds an ingress map key for the reserved host identity,
// wildcarded on protocol and port.
func HostIngressKey() Key {
	host := identity.ReservedIdentityHost
	return IngressKey().WithIdentity(host)
}
// AnyIngressKey builds the fully wildcarded ingress map key (any identity,
// any protocol, any port).
func AnyIngressKey() Key {
	k := IngressKey()
	return k
}
// AnyEgressKey builds the fully wildcarded egress map key (any identity,
// any protocol, any port).
func AnyEgressKey() Key {
	k := EgressKey()
	return k
}
// HttpIngressKey builds an ingress map key for HTTP (TCP port 80) scoped to
// the given numeric identity.
func HttpIngressKey(id identity.NumericIdentity) Key {
	k := IngressKey().WithIdentity(id)
	return k.WithTCPPort(80)
}
// HttpEgressKey builds an egress map key for HTTP (TCP port 80) scoped to
// the given numeric identity.
func HttpEgressKey(id identity.NumericIdentity) Key {
	k := EgressKey().WithIdentity(id)
	return k.WithTCPPort(80)
}
// TcpEgressKey builds an egress map key for TCP with a wildcarded port
// (port 0) scoped to the given numeric identity.
func TcpEgressKey(id identity.NumericIdentity) Key {
	k := EgressKey().WithIdentity(id)
	return k.WithTCPPort(0)
}
// allowEntry returns a plain allow mapStateEntry carrying an empty label
// list, as expected by the test comparisons.
func allowEntry() mapStateEntry {
	e := NewMapStateEntry(AllowEntry)
	return e.withLabels(labels.LabelArrayList{nil})
}
// proxyEntryHTTP returns an allow mapStateEntry redirecting to the given
// proxy port at HTTP listener priority, with an empty label list.
func proxyEntryHTTP(proxyPort uint16) mapStateEntry {
	e := AllowEntry.WithProxyPort(proxyPort).WithListenerPriority(ListenerPriorityHTTP)
	return NewMapStateEntry(e).withLabels(labels.LabelArrayList{nil})
}
// proxyEntryDNS returns an allow mapStateEntry redirecting to the given
// proxy port at DNS listener priority, with an empty label list.
func proxyEntryDNS(proxyPort uint16) mapStateEntry {
	e := AllowEntry.WithProxyPort(proxyPort).WithListenerPriority(ListenerPriorityDNS)
	return NewMapStateEntry(e).withLabels(labels.LabelArrayList{nil})
}
// proxyEntryCRD returns an allow mapStateEntry redirecting to the given
// proxy port at CRD listener priority, with an empty label list.
func proxyEntryCRD(proxyPort uint16) mapStateEntry {
	e := AllowEntry.WithProxyPort(proxyPort).WithListenerPriority(ListenerPriorityCRD)
	return NewMapStateEntry(e).withLabels(labels.LabelArrayList{nil})
}
// denyEntry returns a plain deny mapStateEntry carrying an empty label
// list, as expected by the test comparisons.
func denyEntry() mapStateEntry {
	e := NewMapStateEntry(DenyEntry)
	return e.withLabels(labels.LabelArrayList{nil})
}
// TestMapState_AccumulateMapChangesDeny drives incremental policy map
// changes (AccumulateMapChanges -> SyncMapChanges -> consumeMapChanges)
// through scenarios involving deny entries, and verifies both the resulting
// MapState and the reported add/delete key sets for each step.
func TestMapState_AccumulateMapChangesDeny(t *testing.T) {
	csFoo := newTestCachedSelector("Foo", false)
	csBar := newTestCachedSelector("Bar", false)
	identityCache := identity.IdentityMap{
		identity.NumericIdentity(identityFoo): labelsFoo,
	}
	selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)
	// args describes one incremental change: identities added/deleted for a
	// selector, targeting a port/proto in one direction, optionally as a
	// redirect (non-zero listener priority) and/or a deny entry.
	type args struct {
		cs       *testCachedSelector
		adds     []int
		deletes  []int
		port     uint16
		proto    u8proto.U8proto
		ingress  bool
		redirect ListenerPriority
		deny     bool
	}
	tests := []struct {
		continued bool // Start from the end state of the previous test
		name      string
		setup     mapState // initial MapState (used only when continued is false)
		args      []args   // changes applied, in order
		state     mapState // expected MapState after applying args
		adds      Keys     // expected set of added keys
		deletes   Keys     // expected set of deleted keys
	}{{
		name: "test-0 - Adding L4-only redirect allow key to an existing allow-all with L3-only deny",
		setup: testMapState(t, mapStateMap{
			AnyIngressKey():      allowEntry(),
			ingressL3OnlyKey(41): denyEntry(),
		}),
		args: []args{
			{cs: csFoo, adds: []int{0}, deletes: []int{}, port: 80, proto: 6, ingress: true, redirect: ListenerPriorityHTTP, deny: false},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():      allowEntry(),
			ingressL3OnlyKey(41): denyEntry(),
			HttpIngressKey(0):    proxyEntryHTTP(1),
		}),
		adds: Keys{
			HttpIngressKey(0): {},
		},
		deletes: Keys{},
	}, {
		name: "test-1a - Adding L3-deny to an existing allow-all with L4-only allow redirect map state entries",
		setup: testMapState(t, mapStateMap{
			AnyIngressKey():   allowEntry(),
			HttpIngressKey(0): proxyEntryHTTP(12345),
		}),
		args: []args{
			{cs: csFoo, adds: []int{41}, deletes: []int{}, port: 0, proto: 0, ingress: true, redirect: ListenerPriorityHTTP, deny: true},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():      allowEntry(),
			ingressL3OnlyKey(41): denyEntry(),
			HttpIngressKey(0):    proxyEntryHTTP(12345),
		}),
		adds: Keys{
			ingressL3OnlyKey(41): {},
		},
		deletes: Keys{},
	}, {
		continued: true,
		name:      "test-1b - Adding 2nd identity",
		args: []args{
			{cs: csFoo, adds: []int{42}, deletes: []int{}, port: 0, proto: 0, ingress: true, redirect: ListenerPriorityHTTP, deny: true},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():      allowEntry(),
			ingressL3OnlyKey(41): denyEntry(),
			ingressL3OnlyKey(42): denyEntry(),
			HttpIngressKey(0):    proxyEntryHTTP(12345),
		}),
		adds: Keys{
			ingressL3OnlyKey(42): {},
		},
		deletes: Keys{},
	}, {
		continued: true,
		name:      "test-1c - Removing the same key",
		args: []args{
			{cs: csFoo, adds: nil, deletes: []int{42}, port: 0, proto: 0, ingress: true, redirect: ListenerPriorityHTTP, deny: true},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():      allowEntry(),
			ingressL3OnlyKey(41): denyEntry(),
			HttpIngressKey(0):    proxyEntryHTTP(12345),
		}),
		adds: Keys{},
		deletes: Keys{
			ingressL3OnlyKey(42): {},
		},
	}, {
		name: "test-2a - Adding 2 identities, and deleting a nonexisting key on an empty state",
		args: []args{
			{cs: csFoo, adds: []int{42, 43}, deletes: []int{50}, port: 80, proto: 6, ingress: true, deny: true},
		},
		state: testMapState(t, mapStateMap{
			HttpIngressKey(42): denyEntry(),
			HttpIngressKey(43): denyEntry(),
		}),
		adds: Keys{
			HttpIngressKey(42): {},
			HttpIngressKey(43): {},
		},
		deletes: Keys{},
	}, {
		continued: true,
		name:      "test-2b - Adding Bar also selecting 42 (and 44)",
		args: []args{
			{cs: csBar, adds: []int{42, 44}, deletes: []int{}, port: 80, proto: 6, ingress: true, deny: true},
		},
		state: testMapState(t, mapStateMap{
			HttpIngressKey(42): denyEntry(),
			HttpIngressKey(43): denyEntry(),
			HttpIngressKey(44): denyEntry(),
		}),
		adds: Keys{
			HttpIngressKey(44): {},
		},
		deletes: Keys{},
	}, {
		continued: true,
		name:      "test-2c - Deleting 42",
		args: []args{
			{cs: csFoo, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: true, deny: true},
			{cs: csBar, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: true, deny: true},
		},
		state: testMapState(t, mapStateMap{
			HttpIngressKey(43): denyEntry(),
			HttpIngressKey(44): denyEntry(),
		}),
		adds: Keys{},
		deletes: Keys{
			HttpIngressKey(42): {},
		},
	}, {
		continued: true,
		name:      "test-2d - Adding an entry that already exists, no adds",
		args: []args{
			{cs: csBar, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: true, deny: true},
		},
		state: testMapState(t, mapStateMap{
			HttpIngressKey(43): denyEntry(),
			HttpIngressKey(44): denyEntry(),
		}),
		adds:    Keys{},
		deletes: Keys{},
	}, {
		continued: false,
		name:      "test-3a - egress allow with deny-L3",
		setup: testMapState(t, mapStateMap{
			AnyIngressKey():        allowEntry(),
			HostIngressKey():       allowEntry(),
			egressKey(42, 0, 0, 0): denyEntry(),
		}),
		args: []args{
			{cs: csBar, adds: []int{42}, deletes: []int{}, port: 53, proto: 17, ingress: false, deny: false},
			{cs: csBar, adds: []int{42}, deletes: []int{}, port: 53, proto: 6, ingress: false, deny: false},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():        allowEntry(),
			HostIngressKey():       allowEntry(),
			egressKey(42, 0, 0, 0): denyEntry(),
		}),
		adds:    Keys{},
		deletes: Keys{},
	}, {
		continued: true,
		name:      "test-3b - egress allow DNS on another ID with deny-L3",
		args: []args{
			{cs: csBar, adds: []int{43}, deletes: []int{}, port: 53, proto: 17, ingress: false, deny: false},
			{cs: csBar, adds: []int{43}, deletes: []int{}, port: 53, proto: 6, ingress: false, deny: false},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():        allowEntry(),
			HostIngressKey():       allowEntry(),
			egressKey(42, 0, 0, 0): denyEntry(),
			DNSUDPEgressKey(43):    allowEntry(),
			DNSTCPEgressKey(43):    allowEntry(),
		}),
		adds: Keys{
			DNSUDPEgressKey(43): {},
			DNSTCPEgressKey(43): {},
		},
		deletes: Keys{},
	}, {
		continued: true,
		name:      "test-3c - egress allow HTTP proxy with deny-L3",
		args: []args{
			{cs: csFoo, adds: []int{43}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP, deny: false},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():        allowEntry(),
			HostIngressKey():       allowEntry(),
			egressKey(42, 0, 0, 0): denyEntry(),
			DNSUDPEgressKey(43):    allowEntry(),
			DNSTCPEgressKey(43):    allowEntry(),
			HttpEgressKey(43):      proxyEntryHTTP(1),
		}),
		adds: Keys{
			HttpEgressKey(43): {},
		},
		deletes: Keys{},
	}, {
		continued: false,
		name:      "test-4a - Add L7 skipped due to covering L3 deny",
		setup: testMapState(t, mapStateMap{
			AnyIngressKey():        allowEntry(),
			HostIngressKey():       allowEntry(),
			egressKey(42, 0, 0, 0): denyEntry(),
		}),
		args: []args{
			{cs: csFoo, adds: []int{42}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP, deny: false},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():        allowEntry(),
			HostIngressKey():       allowEntry(),
			egressKey(42, 0, 0, 0): denyEntry(),
		}),
		adds:    Keys{},
		deletes: Keys{},
	}, {
		continued: true,
		name:      "test-4b - Add & delete L7 skipped due to covering L3 deny",
		args: []args{
			{cs: csFoo, adds: []int{42}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP, deny: false},
			{cs: csFoo, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP, deny: false},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():        allowEntry(),
			HostIngressKey():       allowEntry(),
			egressKey(42, 0, 0, 0): denyEntry(),
		}),
		adds:    Keys{},
		deletes: Keys{},
	}, {
		name: "test-5 - Adding L3-deny to an existing allow-all",
		setup: testMapState(t, mapStateMap{
			AnyIngressKey(): allowEntry(),
		}),
		args: []args{
			{cs: csFoo, adds: []int{41}, deletes: []int{}, port: 0, proto: 0, ingress: true, deny: true},
		},
		state: testMapState(t, mapStateMap{
			AnyIngressKey():      allowEntry(),
			ingressL3OnlyKey(41): denyEntry(),
		}),
		adds: Keys{
			ingressL3OnlyKey(41): {},
		},
		deletes: Keys{},
	}, {
		name: "test-6 - Multiple entries",
		setup: testMapState(t, mapStateMap{
			AnyEgressKey():     allowEntry(),
			HttpEgressKey(0):   proxyEntryHTTP(12345),
			DNSUDPEgressKey(0): proxyEntryDNS(12346),
		}),
		args: []args{
			{cs: csFoo, adds: []int{41}, deletes: []int{}, port: 0, proto: 0, ingress: false, deny: true},
		},
		state: testMapState(t, mapStateMap{
			AnyEgressKey():         allowEntry(),
			egressKey(41, 0, 0, 0): denyEntry(),
			HttpEgressKey(0):       proxyEntryHTTP(12345),
			DNSUDPEgressKey(0):     proxyEntryDNS(12346),
		}),
		adds: Keys{
			egressKey(41, 0, 0, 0): {},
		},
		deletes: Keys{},
	}, {
		continued: false,
		name:      "test-n - title",
		args: []args{
			// {cs: csFoo, adds: []int{42, 43}, deletes: []int{50}, port: 80, proto: 6, ingress: true, redirect: ListenerPriorityHTTP, deny: false},
		},
		state: emptyMapState(hivetest.Logger(t)),
		adds: Keys{
			// HttpIngressKey(42): allowEntry(),
		},
		deletes: Keys{
			// HttpIngressKey(43): allowEntry(),
		},
	},
	}
	epPolicy := &EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			SelectorCache: selectorCache,
		},
		PolicyOwner: DummyOwner{logger: hivetest.Logger(t)},
	}
	policyMapState := emptyMapState(hivetest.Logger(t))
	for _, tt := range tests {
		policyMaps := MapChanges{logger: hivetest.Logger(t)}
		// Reset the map state unless this case continues from the previous one.
		if !tt.continued {
			if tt.setup.Valid() {
				policyMapState = tt.setup
			} else {
				policyMapState = testMapState(t, nil)
			}
		}
		epPolicy.policyMapState = policyMapState
		for _, x := range tt.args {
			dir := trafficdirection.Egress
			if x.ingress {
				dir = trafficdirection.Ingress
			}
			adds := x.cs.addSelections(x.adds...)
			deletes := x.cs.deleteSelections(x.deletes...)
			key := KeyForDirection(dir).WithPortProto(x.proto, x.port)
			var proxyPort uint16
			var priority ListenerPriority
			// A non-zero redirect listener priority marks the entry as a
			// redirect; the tests use proxy port 1 for all redirects.
			if x.redirect != 0 {
				proxyPort = 1
				priority = x.redirect
			}
			value := newMapStateEntry(NilRuleOrigin, proxyPort, priority, x.deny, NoAuthRequirement)
			policyMaps.AccumulateMapChanges(adds, deletes, []Key{key}, value)
		}
		policyMaps.SyncMapChanges(versioned.LatestTx)
		handle, changes := policyMaps.consumeMapChanges(epPolicy, denyRules)
		if handle != nil {
			handle.Close()
		}
		policyMapState.validatePortProto(t)
		require.True(t, policyMapState.Equal(&tt.state), "%s (MapState):\n%s", tt.name, policyMapState.diff(&tt.state))
		require.Equal(t, tt.adds, changes.Adds, tt.name+" (adds)")
		require.Equal(t, tt.deletes, changes.Deletes, tt.name+" (deletes)")
	}
}
func TestMapState_AccumulateMapChanges(t *testing.T) {
csFoo := newTestCachedSelector("Foo", false)
csBar := newTestCachedSelector("Bar", false)
csWildcard := newTestCachedSelector("wildcard", true)
identityCache := identity.IdentityMap{
identity.NumericIdentity(identityFoo): labelsFoo,
}
selectorCache := testNewSelectorCache(hivetest.Logger(t), identityCache)
type args struct {
cs *testCachedSelector
adds []int
deletes []int
port uint16
prefix uint8
proto u8proto.U8proto
ingress bool
redirect ListenerPriority
deny bool
authReq AuthRequirement
}
tests := []struct {
continued bool // Start from the end state of the previous test
name string
args []args // changes applied, in order
state mapState
adds Keys
deletes Keys
}{{
name: "test-2a - Adding 2 identities, and deleting a nonexisting key on an empty state",
args: []args{
{cs: csFoo, adds: []int{42, 43}, deletes: []int{50}, port: 80, proto: 6, ingress: true},
},
state: testMapState(t, mapStateMap{
HttpIngressKey(42): allowEntry(),
HttpIngressKey(43): allowEntry(),
}),
adds: Keys{
HttpIngressKey(42): {},
HttpIngressKey(43): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-2b - Adding Bar also selecting 42",
args: []args{
{cs: csBar, adds: []int{42, 44}, deletes: []int{}, port: 80, proto: 6, ingress: true},
},
state: testMapState(t, mapStateMap{
HttpIngressKey(42): allowEntry(),
HttpIngressKey(43): allowEntry(),
HttpIngressKey(44): allowEntry(),
}),
adds: Keys{
HttpIngressKey(44): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-2c - Deleting 42",
args: []args{
{cs: csFoo, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: true},
{cs: csBar, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: true},
},
state: testMapState(t, mapStateMap{
HttpIngressKey(43): allowEntry(),
HttpIngressKey(44): allowEntry(),
}),
adds: Keys{},
deletes: Keys{
HttpIngressKey(42): {},
},
}, {
continued: true,
name: "test-2f - Adding an entry that already exists, no adds",
args: []args{
{cs: csBar, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: true},
},
state: testMapState(t, mapStateMap{
HttpIngressKey(43): allowEntry(),
HttpIngressKey(44): allowEntry(),
}),
adds: Keys{},
deletes: Keys{},
}, {
continued: false,
name: "test-3a - egress HTTP proxy (setup)",
args: []args{
{cs: nil, adds: []int{0}, deletes: []int{}, port: 0, proto: 0, ingress: true},
{cs: nil, adds: []int{1}, deletes: []int{}, port: 0, proto: 0, ingress: true},
{cs: csBar, adds: []int{42}, deletes: []int{}, port: 53, proto: 17, ingress: false},
{cs: csBar, adds: []int{42}, deletes: []int{}, port: 53, proto: 6, ingress: false},
},
state: testMapState(t, mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
DNSUDPEgressKey(42): allowEntry(),
DNSTCPEgressKey(42): allowEntry(),
}),
adds: Keys{
AnyIngressKey(): {},
HostIngressKey(): {},
DNSUDPEgressKey(42): {},
DNSTCPEgressKey(42): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-3b - egress HTTP proxy (incremental update)",
args: []args{
{cs: csFoo, adds: []int{43}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP},
},
state: testMapState(t, mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
DNSUDPEgressKey(42): allowEntry(),
DNSTCPEgressKey(42): allowEntry(),
HttpEgressKey(43): proxyEntryHTTP(1),
}),
adds: Keys{
HttpEgressKey(43): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-4a - Add & delete; delete cancels the add in reply",
args: []args{
{cs: csFoo, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP},
{cs: csFoo, adds: []int{}, deletes: []int{44}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP},
},
state: emptyMapState(hivetest.Logger(t)),
adds: Keys{},
deletes: Keys{},
}, {
continued: true,
name: "test-4b - Add, delete, & add; delete suppressed",
args: []args{
{cs: csFoo, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP},
{cs: csFoo, adds: []int{}, deletes: []int{44}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP},
{cs: csFoo, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: ListenerPriorityHTTP},
},
state: testMapState(t, mapStateMap{
HttpEgressKey(44): proxyEntryHTTP(1),
}),
adds: Keys{
HttpEgressKey(44): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5a - auth type propagation from the most specific covering key",
args: []args{
{cs: csFoo, adds: []int{43}, authReq: AuthTypeAlwaysFail.AsExplicitRequirement()},
{cs: csFoo, adds: []int{0}, proto: 6, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csBar, adds: []int{43}, port: 80, proto: 6, redirect: ListenerPriorityHTTP},
},
state: testMapState(t, mapStateMap{
egressKey(43, 0, 0, 0): allowEntry().withExplicitAuth(AuthTypeAlwaysFail),
egressKey(0, 6, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 80, 0): proxyEntryHTTP(1).withDerivedAuth(AuthTypeAlwaysFail),
}),
adds: Keys{
egressKey(43, 0, 0, 0): {},
egressKey(0, 6, 0, 0): {},
egressKey(43, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5a-r - auth type propagation from the most specific covering key - reverse",
args: []args{
{cs: csBar, adds: []int{43}, port: 80, proto: 6, redirect: ListenerPriorityHTTP},
{cs: csFoo, adds: []int{0}, proto: 6, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csFoo, adds: []int{43}, authReq: AuthTypeAlwaysFail.AsExplicitRequirement()},
},
state: testMapState(t, mapStateMap{
egressKey(43, 0, 0, 0): allowEntry().withExplicitAuth(AuthTypeAlwaysFail),
egressKey(0, 6, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 80, 0): proxyEntryHTTP(1).withDerivedAuth(AuthTypeAlwaysFail),
}),
adds: Keys{
egressKey(43, 0, 0, 0): {},
egressKey(0, 6, 0, 0): {},
egressKey(43, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5b - higher priority proxy port override with auth entries",
args: []args{
{cs: csFoo, adds: []int{43}, proto: 6, redirect: ListenerPriorityHTTP},
// lower priority redirect (ListenerPriorityCRD) is overridden by ListenerPriorityHTTP
{cs: csFoo, adds: []int{43}, port: 80, proto: 6, prefix: 12, redirect: ListenerPriorityCRD},
// but more specific entries with different auth are not
{cs: csBar, adds: []int{43}, port: 80, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csBar, adds: []int{43}, port: 81, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): proxyEntryHTTP(1),
// egressKey(43, 6, 80, 12): proxyEntryCRD(1),
egressKey(43, 6, 80, 0): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 81, 0): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
// egressKey(43, 6, 80, 12): {},
egressKey(43, 6, 80, 16): {},
egressKey(43, 6, 81, 16): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5b-r - higher priority proxy port override with auth entries - reverse",
args: []args{
{cs: csBar, adds: []int{43}, port: 80, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csFoo, adds: []int{43}, port: 80, proto: 6, prefix: 12, redirect: ListenerPriorityHTTP},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 80, 12): proxyEntryHTTP(1),
egressKey(43, 6, 80, 0): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
}),
adds: Keys{
egressKey(43, 6, 80, 12): {},
egressKey(43, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5c - higher priority proxy port not overridden with auth entries",
args: []args{
{cs: csFoo, adds: []int{43}, proto: 6, redirect: ListenerPriorityCRD},
// higher priority redirect (ListenerPriorityHTTP) is not overridden by ListenerPriorityCRD
{cs: csFoo, adds: []int{43}, port: 80, proto: 6, prefix: 12, redirect: ListenerPriorityHTTP},
// more specific entries with different auth are not overridden
{cs: csBar, adds: []int{43}, port: 80, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csBar, adds: []int{43}, port: 81, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): proxyEntryCRD(1),
egressKey(43, 6, 80, 12): proxyEntryHTTP(1),
egressKey(43, 6, 80, 0): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 81, 0): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(43, 6, 80, 12): {},
egressKey(43, 6, 80, 16): {},
egressKey(43, 6, 81, 16): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5c-r - higher priority proxy port not overridden with auth entries - reverse",
args: []args{
// more specific entries with different auth are not overridden
{cs: csBar, adds: []int{43}, port: 81, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csBar, adds: []int{43}, port: 80, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
// higher priority redirect (ListenerPriorityHTTP) is not overridden by ListenerPriorityCRD
{cs: csFoo, adds: []int{43}, port: 80, proto: 6, prefix: 12, redirect: ListenerPriorityHTTP},
{cs: csFoo, adds: []int{43}, proto: 6, redirect: ListenerPriorityCRD},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): proxyEntryCRD(1),
egressKey(43, 6, 80, 12): proxyEntryHTTP(1),
egressKey(43, 6, 80, 0): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 81, 0): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(43, 6, 80, 12): {},
egressKey(43, 6, 80, 16): {},
egressKey(43, 6, 81, 16): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5d - higher priority proxy port propagation to auth entries",
args: []args{
{cs: csFoo, adds: []int{43}, proto: 6, redirect: ListenerPriorityHTTP},
// lower priority redirect (ListenerPriorityCRD) is overridden by ListenerPriorityHTTP redirect, but kept due to different auth requirement
{cs: csFoo, adds: []int{43}, port: 80, proto: 6, prefix: 12, redirect: ListenerPriorityCRD, authReq: AuthTypeSpire.AsExplicitRequirement()},
// but more specific entries without redirect and the same auth are not added
{cs: csBar, adds: []int{43}, port: 80, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csBar, adds: []int{43}, port: 81, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): proxyEntryHTTP(1),
egressKey(43, 6, 80, 12): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 80, 16): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 81, 16): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(43, 6, 80, 12): {},
egressKey(43, 6, 80, 16): {},
egressKey(43, 6, 81, 16): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5d-r - higher priority proxy port propagation to auth entries - reverse",
args: []args{
{cs: csBar, adds: []int{43}, port: 81, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csBar, adds: []int{43}, port: 80, proto: 6, prefix: 16, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csFoo, adds: []int{43}, port: 80, proto: 6, prefix: 12, redirect: ListenerPriorityCRD, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csFoo, adds: []int{43}, proto: 6, redirect: ListenerPriorityHTTP},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): proxyEntryHTTP(1),
egressKey(43, 6, 80, 12): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 80, 16): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 81, 16): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(43, 6, 80, 12): {},
egressKey(43, 6, 80, 16): {},
egressKey(43, 6, 81, 16): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5e - higher priority proxy port propagation to auth proxy entry",
args: []args{
{cs: csFoo, adds: []int{43}, proto: 6, redirect: ListenerPriorityHTTP},
// lower priority redirect (ListenerPriorityCRD) is overridden by ListenerPriorityHTTP redirect, but kept due to different auth requirement
{cs: csFoo, adds: []int{43}, port: 80, proto: 6, prefix: 12, redirect: ListenerPriorityCRD, authReq: AuthTypeSpire.AsExplicitRequirement()},
// but more specific entries with same auth are not added
{cs: csBar, adds: []int{43}, port: 80, proto: 6, prefix: 16},
{cs: csBar, adds: []int{43}, port: 81, proto: 6, prefix: 16},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): proxyEntryHTTP(1),
egressKey(43, 6, 80, 12): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(43, 6, 80, 12): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5e-r - higher priority proxy port propagation to auth proxy entry - reverse",
args: []args{
{cs: csBar, adds: []int{43}, port: 81, proto: 6, prefix: 16},
{cs: csBar, adds: []int{43}, port: 80, proto: 6, prefix: 16},
{cs: csFoo, adds: []int{43}, port: 80, proto: 6, prefix: 12, redirect: ListenerPriorityCRD, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csFoo, adds: []int{43}, proto: 6, redirect: ListenerPriorityHTTP},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): proxyEntryHTTP(1),
egressKey(43, 6, 80, 12): proxyEntryHTTP(1).withExplicitAuth(AuthTypeSpire),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(43, 6, 80, 12): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-6a - L3-only explicit auth type and L4-only without",
args: []args{
{cs: csFoo, adds: []int{43}, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csWildcard, adds: []int{0}, port: 80, proto: 6, redirect: ListenerPriorityHTTP},
},
state: testMapState(t, mapStateMap{
egressKey(43, 0, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(0, 6, 80, 0): proxyEntryHTTP(1),
}),
adds: Keys{
egressKey(43, 0, 0, 0): {},
egressKey(0, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-6a-r - L3-only explicit auth type and L4-only without - reverse",
args: []args{
{cs: csWildcard, adds: []int{0}, port: 80, proto: 6, redirect: ListenerPriorityHTTP},
{cs: csFoo, adds: []int{43}, authReq: AuthTypeSpire.AsExplicitRequirement()},
},
state: testMapState(t, mapStateMap{
egressKey(43, 0, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(0, 6, 80, 0): proxyEntryHTTP(1),
}),
adds: Keys{
egressKey(43, 0, 0, 0): {},
egressKey(0, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-7a - L3/proto explicit auth type and L4-only without",
args: []args{
{cs: csFoo, adds: []int{43}, proto: 6, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csWildcard, adds: []int{0}, port: 80, proto: 6, redirect: ListenerPriorityHTTP},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(0, 6, 80, 0): proxyEntryHTTP(1),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(0, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-7a-1 - L3/proto explicit auth type and L4-only without - reverse",
args: []args{
{cs: csWildcard, adds: []int{0}, port: 80, proto: 6, redirect: ListenerPriorityHTTP},
{cs: csFoo, adds: []int{43}, proto: 6, authReq: AuthTypeSpire.AsExplicitRequirement()},
},
state: testMapState(t, mapStateMap{
egressKey(43, 6, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(0, 6, 80, 0): proxyEntryHTTP(1),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(0, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-n - title",
args: []args{
// {cs: csFoo, adds: []int{42, 43}, deletes: []int{50}, port: 80, proto: 6, ingress: true, redirect: ListenerPriorityHTTP, deny: false},
},
state: emptyMapState(hivetest.Logger(t)),
adds: Keys{
// HttpIngressKey(42): allowEntry(),
},
deletes: Keys{
// HttpIngressKey(43): allowEntry(),
},
},
}
epPolicy := &EndpointPolicy{
selectorPolicy: &selectorPolicy{
SelectorCache: selectorCache,
},
PolicyOwner: DummyOwner{logger: hivetest.Logger(t)},
}
policyMapState := emptyMapState(hivetest.Logger(t))
for _, tt := range tests {
t.Log(tt.name)
policyMaps := MapChanges{logger: hivetest.Logger(t)}
if !tt.continued {
policyMapState = emptyMapState(hivetest.Logger(t))
}
epPolicy.policyMapState = policyMapState
for _, x := range tt.args {
dir := trafficdirection.Egress
if x.ingress {
dir = trafficdirection.Ingress
}
adds := x.cs.addSelections(x.adds...)
deletes := x.cs.deleteSelections(x.deletes...)
key := KeyForDirection(dir).WithPortProtoPrefix(x.proto, x.port, x.prefix)
var proxyPort uint16
var priority ListenerPriority
if x.redirect != 0 {
proxyPort = 1
priority = x.redirect
}
value := newMapStateEntry(NilRuleOrigin, proxyPort, priority, x.deny, x.authReq)
policyMaps.AccumulateMapChanges(adds, deletes, []Key{key}, value)
}
policyMaps.SyncMapChanges(versioned.LatestTx)
handle, changes := policyMaps.consumeMapChanges(epPolicy, authRules|denyRules)
if handle != nil {
handle.Close()
}
policyMapState.validatePortProto(t)
require.True(t, policyMapState.Equal(&tt.state), "%s (MapState):\n%s", tt.name, policyMapState.diff(&tt.state))
require.Equal(t, tt.adds, changes.Adds, tt.name+" (adds)")
require.Equal(t, tt.deletes, changes.Deletes, tt.name+" (deletes)")
}
}
// TestMapState_denyPreferredInsertWithSubnets exercises insertWithChanges with
// overlapping identity selections (world superset, a /24 subnet, and a /32 IP
// inside it) for every combination of L3-only/L4/L3L4 keys and deny/allow
// entries. The expected outcome for each case is encoded as an 'action'
// bitmask, and each case is verified in both insertion orders to confirm that
// the result is order-independent.
func TestMapState_denyPreferredInsertWithSubnets(t *testing.T) {
	// Mock the identities what would be selected by the world, IP, and subnet selectors
	// Selections for the label selector 'reserved:world'
	reservedWorldSelections := identity.NumericIdentitySlice{identity.ReservedIdentityWorld, worldIPIdentity, worldSubnetIdentity}
	// Selections for the CIDR selector 'cidr:192.0.2.3/32'
	worldIPSelections := identity.NumericIdentitySlice{worldIPIdentity}
	// Selections for the CIDR selector 'cidr:192.0.2.0/24'
	worldSubnetSelections := identity.NumericIdentitySlice{worldSubnetIdentity, worldIPIdentity}
	// action is a bitmask describing which map entries are expected after the
	// insertions, plus modifiers suppressing some expected keys.
	type action uint32
	const (
		noAction             = action(iota)
		insertAllowAll       = action(1 << iota)
		insertA
		insertB
		worldIPl3only        // Do not expect L4 keys for IP covered by a subnet
		worldIPProtoOnly     // Do not expect port keys for IP covered by a subnet
		worldSubnetl3only    // Do not expect L4 keys for IP subnet
		worldSubnetProtoOnly // Do not expect port keys for IP subnet
		insertDenyWorld
		insertDenyWorldTCP
		insertDenyWorldHTTP
		insertAL3NotInB
		insertBL3NotInA
		insertBoth = insertA | insertB
	)
	type withAllowAll bool
	const (
		WithAllowAll    = withAllowAll(true)
		WithoutAllowAll = withAllowAll(false)
	)
	// these tests are based on the sheet https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw#gid=2109052536
	tests := []struct {
		name             string
		withAllowAll     withAllowAll
		aIdentities      identity.NumericIdentitySlice
		bIdentities      identity.NumericIdentitySlice
		aIsDeny, bIsDeny bool
		aPort            uint16
		aProto           u8proto.U8proto
		bPort            uint16
		bProto           u8proto.U8proto
		outcome          action
	}{
		// deny-allow insertions
		{"deny-allow: a superset a|b L3-only; subset allow inserted as deny", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 0, 0, insertAllowAll | insertA},
		{"deny-allow: a superset a|b L3-only; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 0, 0, insertA},
		{"deny-allow: b superset a|b L3-only", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 0, 0, insertAllowAll | insertBoth},
		{"deny-allow: b superset a|b L3-only; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 0, 0, insertBoth},
		{"deny-allow: a superset a L3-only, b L4; subset allow inserted as deny", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 0, 6, insertAllowAll | insertA},
		{"deny-allow: a superset a L3-only, b L4; without allow-all, subset allow inserted as deny", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 0, 6, insertA},
		{"deny-allow: b superset a L3-only, b L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 0, 6, insertAllowAll | insertBoth | worldIPl3only},
		{"deny-allow: b superset a L3-only, b L4; without allow-all, added deny TCP due to intersecting deny", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 0, 6, insertBoth | worldIPl3only},
		{"deny-allow: a superset a L3-only, b L3L4; subset allow inserted as deny", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 80, 6, insertAllowAll | insertA},
		{"deny-allow: a superset a L3-only, b L3L4; without allow-all, subset allow inserted as deny", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 80, 6, insertA},
		{"deny-allow: b superset a L3-only, b L3L4; IP allow not inserted", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 80, 6, insertAllowAll | insertBoth | worldIPl3only},
		{"deny-allow: b superset a L3-only, b L3L4; without allow-all, IP allow not inserted", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 80, 6, insertBoth | worldIPl3only},
		{"deny-allow: a superset a L4, b L3-only", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 0, 0, insertAllowAll | insertBoth},
		{"deny-allow: a superset a L4, b L3-only; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 0, 0, insertBoth},
		{"deny-allow: b superset a L4, b L3-only", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 0, 0, insertAllowAll | insertBoth},
		{"deny-allow: b superset a L4, b L3-only; without allow-all, more specific deny added", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 0, 0, insertBoth},
		{"deny-allow: a superset a L4, b L4; subset allow inserted as deny", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 0, 6, insertAllowAll | insertA},
		{"deny-allow: a superset a L4, b L4; without allow-all, subset allow inserted as deny", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 0, 6, insertA},
		{"deny-allow: b superset a L4, b L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 0, 6, insertAllowAll | insertBoth},
		{"deny-allow: b superset a L4, b L4; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 0, 6, insertBoth},
		{"deny-allow: a superset a L4, b L3L4; subset allow not inserted", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 80, 6, insertAllowAll | insertA},
		{"deny-allow: a superset a L4, b L3L4; without allow-all, subset allow not inserted", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 80, 6, insertA},
		{"deny-allow: b superset a L4, b L3L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 80, 6, insertAllowAll | insertBoth | worldIPProtoOnly},
		{"deny-allow: b superset a L4, b L3L4; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 80, 6, insertBoth | worldIPProtoOnly},
		{"deny-allow: a superset a L3L4, b L3-only", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 0, 0, insertAllowAll | insertBoth},
		{"deny-allow: a superset a L3L4, b L3-only; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 0, 0, insertBoth},
		{"deny-allow: b superset a L3L4, b L3-only", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 0, 0, insertAllowAll | insertBoth},
		{"deny-allow: b superset a L3L4, b L3-only; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 0, 0, insertBoth},
		{"deny-allow: a superset a L3L4, b L4", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 0, 6, insertAllowAll | insertBoth},
		{"deny-allow: a superset a L3L4, b L4; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 0, 6, insertBoth},
		{"deny-allow: b superset a L3L4, b L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 0, 6, insertAllowAll | insertBoth},
		{"deny-allow: b superset a L3L4, b L4; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 0, 6, insertBoth},
		{"deny-allow: a superset a L3L4, b L3L4", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 80, 6, insertAllowAll | insertA},
		{"deny-allow: a superset a L3L4, b L3L4; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 80, 6, insertA},
		{"deny-allow: b superset a L3L4, b L3L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 80, 6, insertAllowAll | insertBoth},
		{"deny-allow: b superset a L3L4, b L3L4; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 80, 6, insertBoth},
		// deny-deny insertions: Note: There is no redundancy between different non-zero security IDs on the
		// datapath, even if one would be a CIDR subset of another. Situation would be different if we could
		// completely remove (or not add in the first place) the redundant ID from the ipcache so that
		// datapath could never assign that ID to a packet for policy enforcement.
		// These test case are left here for such future improvement.
		{"deny-deny: a superset a|b L3-only", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 0, 0, insertAllowAll | insertBoth},
		{"deny-deny: a superset a|b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 0, 0, insertBoth},
		{"deny-deny: b superset a|b L3-only", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 0, 0, insertAllowAll | insertBoth},
		{"deny-deny: b superset a|b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 0, 0, insertBoth},
		{"deny-deny: a superset a L3-only, b L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 0, 6, insertAllowAll | insertA},
		{"deny-deny: a superset a L3-only, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 0, 6, insertA},
		{"deny-deny: b superset a L3-only, b L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 0, 6, insertAllowAll | insertBoth | insertBL3NotInA},
		{"deny-deny: b superset a L3-only, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 0, 6, insertBoth | insertBL3NotInA},
		{"deny-deny: a superset a L3-only, b L3L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 80, 6, insertAllowAll | insertA},
		{"deny-deny: a superset a L3-only, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 80, 6, insertA},
		{"deny-deny: b superset a L3-only, b L3L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 80, 6, insertAllowAll | insertBoth | insertBL3NotInA},
		{"deny-deny: b superset a L3-only, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 80, 6, insertBoth | insertBL3NotInA},
		{"deny-deny: a superset a L4, b L3-only", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 0, 0, insertAllowAll | insertBoth | insertAL3NotInB},
		{"deny-deny: a superset a L4, b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 0, 0, insertBoth | insertAL3NotInB},
		{"deny-deny: b superset a L4, b L3-only", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 0, 0, insertAllowAll | insertB},
		{"deny-deny: b superset a L4, b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 0, 0, insertB},
		{"deny-deny: a superset a L4, b L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 0, 6, insertAllowAll | insertBoth},
		{"deny-deny: a superset a L4, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 0, 6, insertBoth},
		{"deny-deny: b superset a L4, b L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 0, 6, insertAllowAll | insertBoth},
		{"deny-deny: b superset a L4, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 0, 6, insertBoth},
		{"deny-deny: a superset a L4, b L3L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 80, 6, insertAllowAll | insertA},
		{"deny-deny: a superset a L4, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 80, 6, insertA},
		{"deny-deny: b superset a L4, b L3L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 80, 6, insertAllowAll | insertBoth | insertBL3NotInA},
		{"deny-deny: b superset a L4, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 80, 6, insertBoth | insertBL3NotInA},
		{"deny-deny: a superset a L3L4, b L3-only", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 0, 0, insertAllowAll | insertBoth | insertAL3NotInB},
		{"deny-deny: a superset a L3L4, b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 0, 0, insertBoth | insertAL3NotInB},
		{"deny-deny: b superset a L3L4, b L3-only", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 0, 0, insertAllowAll | insertB},
		{"deny-deny: b superset a L3L4, b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 0, 0, insertB},
		{"deny-deny: a superset a L3L4, b L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 0, 6, insertAllowAll | insertBoth | insertAL3NotInB},
		{"deny-deny: a superset a L3L4, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 0, 6, insertBoth | insertAL3NotInB},
		{"deny-deny: b superset a L3L4, b L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 0, 6, insertAllowAll | insertB},
		{"deny-deny: b superset a L3L4, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 0, 6, insertB},
		{"deny-deny: a superset a L3L4, b L3L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 80, 6, insertAllowAll | insertBoth},
		{"deny-deny: a superset a L3L4, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 80, 6, insertBoth},
		{"deny-deny: b superset a L3L4, b L3L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 80, 6, insertAllowAll | insertBoth},
		{"deny-deny: b superset a L3L4, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 80, 6, insertBoth},
		// allow-allow insertions do not need tests as their affect on one another does not matter.
	}
	for _, tt := range tests {
		anyIngressKey := IngressKey()
		allowEntry := allowEntry()
		// Build the expected 'a' keys, skipping keys suppressed by the
		// worldIP*/worldSubnet* outcome modifiers.
		var aKeys []Key
		for _, idA := range tt.aIdentities {
			if tt.outcome&worldIPl3only > 0 && idA == worldIPIdentity &&
				(tt.aProto != 0 || tt.aPort != 0) {
				continue
			}
			if tt.outcome&worldIPProtoOnly > 0 && idA == worldIPIdentity &&
				tt.aPort != 0 {
				continue
			}
			if tt.outcome&worldSubnetl3only > 0 && idA == worldSubnetIdentity &&
				(tt.aProto != 0 || tt.aPort != 0) {
				continue
			}
			if tt.outcome&worldSubnetProtoOnly > 0 && idA == worldSubnetIdentity &&
				tt.aPort != 0 {
				continue
			}
			aKeys = append(aKeys, IngressKey().WithIdentity(idA).WithPortProto(tt.aProto, tt.aPort))
		}
		aEntry := NewMapStateEntry(types.NewMapStateEntry(tt.aIsDeny, 0, 0, types.NoAuthRequirement))
		// Build the expected 'b' keys with the same suppression rules.
		var bKeys []Key
		for _, idB := range tt.bIdentities {
			if tt.outcome&worldIPl3only > 0 && idB == worldIPIdentity &&
				(tt.bProto != 0 || tt.bPort != 0) {
				continue
			}
			if tt.outcome&worldIPProtoOnly > 0 && idB == worldIPIdentity &&
				tt.bPort != 0 {
				continue
			}
			if tt.outcome&worldSubnetl3only > 0 && idB == worldSubnetIdentity &&
				(tt.bProto != 0 || tt.bPort != 0) {
				continue
			}
			if tt.outcome&worldSubnetProtoOnly > 0 && idB == worldSubnetIdentity &&
				tt.bPort != 0 {
				continue
			}
			bKeys = append(bKeys, IngressKey().WithIdentity(idB).WithPortProto(tt.bProto, tt.bPort))
		}
		bEntry := NewMapStateEntry(types.NewMapStateEntry(tt.bIsDeny, 0, 0, types.NoAuthRequirement))
		expectedKeys := emptyMapState(hivetest.Logger(t))
		if tt.outcome&insertAllowAll > 0 {
			expectedKeys.insert(anyIngressKey, allowEntry)
		}
		// insert allow expectations before deny expectations to manage overlap
		if tt.outcome&insertB > 0 {
		BLoop1:
			for _, bKey := range bKeys {
				if tt.outcome&insertBL3NotInA > 0 {
					for _, aKey := range aKeys {
						if bKey.Identity == aKey.Identity {
							continue BLoop1
						}
					}
				}
				expectedKeys.insert(bKey, bEntry)
			}
		}
		if tt.outcome&insertA > 0 {
		ALoop:
			for _, aKey := range aKeys {
				if tt.outcome&insertAL3NotInB > 0 {
					for _, bKey := range bKeys {
						if aKey.Identity == bKey.Identity {
							continue ALoop
						}
					}
				}
				expectedKeys.insert(aKey, aEntry)
			}
		}
		if tt.outcome&insertDenyWorld > 0 {
			worldIngressKey := IngressKey().WithIdentity(2)
			denyEntry := NewMapStateEntry(DenyEntry)
			expectedKeys.insert(worldIngressKey, denyEntry)
		}
		if tt.outcome&insertDenyWorldTCP > 0 {
			worldIngressKey := IngressKey().WithIdentity(2).WithTCPPort(0)
			denyEntry := NewMapStateEntry(DenyEntry)
			expectedKeys.insert(worldIngressKey, denyEntry)
		}
		if tt.outcome&insertDenyWorldHTTP > 0 {
			worldIngressKey := IngressKey().WithIdentity(2).WithTCPPort(80)
			denyEntry := NewMapStateEntry(DenyEntry)
			expectedKeys.insert(worldIngressKey, denyEntry)
		}
		// Perform the actual insertions (allow-all first, then a, then b) and
		// compare against the expectation.
		outcomeKeys := emptyMapState(hivetest.Logger(t))
		changes := ChangeState{}
		if tt.withAllowAll {
			outcomeKeys.insertWithChanges(anyIngressKey, allowEntry, allFeatures, changes)
		}
		for _, idA := range tt.aIdentities {
			aKey := IngressKey().WithIdentity(idA).WithPortProto(tt.aProto, tt.aPort)
			outcomeKeys.insertWithChanges(aKey, aEntry, allFeatures, changes)
		}
		for _, idB := range tt.bIdentities {
			bKey := IngressKey().WithIdentity(idB).WithPortProto(tt.bProto, tt.bPort)
			outcomeKeys.insertWithChanges(bKey, bEntry, allFeatures, changes)
		}
		outcomeKeys.validatePortProto(t)
		require.True(t, expectedKeys.Equal(&outcomeKeys), "%s (MapState):\n%s\nExpected:\n%s\nObtained:\n%s\n", tt.name, outcomeKeys.diff(&expectedKeys), expectedKeys, outcomeKeys)
		// Test also with reverse insertion order
		outcomeKeys = emptyMapState(hivetest.Logger(t))
		for _, idB := range tt.bIdentities {
			bKey := IngressKey().WithIdentity(idB).WithPortProto(tt.bProto, tt.bPort)
			outcomeKeys.insertWithChanges(bKey, bEntry, allFeatures, changes)
		}
		for _, idA := range tt.aIdentities {
			aKey := IngressKey().WithIdentity(idA).WithPortProto(tt.aProto, tt.aPort)
			outcomeKeys.insertWithChanges(aKey, aEntry, allFeatures, changes)
		}
		if tt.withAllowAll {
			outcomeKeys.insertWithChanges(anyIngressKey, allowEntry, allFeatures, changes)
		}
		outcomeKeys.validatePortProto(t)
		require.True(t, expectedKeys.Equal(&outcomeKeys), "%s (in reverse) (MapState):\n%s\nExpected:\n%s\nObtained:\n%s\n", tt.name, outcomeKeys.diff(&expectedKeys), expectedKeys, outcomeKeys)
	}
	// Now test all cases with different traffic directions.
	// This should result in both entries being inserted with
	// no changes, as they do not affect one another anymore.
	for _, tt := range tests {
		anyIngressKey := IngressKey()
		anyEgressKey := EgressKey()
		allowEntry := allowEntry()
		var aKeys []Key
		for _, idA := range tt.aIdentities {
			aKeys = append(aKeys, IngressKey().WithIdentity(idA).WithPortProto(tt.aProto, tt.aPort))
		}
		aEntry := NewMapStateEntry(types.NewMapStateEntry(tt.aIsDeny, 0, 0, types.NoAuthRequirement))
		var bKeys []Key
		for _, idB := range tt.bIdentities {
			bKeys = append(bKeys, EgressKey().WithIdentity(idB).WithPortProto(tt.bProto, tt.bPort))
		}
		bEntry := NewMapStateEntry(types.NewMapStateEntry(tt.bIsDeny, 0, 0, types.NoAuthRequirement))
		expectedKeys := emptyMapState(hivetest.Logger(t))
		if tt.outcome&insertAllowAll > 0 {
			expectedKeys.insert(anyIngressKey, allowEntry)
			expectedKeys.insert(anyEgressKey, allowEntry)
		}
		for _, aKey := range aKeys {
			expectedKeys.insert(aKey, aEntry)
		}
		for _, bKey := range bKeys {
			expectedKeys.insert(bKey, bEntry)
		}
		outcomeKeys := emptyMapState(hivetest.Logger(t))
		changes := ChangeState{}
		if tt.withAllowAll {
			outcomeKeys.insertWithChanges(anyIngressKey, allowEntry, allFeatures, changes)
			outcomeKeys.insertWithChanges(anyEgressKey, allowEntry, allFeatures, changes)
		}
		for _, aKey := range aKeys {
			outcomeKeys.insertWithChanges(aKey, aEntry, allFeatures, changes)
		}
		for _, bKey := range bKeys {
			outcomeKeys.insertWithChanges(bKey, bEntry, allFeatures, changes)
		}
		outcomeKeys.validatePortProto(t)
		require.True(t, expectedKeys.Equal(&outcomeKeys), "%s different traffic directions (MapState):\n%s", tt.name, outcomeKeys.diff(&expectedKeys))
		// Test also with reverse insertion order
		outcomeKeys = emptyMapState(hivetest.Logger(t))
		for _, bKey := range bKeys {
			outcomeKeys.insertWithChanges(bKey, bEntry, allFeatures, changes)
		}
		for _, aKey := range aKeys {
			outcomeKeys.insertWithChanges(aKey, aEntry, allFeatures, changes)
		}
		if tt.withAllowAll {
			outcomeKeys.insertWithChanges(anyEgressKey, allowEntry, allFeatures, changes)
			outcomeKeys.insertWithChanges(anyIngressKey, allowEntry, allFeatures, changes)
		}
		outcomeKeys.validatePortProto(t)
		require.True(t, expectedKeys.Equal(&outcomeKeys), "%s different traffic directions (in reverse) (MapState):\n%s", tt.name, outcomeKeys.diff(&expectedKeys))
	}
}
// TestMapState_Get_stacktrace looks up an invalid key (a zero Key) and checks
// that the lookup fails. The lookup is also expected to emit an error log with
// a stacktrace (e.g. "mapState.Get: invalid wildcard port with non-zero
// mask ..."); that log output is not asserted here and can only be inspected
// manually in the test output.
func TestMapState_Get_stacktrace(t *testing.T) {
	mapState := emptyMapState(hivetest.Logger(t))
	logger := hivetest.Logger(t)
	logger.Error("Expecting an error log on the next log line!")
	_, found := mapState.Get(Key{})
	assert.False(t, found)
}
// TestDenyPreferredInsertLogic is now less valuable since we do not have the
// mapstate validator any more, but may still catch bugs. It distills a policy
// generated from CIDR deny rules and checks that the resulting map state is
// non-empty.
func TestDenyPreferredInsertLogic(t *testing.T) {
	testData := newTestData(hivetest.Logger(t))
	testData.bootstrapRepo(GenerateCIDRDenyRules, 1000, t)
	selPolicy, _ := testData.repo.resolvePolicyLocked(fooIdentity)
	endpointPolicy := selPolicy.DistillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, nil)
	endpointPolicy.Ready()
	// Capture the entry count before detaching, then assert on it.
	entryCount := endpointPolicy.policyMapState.Len()
	selPolicy.detach(true, 0)
	assert.Positive(t, entryCount)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"github.com/cilium/cilium/pkg/metrics"
"github.com/prometheus/client_golang/prometheus"
)
const (
	// LabelSelectorClass indicates the class of selector being measured
	LabelSelectorClass = "class"
	// LabelValueSCFQDN is the class label value used for FQDN selectors.
	LabelValueSCFQDN = "fqdn"
	// LabelValueSCCluster is used for the cluster entity.
	LabelValueSCCluster = "cluster"
	// LabelValueSCWorld is used for the world entity.
	LabelValueSCWorld = "world"
	// LabelValueSCOther is used for all other selectors, e.g. security
	// identities selected by label selectors.
	LabelValueSCOther = "other"
)
var (
	// selectorCacheMetricsDesc describes the single gauge exported by
	// selectorCacheMetrics: the maximum number of identities selected by any
	// single selector, labeled by selector class (LabelSelectorClass).
	selectorCacheMetricsDesc = prometheus.NewDesc(
		prometheus.BuildFQName(metrics.CiliumAgentNamespace, "policy_selector", "match_count_max"),
		"The maximum number of identities selected by a network policy peer selector",
		[]string{LabelSelectorClass},
		nil,
	)
)
// selectorStats holds per-class selector cardinality statistics.
type selectorStats struct {
	// maxCardinalityByClass maps a selector class label value to the largest
	// number of identities selected by any single selector of that class.
	maxCardinalityByClass map[string]int
}
// newSelectorStats returns a selectorStats with every known selector class
// initialized to a zero maximum cardinality.
func newSelectorStats() selectorStats {
	classes := []string{
		LabelValueSCFQDN,
		LabelValueSCCluster,
		LabelValueSCWorld,
		LabelValueSCOther,
	}
	byClass := make(map[string]int, len(classes))
	for _, class := range classes {
		byClass[class] = 0
	}
	return selectorStats{maxCardinalityByClass: byClass}
}
// selectorStatsCollector is implemented by types that can report selector
// cardinality statistics.
type selectorStatsCollector interface {
	Stats() selectorStats
}
// selectorCacheMetrics adapts a selectorStatsCollector into a
// prometheus.Collector, exporting selector cardinality gauges.
type selectorCacheMetrics struct {
	prometheus.Collector
	selectorStatsCollector
}
// newSelectorCacheMetrics wraps the given stats collector in a
// prometheus.Collector exposing the selector cardinality metric.
func newSelectorCacheMetrics(sc selectorStatsCollector) prometheus.Collector {
	collector := &selectorCacheMetrics{selectorStatsCollector: sc}
	return collector
}
// Describe implements prometheus.Collector.
func (scm *selectorCacheMetrics) Describe(ch chan<- *prometheus.Desc) {
	ch <- selectorCacheMetricsDesc
}
// Collect implements prometheus.Collector. It emits one gauge sample per
// selector class, carrying the maximum selector cardinality for that class.
func (scm *selectorCacheMetrics) Collect(ch chan<- prometheus.Metric) {
	byClass := scm.selectorStatsCollector.Stats().maxCardinalityByClass
	for class, maxCardinality := range byClass {
		metric := prometheus.MustNewConstMetric(
			selectorCacheMetricsDesc, prometheus.GaugeValue, float64(maxCardinality), class,
		)
		ch <- metric
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"encoding/json"
"slices"
"strings"
"unique"
"github.com/cilium/cilium/pkg/container/set"
"github.com/cilium/cilium/pkg/labels"
)
const separator = "\x1f" // ascii information separator 1

// stringList is a list of strings joined by 'separator' into a single string
// so that it can be cheaply interned.
type stringList string
// RuleMeta is the set of meta-information from the owning rules.
// To save memory, it is an interned type. Thus all the struct members
// are strings (but are really delimited lists)
type RuleMeta struct {
	labels labels.LabelArrayListString // from LabelArrayList.String()
	log    stringList                  // separator-delimited rule log annotations
}
// LabelArray parses the interned label string back into a
// labels.LabelArrayList.
func (rm RuleMeta) LabelArray() labels.LabelArrayList {
	return labels.LabelArrayListFromString(rm.labels)
}
// LabelArrayListString returns the interned string form of the rule labels.
func (rm RuleMeta) LabelArrayListString() labels.LabelArrayListString {
	return rm.labels
}
// newStringList packs the given items into a single sorted,
// separator-delimited string. The input is cloned before sorting so that a
// caller passing its own slice via '...' (e.g. makeRuleOrigin's 'logs') does
// not have it reordered as a side effect.
func newStringList(items ...string) stringList {
	items = slices.Clone(items)
	slices.Sort(items)
	return stringList(strings.Join(items, separator))
}
// List splits the interned list back into its individual items.
// Note: for an empty stringList this returns [""] (strings.Split semantics).
func (sl stringList) List() []string {
	return strings.Split(string(sl), separator)
}
// mergeStringList returns the union of the items in 'a' and 'b' as a sorted,
// de-duplicated stringList. If either side is empty, the other is returned
// unchanged.
func mergeStringList(a, b stringList) stringList {
	switch {
	case a == "":
		return b
	case b == "":
		return a
	}
	union := set.NewSet[string]()
	for _, item := range a.List() {
		union.Insert(item)
	}
	for _, item := range b.List() {
		union.Insert(item)
	}
	return newStringList(union.AsSlice()...)
}
// ruleOrigin is an interned labels.LabelArrayList.String(), a list of rule labels tracking which
// policy rules are the origin for this policy. This information is used when distilling a policy to
// an EndpointPolicy, to track which policy rules were involved for a specific verdict.
type ruleOrigin unique.Handle[RuleMeta]
// Value returns the interned RuleMeta.
func (ro ruleOrigin) Value() RuleMeta {
	return (unique.Handle[RuleMeta])(ro).Value()
}
// LabelsString returns the interned string form of the origin rule labels.
func (ro ruleOrigin) LabelsString() labels.LabelArrayListString {
	return ro.Value().labels
}
// LogString renders the log annotations as a JSON array of strings.
// Marshaling a []string cannot fail, so the error is ignored.
func (ro ruleOrigin) LogString() string {
	out, _ := json.Marshal(ro.Value().log.List())
	return string(out)
}
// GetLabelArrayList parses the interned origin labels into a
// labels.LabelArrayList.
func (ro ruleOrigin) GetLabelArrayList() labels.LabelArrayList {
	return labels.LabelArrayListFromString(ro.LabelsString())
}
// stringLabels re-interns the origin labels as a stringLabels handle.
func (ro ruleOrigin) stringLabels() stringLabels {
	return newStringLabels(ro.LabelsString())
}
// Log returns the individual log annotation lines.
func (rm RuleMeta) Log() []string {
	return rm.log.List()
}
// newRuleOrigin interns the given RuleMeta into a ruleOrigin handle.
func newRuleOrigin(rm RuleMeta) ruleOrigin {
	return ruleOrigin(unique.Make(rm))
}
// makeRuleOrigin interns the given rule labels and log lines into a
// ruleOrigin.
func makeRuleOrigin(lbls labels.LabelArrayList, logs []string) ruleOrigin {
	return newRuleOrigin(RuleMeta{
		labels: lbls.ArrayListString(),
		log:    newStringList(logs...),
	})
}
// makeSingleRuleOrigin interns a single rule's labels and log annotation.
func makeSingleRuleOrigin(lbls labels.LabelArray, log string) ruleOrigin {
	return makeRuleOrigin(labels.LabelArrayList{lbls}, []string{log})
}
// Merge combines two rule origins.
// Returns the merged value: identical or zero-valued inputs are returned
// as-is; otherwise a new interned RuleMeta with merged labels and log lines.
func (ro ruleOrigin) Merge(other ruleOrigin) ruleOrigin {
	if ro == other {
		return ro
	}
	// Resolve the interned values once instead of on every use below.
	roMeta := ro.Value()
	otherMeta := other.Value()
	// do not merge zero values
	if roMeta == (RuleMeta{}) {
		return other
	}
	if otherMeta == (RuleMeta{}) {
		return ro
	}
	// 'merged' (was 'new') avoids shadowing the predeclared 'new' builtin.
	merged := RuleMeta{
		labels: labels.MergeSortedLabelArrayListStrings(roMeta.labels, otherMeta.labels),
		log:    mergeStringList(roMeta.log, otherMeta.log),
	}
	return ruleOrigin(unique.Make(merged))
}
// NilRuleOrigin is the canonical empty rule origin: an empty label list ("[]")
// and no log lines.
var NilRuleOrigin = newRuleOrigin(RuleMeta{labels: "[]"})
// testOrigin maps a cached selector to the label lists of the rules that
// produced it; used only by tests via OriginForTest.
type testOrigin map[CachedSelector]labels.LabelArrayList
// OriginForTest converts a testOrigin map into the interned ruleOrigin form
// used internally, with empty log lines.
func OriginForTest(m testOrigin) map[CachedSelector]ruleOrigin {
	out := make(map[CachedSelector]ruleOrigin, len(m))
	for sel, lblList := range m {
		out[sel] = makeRuleOrigin(lblList, nil)
	}
	return out
}
// stringLabels is an interned labels.LabelArray.String(); interning keeps a
// single canonical copy of each label string and makes comparison cheap.
type stringLabels unique.Handle[labels.LabelArrayListString]
// EmptyStringLabels is the interned representation of an empty label array.
var EmptyStringLabels = makeStringLabels(nil)
// Value returns the interned label-array-list string.
func (sl stringLabels) Value() labels.LabelArrayListString {
	h := unique.Handle[labels.LabelArrayListString](sl)
	return h.Value()
}
// makeStringLabels sorts 'lbls' and interns its canonical string form.
func makeStringLabels(lbls labels.LabelArray) stringLabels {
	sorted := labels.LabelArrayList{lbls.Sort()}
	return newStringLabels(sorted.ArrayListString())
}
// newStringLabels interns an already-canonical label-array-list string.
func newStringLabels(lbls labels.LabelArrayListString) stringLabels {
	h := unique.Make(lbls)
	return stringLabels(h)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"math"
"math/bits"
)
// MaskedPort is a port with a wild card mask value.
// The port range is represented by a masked port
// because we need to use masks for policy Keys
// that are indexed in the datapath by a bitwise
// longest-prefix-match trie.
type MaskedPort struct {
	// port is the port value; bits cleared by 'mask' are wildcarded.
	port uint16
	// mask selects the significant bits of 'port' (1 = significant, 0 = wildcard).
	mask uint16
}
// String renders the masked port with hexadecimal port and mask values,
// mainly for logging and test output.
func (m MaskedPort) String() string {
	s := fmt.Sprintf("{port: 0x%x, mask: 0x%x}", m.port, m.mask)
	return s
}
// maskedPort returns a new MaskedPort where 'wildcardBits' lowest bits are wildcarded.
func maskedPort(port uint16, wildcardBits int) MaskedPort {
	m := uint16(math.MaxUint16) << wildcardBits
	return MaskedPort{port & m, m}
}
// PortRangeToMaskedPorts returns a slice of masked ports for the given port range.
// If the end port is equal to or less than the start port, the start port is
// returned as a fully masked (exact-match) port.
// Ports are not returned in any particular order, so testing code needs to sort them
// for consistency.
func PortRangeToMaskedPorts(start uint16, end uint16) (ports []MaskedPort) {
	// This is a wildcard: all 16 bits are masked out.
	if start == 0 && (end == 0 || end == math.MaxUint16) {
		return []MaskedPort{{0, 0}}
	}
	// This is a single port: all 16 bits are significant.
	if end <= start {
		return []MaskedPort{{start, 0xffff}}
	}
	// Find the number of common leading bits. The first uncommon bit will be 0 for the start
	// and 1 for the end.
	commonBits := bits.LeadingZeros16(start ^ end)

	// Cover the case where all the bits after the common bits are zeros on start and ones on
	// end. In this case the range can be represented by a single masked port instead of two
	// that would be produced below.
	// For example, if the range is from 16-31 (0b10000 - 0b11111), then we return 0b1xxxx
	// instead of 0b10xxx and 0b11xxx that would be produced when approaching the middle from
	// the two sides.
	//
	// This also covers the trivial case where all the bits are in common (i.e., start == end).
	mask := uint16(math.MaxUint16) >> commonBits
	if start&mask == 0 && ^end&mask == 0 {
		return []MaskedPort{maskedPort(start, 16-commonBits)}
	}

	// Find the "middle point" toward which the masked ports approach from both sides.
	// This "middle point" is the highest bit that differs between the range start and end.
	middleBit := 16 - 1 - commonBits
	middle := uint16(1 << middleBit)

	// Wildcard the trailing zeroes to the right of the middle bit of the range start.
	// This covers the values immediately following the port range start, including the start itself.
	// The middle bit is added to avoid counting zeroes past it.
	bit := bits.TrailingZeros16(start | middle)
	ports = append(ports, maskedPort(start, bit))

	// Find all 0-bits between the trailing zeroes and the middle bit and add MaskedPorts where
	// each found 0-bit is set and the lower bits are wildcarded. This covers the range from the
	// start to the middle not covered by the trailing zeroes above.
	// The current 'bit' is skipped since we know it is 1.
	for bit++; bit < middleBit; bit++ {
		if start&(1<<bit) == 0 {
			// Adding 1<<bit will set the bit since we know it is not set
			ports = append(ports, maskedPort(start+1<<bit, bit))
		}
	}

	// Wildcard the trailing ones to the right of the middle bit of the range end.
	// This covers the values immediately preceding and including the range end.
	// The middle bit is added to avoid counting ones past it.
	bit = bits.TrailingZeros16(^end | middle)
	ports = append(ports, maskedPort(end, bit))

	// Find all 1-bits between the trailing ones and the middle bit and add MaskedPorts where
	// each found 1-bit is cleared and the lower bits are wildcarded. This covers the range from
	// the end to the middle not covered by the trailing ones above.
	// The current 'bit' is skipped since we know it is 0.
	for bit++; bit < middleBit; bit++ {
		if end&(1<<bit) != 0 {
			// Subtracting 1<<bit will clear the bit since we know it is set
			ports = append(ports, maskedPort(end-1<<bit, bit))
		}
	}
	return ports
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/u8proto"
)
// ProxyStatsKey returns a key for endpoint's proxy stats, which may aggregate stats from multiple
// proxy redirects on the same port. The key has the form
// "<direction>:<protocol>:<port>:<proxyPort>".
func ProxyStatsKey(ingress bool, protocol string, port, proxyPort uint16) string {
	direction := "egress"
	if ingress {
		direction = "ingress"
	}
	// Join the four colon-separated components in one pass.
	return strings.Join([]string{
		direction,
		protocol,
		strconv.FormatUint(uint64(port), 10),
		strconv.FormatUint(uint64(proxyPort), 10),
	}, ":")
}
// ProxyID returns a unique string to identify a proxy mapping, of the form
// "<endpointID>:<direction>:<protocol>:<port>:<listener>".
func ProxyID(endpointID uint16, ingress bool, protocol string, port uint16, listener string) string {
	direction := "egress"
	if ingress {
		direction = "ingress"
	}
	// Join the five colon-separated components in one pass.
	return strings.Join([]string{
		strconv.FormatUint(uint64(endpointID), 10),
		direction,
		protocol,
		strconv.FormatUint(uint64(port), 10),
		listener,
	}, ":")
}
// ProxyIDFromKey returns a unique string to identify a proxy mapping, derived
// from the policy key's traffic direction, protocol, and destination port.
func ProxyIDFromKey(endpointID uint16, key Key, listener string) string {
	return ProxyID(endpointID, key.TrafficDirection() == trafficdirection.Ingress, u8proto.U8proto(key.Nexthdr).String(), key.DestPort, listener)
}
// ParseProxyID parses a proxy ID returned by ProxyID and returns its
// components. On error the already-parsed components keep their values and
// err is non-nil; callers should only use the results when err is nil.
func ParseProxyID(proxyID string) (endpointID uint16, ingress bool, protocol string, port uint16, listener string, err error) {
	parts := strings.Split(proxyID, ":")
	if len(parts) != 5 {
		err = fmt.Errorf("invalid proxy ID structure: %s", proxyID)
		return
	}

	// Both numeric components are 16-bit unsigned decimal values.
	var val uint64
	val, err = strconv.ParseUint(parts[0], 10, 16)
	if err != nil {
		return
	}
	endpointID = uint16(val)

	ingress = parts[1] == "ingress"
	protocol = parts[2]

	val, err = strconv.ParseUint(parts[3], 10, 16)
	if err != nil {
		return
	}
	port = uint16(val)

	listener = parts[4]
	return
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"encoding/json"
"log/slog"
"maps"
"slices"
"sync/atomic"
cilium "github.com/cilium/proxy/go/cilium/api"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/container/set"
"github.com/cilium/cilium/pkg/crypto/certificatemanager"
envoypolicy "github.com/cilium/cilium/pkg/envoy/policy"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/identitymanager"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/spanstat"
)
// PolicyRepository is the interface to the policy rule repository: rule
// replacement and lookup, plus per-identity selector-policy computation.
type PolicyRepository interface {
	// BumpRevision forces a revision increment and returns the new revision.
	BumpRevision() uint64
	// GetAuthTypes returns the AuthTypes required between localID and remoteID.
	GetAuthTypes(localID identity.NumericIdentity, remoteID identity.NumericIdentity) AuthTypes
	// GetEnvoyHTTPRules translates the given L7 rules into Envoy HTTP policy rules.
	GetEnvoyHTTPRules(l7Rules *api.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool)
	// GetSelectorPolicy computes the SelectorPolicy for a given identity.
	//
	// It returns nil if skipRevision is >= than the already calculated version.
	// This is used to skip policy calculation when a certain revision delta is
	// known to not affect the given identity. Pass a skipRevision of 0 to force
	// calculation.
	GetSelectorPolicy(id *identity.Identity, skipRevision uint64, stats GetPolicyStatistics, endpointID uint64) (SelectorPolicy, uint64, error)
	// GetPolicySnapshot returns a map of all the SelectorPolicies in the repository.
	GetPolicySnapshot() map[identity.NumericIdentity]SelectorPolicy
	// GetRevision returns the current repository revision.
	GetRevision() uint64
	// GetRulesList returns the current policy rules as a models.Policy.
	GetRulesList() *models.Policy
	// GetSelectorCache returns the selector cache backing this repository.
	GetSelectorCache() *SelectorCache
	// Iterate calls f for each rule in the repository.
	Iterate(f func(rule *api.Rule))
	// ReplaceByResource replaces all rules owned by the given resource.
	ReplaceByResource(rules api.Rules, resource ipcachetypes.ResourceID) (affectedIDs *set.Set[identity.NumericIdentity], rev uint64, oldRevCnt int)
	// ReplaceByLabels removes rules matching any of the search labels and installs the new rules.
	ReplaceByLabels(rules api.Rules, searchLabelsList []labels.LabelArray) (affectedIDs *set.Set[identity.NumericIdentity], rev uint64, oldRevCnt int)
	// Search returns all rules matching the label array, plus the current revision.
	Search(lbls labels.LabelArray) (api.Rules, uint64)
}
// GetPolicyStatistics exposes the span statistics collected while computing a
// selector policy.
type GetPolicyStatistics interface {
	// WaitingForPolicyRepository measures time spent waiting for the repository lock.
	WaitingForPolicyRepository() *spanstat.SpanStat
	// SelectorPolicyCalculation measures time spent computing the selector policy.
	SelectorPolicyCalculation() *spanstat.SpanStat
}
// Repository is a list of policy rules which in combination form the security
// policy. A policy repository can be safely queried and updated concurrently;
// all accesses to the rule tree go through 'mutex'.
type Repository struct {
	logger *slog.Logger
	// mutex protects the whole policy tree
	mutex lock.RWMutex
	// rules is the primary index: every rule, keyed by its ruleKey.
	rules map[ruleKey]*rule
	// rulesByNamespace indexes rule keys by the namespace of their owning
	// resource ("" for cluster-wide rules).
	rulesByNamespace map[string]sets.Set[ruleKey]
	// rulesByResource indexes rules by owning resource, then by rule key.
	rulesByResource map[ipcachetypes.ResourceID]map[ruleKey]*rule

	// We will need a way to synthesize a rule key for rules without a resource;
	// these are - in practice - very rare, as they only come from the local API,
	// never via k8s.
	nextID uint

	// revision is the revision of the policy repository. It will be
	// incremented whenever the policy repository is changed.
	// Always positive (>0).
	revision atomic.Uint64

	// SelectorCache tracks the selectors used in the policies
	// resolved from the repository.
	selectorCache *SelectorCache

	// PolicyCache tracks the selector policies created from this repo
	policyCache *policyCache

	certManager       certificatemanager.CertificateManager
	metricsManager    types.PolicyMetrics
	l7RulesTranslator envoypolicy.EnvoyL7RulesTranslator
}
// GetEnvoyHTTPRules translates the given L7 rules to Envoy HTTP network policy
// rules for namespace 'ns', delegating to the configured translator.
func (p *Repository) GetEnvoyHTTPRules(l7Rules *api.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool) {
	return p.l7RulesTranslator.GetEnvoyHTTPRules(l7Rules, ns)
}
// GetSelectorCache returns the selector cache used by the Repository.
func (p *Repository) GetSelectorCache() *SelectorCache {
	return p.selectorCache
}
// GetAuthTypes returns the AuthTypes required by the policy between the
// localID and remoteID, delegating to the policy cache.
func (p *Repository) GetAuthTypes(localID, remoteID identity.NumericIdentity) AuthTypes {
	return p.policyCache.getAuthTypes(localID, remoteID)
}
// NewPolicyRepository creates a new, empty policy repository wired to the
// given selector-cache inputs, certificate manager, L7 translator, identity
// manager, and metrics sink. The initial revision is 1.
func NewPolicyRepository(
	logger *slog.Logger,
	initialIDs identity.IdentityMap,
	certManager certificatemanager.CertificateManager,
	l7RulesTranslator envoypolicy.EnvoyL7RulesTranslator,
	idmgr identitymanager.IDManager,
	metricsManager types.PolicyMetrics,
) *Repository {
	sc := NewSelectorCache(logger, initialIDs)
	r := &Repository{
		logger:            logger,
		rules:             make(map[ruleKey]*rule),
		rulesByNamespace:  make(map[string]sets.Set[ruleKey]),
		rulesByResource:   make(map[ipcachetypes.ResourceID]map[ruleKey]*rule),
		selectorCache:     sc,
		certManager:       certManager,
		metricsManager:    metricsManager,
		l7RulesTranslator: l7RulesTranslator,
	}
	// Revision is always positive; start at 1.
	r.revision.Store(1)
	r.policyCache = newPolicyCache(r, idmgr)
	return r
}
// Search returns all rules whose labels contain 'lbls', together with the
// repository revision at the time of the search. Takes the read lock.
func (p *Repository) Search(lbls labels.LabelArray) (api.Rules, uint64) {
	p.mutex.RLock()
	defer p.mutex.RUnlock()
	return p.searchRLocked(lbls), p.GetRevision()
}
// searchRLocked searches the policy repository for rules which match the
// specified labels and will return an array of all rules which matched.
// Caller must hold at least the read lock.
func (p *Repository) searchRLocked(lbls labels.LabelArray) api.Rules {
	matched := api.Rules{}
	for _, candidate := range p.rules {
		if candidate.Labels.Contains(lbls) {
			matched = append(matched, &candidate.Rule)
		}
	}
	return matched
}
// addListLocked inserts a rule into the policy repository with the repository already locked
// Expects that the entire rule list has already been sanitized.
//
// Only used by unit tests, but by multiple packages.
func (p *Repository) addListLocked(rules api.Rules) (ruleSlice, uint64) {
	added := make(ruleSlice, 0, len(rules))
	for _, apiRule := range rules {
		// Rules without a resource get a synthesized key from nextID.
		r := p.newRule(*apiRule, ruleKey{idx: p.nextID})
		p.nextID++
		added = append(added, r)
		p.insert(r)
	}
	return added, p.BumpRevision()
}
// insert adds an already-constructed rule to the primary, per-namespace, and
// per-resource indices, and updates the rule metrics.
func (p *Repository) insert(r *rule) {
	p.rules[r.key] = r
	p.metricsManager.AddRule(r.Rule)

	// Index by namespace, creating the per-namespace set on first use.
	ns := r.key.resource.Namespace()
	if _, exists := p.rulesByNamespace[ns]; !exists {
		p.rulesByNamespace[ns] = sets.New[ruleKey]()
	}
	p.rulesByNamespace[ns].Insert(r.key)

	// Index by owning resource, if any.
	if rid := r.key.resource; len(rid) > 0 {
		if p.rulesByResource[rid] == nil {
			p.rulesByResource[rid] = map[ruleKey]*rule{}
		}
		p.rulesByResource[rid][r.key] = r
	}
	metrics.Policy.Inc()
}
// del removes the rule with the given key from all indices and updates the
// rule metrics. A missing key is a no-op.
func (p *Repository) del(key ruleKey) {
	r := p.rules[key]
	if r == nil {
		return
	}
	p.metricsManager.DelRule(r.Rule)
	delete(p.rules, key)

	// Drop the namespace index entry; remove the whole per-namespace set
	// once it becomes empty.
	ns := r.key.resource.Namespace()
	p.rulesByNamespace[ns].Delete(key)
	if len(p.rulesByNamespace[ns]) == 0 {
		delete(p.rulesByNamespace, ns)
	}

	// Same for the per-resource index.
	if rid := key.resource; len(rid) > 0 && p.rulesByResource[rid] != nil {
		delete(p.rulesByResource[rid], key)
		if len(p.rulesByResource[rid]) == 0 {
			delete(p.rulesByResource, rid)
		}
	}
	metrics.Policy.Dec()
}
// newRule allocates a CachedSelector for a given rule.
// The boolean returned by AddIdentitySelector (whether the selector was newly
// added) is not needed here and is discarded.
func (p *Repository) newRule(apiRule api.Rule, key ruleKey) *rule {
	r := &rule{
		Rule: apiRule,
		key: key,
	}
	r.subjectSelector, _ = p.selectorCache.AddIdentitySelector(r, makeStringLabels(r.Labels), *r.getSelector())
	return r
}
// releaseRule releases the cached selector for a given rule.
// Rules without a subject selector (e.g. not yet registered) are a no-op.
func (p *Repository) releaseRule(r *rule) {
	if r.subjectSelector != nil {
		p.selectorCache.RemoveSelector(r.subjectSelector, r)
	}
}
// MustAddList inserts a rule into the policy repository. It is used for
// unit-testing purposes only. Panics if the rule is invalid
func (p *Repository) MustAddList(rules api.Rules) (ruleSlice, uint64) {
	// Sanitize everything up front; any invalid rule aborts the whole call.
	for _, r := range rules {
		if err := r.Sanitize(); err != nil {
			panic(err)
		}
	}
	p.mutex.Lock()
	defer p.mutex.Unlock()
	return p.addListLocked(rules)
}
// Iterate iterates the policy repository, calling f for each rule. It is safe
// to execute Iterate concurrently.
//
// NOTE(review): this takes the full write lock rather than the read lock, so
// concurrent Iterate calls are serialized with each other and with mutations;
// presumably this is so that f may safely act on the rules — confirm intent
// before changing to RLock.
func (p *Repository) Iterate(f func(rule *api.Rule)) {
	p.mutex.RWMutex.Lock()
	defer p.mutex.RWMutex.Unlock()
	for _, r := range p.rules {
		f(&r.Rule)
	}
}
// JSONMarshalRules returns a slice of policy rules as string in JSON
// representation. On marshalling failure the error text itself is returned
// as the payload.
func JSONMarshalRules(rules api.Rules) string {
	out, err := json.MarshalIndent(rules, "", " ")
	if err != nil {
		return err.Error()
	}
	return string(out)
}
// GetRevision returns the revision of the policy repository.
// Safe to call without holding the repository lock (atomic load).
func (p *Repository) GetRevision() uint64 {
	return p.revision.Load()
}
// BumpRevision allows forcing policy regeneration. It increments the revision
// metric and returns the new revision value.
func (p *Repository) BumpRevision() uint64 {
	metrics.PolicyRevision.Inc()
	return p.revision.Add(1)
}
// GetRulesList returns the current policy: every rule in the repository,
// JSON-encoded, together with the current revision.
func (p *Repository) GetRulesList() *models.Policy {
	p.mutex.RLock()
	defer p.mutex.RUnlock()

	// An empty select-label array matches every rule.
	allRules := p.searchRLocked(labels.ParseSelectLabelArrayFromArray([]string{}))
	return &models.Policy{
		Revision: int64(p.GetRevision()),
		Policy:   JSONMarshalRules(allRules),
	}
}
// resolvePolicyLocked returns the selectorPolicy for the provided
// identity from the set of rules in the repository. If the policy
// cannot be generated due to conflicts at L4 or L7, returns an error.
//
// Must be performed while holding the Repository lock.
func (p *Repository) resolvePolicyLocked(securityIdentity *identity.Identity) (*selectorPolicy, error) {
	// First obtain whether policy applies in both traffic directions, as well
	// as list of rules which actually select this endpoint. This allows us
	// to not have to iterate through the entire rule list multiple times and
	// perform the matching decision again when computing policy for each
	// protocol layer, which is quite costly in terms of performance.
	ingressEnabled, egressEnabled,
		hasIngressDefaultDeny, hasEgressDefaultDeny,
		matchingRules := p.computePolicyEnforcementAndRules(securityIdentity)

	calculatedPolicy := &selectorPolicy{
		Revision:             p.GetRevision(),
		SelectorCache:        p.GetSelectorCache(),
		L4Policy:             NewL4Policy(p.GetRevision()),
		IngressPolicyEnabled: ingressEnabled,
		EgressPolicyEnabled:  egressEnabled,
	}

	// The policy context carries per-identity settings (namespace, default
	// deny flags, tracing, logger) into the L4 resolution below.
	policyCtx := policyContext{
		repo:               p,
		ns:                 securityIdentity.LabelArray.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
		defaultDenyIngress: hasIngressDefaultDeny,
		defaultDenyEgress:  hasEgressDefaultDeny,
		traceEnabled:       option.Config.TracingEnabled(),
		logger:             p.logger.With(logfields.Identity, securityIdentity.ID),
	}

	// Resolve L4 policy only for the enabled directions.
	if ingressEnabled {
		newL4IngressPolicy, err := matchingRules.resolveL4IngressPolicy(&policyCtx)
		if err != nil {
			return nil, err
		}
		calculatedPolicy.L4Policy.Ingress.PortRules = newL4IngressPolicy
	}
	if egressEnabled {
		newL4EgressPolicy, err := matchingRules.resolveL4EgressPolicy(&policyCtx)
		if err != nil {
			return nil, err
		}
		calculatedPolicy.L4Policy.Egress.PortRules = newL4EgressPolicy
	}

	// Make the calculated policy ready for incremental updates
	calculatedPolicy.Attach(&policyCtx)

	return calculatedPolicy, nil
}
// computePolicyEnforcementAndRules returns whether policy applies at ingress or egress
// for the given security identity, as well as a list of any rules which select
// the set of labels of the given security identity.
//
// Must be called with repo mutex held for reading.
func (p *Repository) computePolicyEnforcementAndRules(securityIdentity *identity.Identity) (
	ingress, egress, hasIngressDefaultDeny, hasEgressDefaultDeny bool,
	matchingRules ruleSlice,
) {
	lbls := securityIdentity.LabelArray

	// Check if policy enforcement should be enabled at the daemon level.
	// Host endpoints are only subject to policy when the host firewall is on.
	if lbls.Has(labels.IDNameHost) && !option.Config.EnableHostFirewall {
		return false, false, false, false, nil
	}

	policyMode := GetPolicyEnabled()
	// If policy enforcement isn't enabled, we do not enable policy
	// enforcement for the endpoint. We don't care about returning any
	// rules that match.
	if policyMode == option.NeverEnforce {
		return false, false, false, false, nil
	}

	matchingRules = []*rule{}
	// Match cluster-wide rules (indexed under the empty namespace).
	for rKey := range p.rulesByNamespace[""] {
		r := p.rules[rKey]
		if r.matchesSubject(securityIdentity) {
			matchingRules = append(matchingRules, r)
		}
	}
	// Match namespace-specific rules
	namespace := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel)
	if namespace != "" {
		for rKey := range p.rulesByNamespace[namespace] {
			r := p.rules[rKey]
			if r.matchesSubject(securityIdentity) {
				matchingRules = append(matchingRules, r)
			}
		}
	}

	// If policy enforcement is enabled for the daemon, then it has to be
	// enabled for the endpoint.
	// If the endpoint has the reserved:init label, i.e. if it has not yet
	// received any labels, always enforce policy (default deny).
	if policyMode == option.AlwaysEnforce || lbls.Has(labels.IDNameInit) {
		return true, true, true, true, matchingRules
	}

	// Determine the default policy for each direction.
	//
	// By default, endpoints have no policy and all traffic is allowed.
	// If any rules select the endpoint, then the endpoint switches to a
	// default-deny mode (same as traffic being enabled), per-direction.
	//
	// Rules, however, can optionally be configured to not enable default deny mode.
	// If no rules enable default-deny, then all traffic is allowed except that explicitly
	// denied by a Deny rule.
	//
	// There are three possible cases _per direction_:
	// 1: No rules are present,
	// 2: At least one default-deny rule is present. Then, policy is enabled
	// 3: Only non-default-deny rules are present. Then, policy is enabled, but we must insert
	//    an additional allow-all rule. We must do this, even if all traffic is allowed, because
	//    rules may have additional effects such as enabling L7 proxy.
	for _, r := range matchingRules {
		if !ingress || !hasIngressDefaultDeny { // short-circuit len()
			if len(r.Ingress) > 0 || len(r.IngressDeny) > 0 {
				ingress = true
				// NOTE(review): EnableDefaultDeny.Ingress is dereferenced
				// unconditionally; presumably rule sanitization guarantees
				// it is non-nil — confirm.
				if *r.EnableDefaultDeny.Ingress {
					hasIngressDefaultDeny = true
				}
			}
		}
		if !egress || !hasEgressDefaultDeny { // short-circuit len()
			if len(r.Egress) > 0 || len(r.EgressDeny) > 0 {
				egress = true
				if *r.EnableDefaultDeny.Egress {
					hasEgressDefaultDeny = true
				}
			}
		}

		// Nothing left to compute once all four flags are set.
		if ingress && egress && hasIngressDefaultDeny && hasEgressDefaultDeny {
			break
		}
	}

	// If there are only ingress default-allow rules, then insert a wildcard rule
	if !hasIngressDefaultDeny && ingress {
		p.logger.Debug("Only default-allow policies, synthesizing ingress wildcard-allow rule", logfields.Identity, securityIdentity)
		matchingRules = append(matchingRules, wildcardRule(securityIdentity.LabelArray, true /*ingress*/))
	}

	// Same for egress -- synthesize a wildcard rule
	if !hasEgressDefaultDeny && egress {
		p.logger.Debug("Only default-allow policies, synthesizing egress wildcard-allow rule", logfields.Identity, securityIdentity)
		matchingRules = append(matchingRules, wildcardRule(securityIdentity.LabelArray, false /*egress*/))
	}

	return
}
// wildcardRule generates a wildcard rule that only selects the given identity.
// Depending on direction it allows all entities from (ingress) or to (egress)
// the endpoint; used to synthesize allow-all rules when only non-default-deny
// rules are present.
func wildcardRule(lbls labels.LabelArray, ingress bool) *rule {
	r := &rule{}

	if ingress {
		r.Ingress = []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEntities: []api.Entity{api.EntityAll},
				},
			},
		}
	} else {
		r.Egress = []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEntities: []api.Entity{api.EntityAll},
				},
			},
		}
	}

	// Host endpoints are selected via NodeSelector, others via EndpointSelector.
	es := api.NewESFromLabels(lbls...)
	if lbls.Has(labels.IDNameHost) {
		r.NodeSelector = es
	} else {
		r.EndpointSelector = es
	}
	// Error intentionally discarded: the synthesized rule has a fixed,
	// well-formed shape.
	_ = r.Sanitize()

	return r
}
// GetSelectorPolicy computes the SelectorPolicy for a given identity.
//
// It returns nil if skipRevision is >= than the already calculated version.
// This is used to skip policy calculation when a certain revision delta is
// known to not affect the given identity. Pass a skipRevision of 0 to force
// calculation.
//
// Receiver renamed from 'r' to 'p' for consistency with every other
// Repository method.
func (p *Repository) GetSelectorPolicy(id *identity.Identity, skipRevision uint64, stats GetPolicyStatistics, endpointID uint64) (SelectorPolicy, uint64, error) {
	stats.WaitingForPolicyRepository().Start()
	p.mutex.RLock()
	defer p.mutex.RUnlock()
	stats.WaitingForPolicyRepository().End(true)

	rev := p.GetRevision()

	// Do we already have a given revision?
	// If so, skip calculation.
	if skipRevision >= rev {
		return nil, rev, nil
	}

	stats.SelectorPolicyCalculation().Start()
	// This may call back in to the (locked) repository to generate the
	// selector policy
	sp, updated, err := p.policyCache.updateSelectorPolicy(id, endpointID)
	stats.SelectorPolicyCalculation().EndError(err)

	// If we hit cache, reset the statistics.
	if !updated {
		stats.SelectorPolicyCalculation().Reset()
	}
	return sp, rev, err
}
// ReplaceByResource replaces all rules by resource, returning the complete set of affected endpoints.
func (p *Repository) ReplaceByResource(rules api.Rules, resource ipcachetypes.ResourceID) (affectedIDs *set.Set[identity.NumericIdentity], rev uint64, oldRuleCnt int) {
	if len(resource) == 0 {
		// This should never ever be hit, as the caller should have already validated the resource.
		// Out of paranoia, do nothing.
		p.logger.Error("Attempt to replace rules by resource with an empty resource.")
		return
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	affectedIDs = &set.Set[identity.NumericIdentity]{}
	oldRules := maps.Clone(p.rulesByResource[resource]) // need to clone as `p.del()` mutates this

	// Remove the resource's existing rules from the indices, collecting the
	// identities they selected.
	for key, oldRule := range oldRules {
		for _, subj := range oldRule.getSubjects() {
			affectedIDs.Insert(subj)
		}
		p.del(key)
	}

	// Insert the replacement rules, keyed by their position in the resource.
	if len(rules) > 0 {
		p.rulesByResource[resource] = make(map[ruleKey]*rule, len(rules))
		for i, r := range rules {
			newRule := p.newRule(*r, ruleKey{resource: resource, idx: uint(i)})
			p.insert(newRule)
			for _, subj := range newRule.getSubjects() {
				affectedIDs.Insert(subj)
			}
		}
	}

	// Now that selectors have been allocated for new rules,
	// we may release the old ones.
	for _, r := range oldRules {
		p.releaseRule(r)
	}

	return affectedIDs, p.BumpRevision(), len(oldRules)
}
// ReplaceByLabels implements the somewhat awkward REST local API for providing network policy,
// where the "key" is a list of labels, possibly multiple, that should be removed before
// installing the new rules.
func (p *Repository) ReplaceByLabels(rules api.Rules, searchLabelsList []labels.LabelArray) (affectedIDs *set.Set[identity.NumericIdentity], rev uint64, oldRuleCnt int) {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	var oldRules []*rule
	affectedIDs = &set.Set[identity.NumericIdentity]{}

	// determine outgoing rules: any rule whose labels contain one of the
	// search label arrays is removed.
	for ruleKey, rule := range p.rules {
		if slices.ContainsFunc(searchLabelsList, rule.Labels.Contains) {
			p.del(ruleKey)
			oldRules = append(oldRules, rule)
		}
	}

	// Insert new rules, allocating a subject selector
	for _, r := range rules {
		newRule := p.newRule(*r, ruleKey{idx: p.nextID})
		p.insert(newRule)
		p.nextID++

		for _, nid := range newRule.getSubjects() {
			affectedIDs.Insert(nid)
		}
	}

	// Now that subject selectors have been allocated, release the old rules.
	for _, oldRule := range oldRules {
		for _, nid := range oldRule.getSubjects() {
			affectedIDs.Insert(nid)
		}
		p.releaseRule(oldRule)
	}

	return affectedIDs, p.BumpRevision(), len(oldRules)
}
// GetPolicySnapshot returns a map of all the SelectorPolicies in the repository.
// Takes the read lock and delegates to the policy cache.
func (p *Repository) GetPolicySnapshot() map[identity.NumericIdentity]SelectorPolicy {
	p.mutex.RLock()
	defer p.mutex.RUnlock()
	return p.policyCache.GetPolicySnapshot()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"sync"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/cilium/cilium/pkg/identity"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
testpolicy "github.com/cilium/cilium/pkg/testutils/policy"
)
// mustAdd inserts a rule into the policy repository, panicking if the rule
// fails sanitization.
// This is just a helper function for unit testing.
// Only returns error for signature reasons; the error is always nil.
func (p *Repository) mustAdd(r api.Rule) (uint64, map[uint16]struct{}, error) {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	if err := r.Sanitize(); err != nil {
		panic(err)
	}

	// Idiomatic slice literal instead of make()+index assignment.
	_, rev := p.addListLocked([]*api.Rule{&r})
	return rev, map[uint16]struct{}{}, nil
}
// TestComputePolicyEnforcementAndRules exercises computePolicyEnforcementAndRules
// across the three enforcement modes (default, always, never), checking which
// directions are enforced and which rules are reported as matching.
func TestComputePolicyEnforcementAndRules(t *testing.T) {
	// Cache policy enforcement value from when test was ran to avoid pollution
	// across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)

	SetPolicyEnabled(option.DefaultEnforcement)

	td := newTestData(hivetest.Logger(t))
	repo := td.repo

	fooSelectLabel := labels.ParseSelectLabel("foo")
	fooNumericIdentity := 9001
	// NOTE(review): 'lbls' is not declared in this function; presumably a
	// package-level fixture in a sibling test file carrying the "foo" label —
	// confirm.
	fooIdentity := identity.NewIdentity(identity.NumericIdentity(fooNumericIdentity), lbls)
	td.addIdentity(fooIdentity)
	fooIngressRule1Label := labels.NewLabel(k8sConst.PolicyLabelName, "fooIngressRule1", labels.LabelSourceAny)
	fooIngressRule2Label := labels.NewLabel(k8sConst.PolicyLabelName, "fooIngressRule2", labels.LabelSourceAny)
	fooEgressRule1Label := labels.NewLabel(k8sConst.PolicyLabelName, "fooEgressRule1", labels.LabelSourceAny)
	fooEgressRule2Label := labels.NewLabel(k8sConst.PolicyLabelName, "fooEgressRule2", labels.LabelSourceAny)
	combinedLabel := labels.NewLabel(k8sConst.PolicyLabelName, "combined", labels.LabelSourceAny)
	initIdentity := identity.LookupReservedIdentity(identity.ReservedIdentityInit)

	// lal takes a single label and returns a []labels.LabelArray containing only that label
	lal := func(lbl labels.Label) []labels.LabelArray {
		return []labels.LabelArray{{lbl}}
	}

	// Rule fixtures: two ingress-only rules, two egress-only rules, and one
	// combined ingress+egress rule, all selecting "foo".
	fooIngressRule1 := api.Rule{
		EndpointSelector: api.NewESFromLabels(fooSelectLabel),
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(fooSelectLabel),
					},
				},
			},
		},
		Labels: labels.LabelArray{
			fooIngressRule1Label,
		},
	}
	fooIngressRule1.Sanitize()

	fooIngressRule2 := api.Rule{
		EndpointSelector: api.NewESFromLabels(fooSelectLabel),
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(fooSelectLabel),
					},
				},
			},
		},
		Labels: labels.LabelArray{
			fooIngressRule2Label,
		},
	}
	fooIngressRule2.Sanitize()

	fooEgressRule1 := api.Rule{
		EndpointSelector: api.NewESFromLabels(fooSelectLabel),
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(fooSelectLabel),
					},
				},
			},
		},
		Labels: labels.LabelArray{
			fooEgressRule1Label,
		},
	}
	fooEgressRule1.Sanitize()

	fooEgressRule2 := api.Rule{
		EndpointSelector: api.NewESFromLabels(fooSelectLabel),
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(fooSelectLabel),
					},
				},
			},
		},
		Labels: labels.LabelArray{
			fooEgressRule2Label,
		},
	}
	fooEgressRule2.Sanitize()

	combinedRule := api.Rule{
		EndpointSelector: api.NewESFromLabels(fooSelectLabel),
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(fooSelectLabel),
					},
				},
			},
		},
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(fooSelectLabel),
					},
				},
			},
		},
		Labels: labels.LabelArray{
			combinedLabel,
		},
	}
	combinedRule.Sanitize()

	// Empty repository: no enforcement in either direction.
	ing, egr, _, _, matchingRules := repo.computePolicyEnforcementAndRules(fooIdentity)
	require.False(t, ing, "ingress policy enforcement should not apply since no rules are in repository")
	require.False(t, egr, "egress policy enforcement should not apply since no rules are in repository")
	require.Equal(t, ruleSlice{}, matchingRules, "returned matching rules did not match")

	// One ingress rule: ingress-only enforcement.
	_, _, err := repo.mustAdd(fooIngressRule1)
	require.NoError(t, err, "unable to add rule to policy repository")
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
	require.False(t, egr, "egress policy enforcement should not apply since no egress rules select")
	require.Equal(t, fooIngressRule1, matchingRules[0].Rule, "returned matching rules did not match")

	// Two ingress rules: both must be reported.
	_, _, err = repo.mustAdd(fooIngressRule2)
	require.NoError(t, err, "unable to add rule to policy repository")
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
	require.False(t, egr, "egress policy enforcement should not apply since no egress rules select")
	require.ElementsMatch(t, matchingRules.AsPolicyRules(), api.Rules{&fooIngressRule1, &fooIngressRule2})

	// Delete the first ingress rule by label.
	_, _, numDeleted := repo.ReplaceByLabels(nil, lal(fooIngressRule1Label))
	require.Equal(t, 1, numDeleted)
	require.NoError(t, err, "unable to add rule to policy repository")
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
	require.False(t, egr, "egress policy enforcement should not apply since no egress rules select")
	require.Equal(t, fooIngressRule2, matchingRules[0].Rule, "returned matching rules did not match")

	// Delete the remaining ingress rule: back to no enforcement.
	_, _, numDeleted = repo.ReplaceByLabels(nil, lal(fooIngressRule2Label))
	require.Equal(t, 1, numDeleted)
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.False(t, ing, "ingress policy enforcement should not apply since no rules are in repository")
	require.False(t, egr, "egress policy enforcement should not apply since no rules are in repository")
	require.Equal(t, ruleSlice{}, matchingRules, "returned matching rules did not match")

	// Egress-only rules: egress-only enforcement.
	_, _, err = repo.mustAdd(fooEgressRule1)
	require.NoError(t, err, "unable to add rule to policy repository")
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.False(t, ing, "ingress policy enforcement should not apply since no ingress rules select")
	require.True(t, egr, "egress policy enforcement should apply since egress rules select")
	require.Equal(t, fooEgressRule1, matchingRules[0].Rule, "returned matching rules did not match")
	_, _, numDeleted = repo.ReplaceByLabels(nil, lal(fooEgressRule1Label))
	require.Equal(t, 1, numDeleted)

	_, _, err = repo.mustAdd(fooEgressRule2)
	require.NoError(t, err, "unable to add rule to policy repository")
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.False(t, ing, "ingress policy enforcement should not apply since no ingress rules select")
	require.True(t, egr, "egress policy enforcement should apply since egress rules select")
	require.Equal(t, fooEgressRule2, matchingRules[0].Rule, "returned matching rules did not match")
	_, _, numDeleted = repo.ReplaceByLabels(nil, lal(fooEgressRule2Label))
	require.Equal(t, 1, numDeleted)

	// Combined rule: enforcement in both directions.
	_, _, err = repo.mustAdd(combinedRule)
	require.NoError(t, err, "unable to add rule to policy repository")
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
	require.True(t, egr, "egress policy enforcement should apply since egress rules selects")
	require.Equal(t, combinedRule, matchingRules[0].Rule, "returned matching rules did not match")
	_, _, numDeleted = repo.ReplaceByLabels(nil, lal(combinedLabel))
	require.Equal(t, 1, numDeleted)

	// AlwaysEnforce: both directions enforced even with no rules.
	SetPolicyEnabled(option.AlwaysEnforce)
	require.NoError(t, err, "unable to add rule to policy repository")
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
	require.True(t, egr, "egress policy enforcement should apply since egress rules selects")
	require.Equal(t, ruleSlice{}, matchingRules, "returned matching rules did not match")

	// NeverEnforce: no enforcement and no rules returned, even though a
	// matching rule exists.
	SetPolicyEnabled(option.NeverEnforce)
	_, _, err = repo.mustAdd(combinedRule)
	require.NoError(t, err, "unable to add rule to policy repository")
	ing, egr, _, _, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
	require.False(t, ing, "ingress policy enforcement should not apply since policy enforcement is disabled ")
	require.False(t, egr, "egress policy enforcement should not apply since policy enforcement is disabled")
	require.Nil(t, matchingRules, "no rules should be returned since policy enforcement is disabled")

	// Test init identity.
	SetPolicyEnabled(option.DefaultEnforcement)
	// If the mode is "default", check that the policy is always enforced for
	// endpoints with the reserved:init label. If no policy rules match
	// reserved:init, this drops all ingress and egress traffic.
	ingress, egress, _, _, matchingRules := repo.computePolicyEnforcementAndRules(initIdentity)
	require.True(t, ingress)
	require.True(t, egress)
	require.Equal(t, ruleSlice{}, matchingRules, "no rules should be returned since policy enforcement is disabled")

	// Check that the "always" and "never" modes are not affected.
	SetPolicyEnabled(option.AlwaysEnforce)
	ingress, egress, _, _, _ = repo.computePolicyEnforcementAndRules(initIdentity)
	require.True(t, ingress)
	require.True(t, egress)

	SetPolicyEnabled(option.NeverEnforce)
	ingress, egress, _, _, _ = repo.computePolicyEnforcementAndRules(initIdentity)
	require.False(t, ingress)
	require.False(t, egress)
}
// BenchmarkParseLabel measures the combined cost of inserting rules into the
// policy repository and searching them back by label array.
func BenchmarkParseLabel(b *testing.B) {
	const count = 100
	td := newTestData(hivetest.Logger(b))
	repo := td.repo

	var added, found int

	// Pre-build the label arrays used by the search phase so that work is
	// excluded from the timed loop.
	searchLbls := make([]labels.LabelArray, count)
	for i := range count {
		v := fmt.Sprintf("%d", i)
		searchLbls[i] = labels.LabelArray{
			labels.NewLabel("tag3", v, labels.LabelSourceK8s),
			labels.NewLabel("namespace", "default", labels.LabelSourceK8s),
		}
	}

	for b.Loop() {
		// Insert count rules, each with a unique "foo" selector value and a
		// unique "tag3" policy label.
		for j := range count {
			v := fmt.Sprintf("%d", j)
			if _, _, err := repo.mustAdd(api.Rule{
				EndpointSelector: api.NewESFromLabels(labels.NewLabel("foo", v, labels.LabelSourceK8s), labels.NewLabel("namespace", "default", labels.LabelSourceK8s)),
				Labels: labels.LabelArray{
					labels.ParseLabel("k8s:tag1"),
					labels.NewLabel("namespace", "default", labels.LabelSourceK8s),
					labels.NewLabel("tag3", v, labels.LabelSourceK8s),
				},
			}); err == nil {
				added++
			}
		}
		// Search for every prepared label array under one read lock.
		repo.mutex.RLock()
		for j := range count {
			found += len(repo.searchRLocked(searchLbls[j]))
		}
		repo.mutex.RUnlock()
	}
	b.Log("Added: ", added)
	b.Log("found: ", found)
}
// TestWildcardL3RulesIngress verifies how ingress rules of different kinds
// merge into one L4 policy map for the same subject:
//   - an L3-only rule (selBar1, no ToPorts) becomes the wildcard "0/ANY" entry;
//   - Kafka, HTTP and proxylib ("tester") L7 rules from selBar2 each keep
//     their own port-scoped entry with the matching parser and priority;
//   - ICMP/ICMPv6 field rules map to type-keyed entries ("8/ICMP", "128/ICMPV6").
//
// Each rule carries a unique label so RuleOrigin can be asserted per filter.
func TestWildcardL3RulesIngress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// One distinct label set per rule, used to track rule origin below.
	labelsL3 := labels.LabelArray{labels.ParseLabel("L3")}
	labelsKafka := labels.LabelArray{labels.ParseLabel("kafka")}
	labelsICMP := labels.LabelArray{labels.ParseLabel("icmp")}
	labelsICMPv6 := labels.LabelArray{labels.ParseLabel("icmpv6")}
	labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}
	labelsL7 := labels.LabelArray{labels.ParseLabel("l7")}

	// L3-only allow from bar1: no port restriction at all.
	l3Rule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar1},
				},
			},
		},
		Labels: labelsL3,
	}

	// Kafka L7 rule from bar2 on 9092/TCP.
	kafkaRule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{APIKey: "produce"},
						},
					},
				}},
			},
		},
		Labels: labelsKafka,
	}

	// HTTP L7 rule from bar2 on 80/TCP.
	httpRule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
		Labels: labelsHTTP,
	}

	// Proxylib ("tester") L7 rule from bar2 on 9090/TCP.
	l7Rule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9090", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						L7Proto: "tester",
						L7:      []api.PortRuleL7{map[string]string{"method": "GET", "path": "/"}},
					},
				}},
			},
		},
		Labels: labelsL7,
	}

	// ICMPv4 echo request (type 8) from bar2.
	icmpV4Type := intstr.FromInt(8)
	icmpRule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ICMPs: api.ICMPRules{{
					Fields: []api.ICMPField{{
						Type: &icmpV4Type,
					}},
				}},
			},
		},
		Labels: labelsICMP,
	}

	// ICMPv6 echo request (type 128) from bar2.
	icmpV6Type := intstr.FromInt(128)
	icmpV6Rule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ICMPs: api.ICMPRules{{
					Fields: []api.ICMPField{{
						Type:   &icmpV6Type,
						Family: api.IPv6Family,
					}},
				}},
			},
		},
		Labels: labelsICMPv6,
	}

	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		// L3-only rule renders as the port/protocol wildcard.
		"0/ANY": {
			Port:     0,
			Protocol: api.ProtoAny,
			U8Proto:  0x0,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar1: nil,
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar1: {labelsL3}}),
		},
		// ICMP rules key by type; no L7 policy attaches to them.
		"8/ICMP": {
			Port:     8,
			Protocol: api.ProtoICMP,
			U8Proto:  0x1,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: nil,
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsICMP}}),
		},
		"128/ICMPV6": {
			Port:     128,
			Protocol: api.ProtoICMPv6,
			U8Proto:  0x3A,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: nil,
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsICMPv6}}),
		},
		// Each L7 rule keeps its own parser and listener priority.
		"9092/TCP": {
			Port:     9092,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  true,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeKafka,
					Priority: ListenerPriorityKafka,
					L7Rules: api.L7Rules{
						Kafka: []kafka.PortRule{kafkaRule.Ingress[0].ToPorts[0].Rules.Kafka[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsKafka}}),
		},
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  true,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{httpRule.Ingress[0].ToPorts[0].Rules.HTTP[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsHTTP}}),
		},
		"9090/TCP": {
			Port:     9090,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  true,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: L7ParserType("tester"),
					Priority: ListenerPriorityProxylib,
					L7Rules: api.L7Rules{
						L7Proto: "tester",
						L7:      []api.PortRuleL7{l7Rule.Ingress[0].ToPorts[0].Rules.L7[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsL7}}),
		},
	})
	td.policyMapEquals(t, expected, nil, &l3Rule, &kafkaRule, &httpRule, &l7Rule, &icmpRule, &icmpV6Rule)
}
// TestWildcardL4RulesIngress verifies merging of L4-only and L7 ingress rules
// on the same ports: bar1's port-only rules land as nil (L4 wildcard) entries
// in PerSelectorPolicies, while bar2's Kafka/HTTP rules on the same ports keep
// their L7 policy — both selectors share one L4Filter per port.
func TestWildcardL4RulesIngress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// One distinct label set per rule, used to track rule origin below.
	labelsL4Kafka := labels.LabelArray{labels.ParseLabel("L4-kafka")}
	labelsL7Kafka := labels.LabelArray{labels.ParseLabel("kafka")}
	labelsL4HTTP := labels.LabelArray{labels.ParseLabel("L4-http")}
	labelsL7HTTP := labels.LabelArray{labels.ParseLabel("http")}

	// L4-only allow from bar1 on 9092/TCP (no L7 section).
	l49092Rule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar1},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
		Labels: labelsL4Kafka,
	}

	// Kafka L7 rule from bar2 on the same port.
	kafkaRule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{APIKey: "produce"},
						},
					},
				}},
			},
		},
		Labels: labelsL7Kafka,
	}

	// L4-only allow from bar1 on 80/TCP (no L7 section).
	l480Rule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar1},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
		Labels: labelsL4HTTP,
	}

	// HTTP L7 rule from bar2 on the same port.
	httpRule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
		Labels: labelsL7HTTP,
	}

	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		// One filter per port; bar1 (nil) is L4-only, bar2 carries the L7 policy.
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  true,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar1: nil,
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{httpRule.Ingress[0].ToPorts[0].Rules.HTTP[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				td.cachedSelectorBar1: {labelsL4HTTP},
				td.cachedSelectorBar2: {labelsL7HTTP},
			}),
		},
		"9092/TCP": {
			Port:     9092,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  true,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar1: nil,
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeKafka,
					Priority: ListenerPriorityKafka,
					L7Rules: api.L7Rules{
						Kafka: []kafka.PortRule{kafkaRule.Ingress[0].ToPorts[0].Rules.Kafka[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				td.cachedSelectorBar1: {labelsL4Kafka},
				td.cachedSelectorBar2: {labelsL7Kafka},
			}),
		},
	})
	td.policyMapEquals(t, expected, nil, &l49092Rule, &kafkaRule, &l480Rule, &httpRule)
}
// TestL3DependentL4IngressFromRequires verifies that a FromRequires selector
// in one ingress rule is folded into the FromEndpoints selector of the other
// rule as an additional match requirement: the resulting filter's selector
// must match bar1 AND satisfy the "id in (bar2)" requirement.
func TestL3DependentL4IngressFromRequires(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	l480Rule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{
						selBar1,
					},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			// Requirements-only rule: constrains all other ingress selectors.
			{
				IngressCommonRule: api.IngressCommonRule{
					FromRequires: []api.EndpointSelector{selBar2},
				},
			},
		},
	}

	// bar1 selector augmented with the "id in (bar2)" requirement.
	expectedSelector := api.NewESFromMatchRequirements(map[string]string{"any.id": "bar1"}, []slim_metav1.LabelSelectorRequirement{
		{
			Key:      "any.id",
			Operator: slim_metav1.LabelSelectorOpIn,
			Values:   []string{"bar2"},
		},
	})
	expectedCachedSelector, _ := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, expectedSelector)
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			PerSelectorPolicies: L7DataMap{
				expectedCachedSelector: nil,
			},
			Ingress: true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				expectedCachedSelector: {nil},
			}),
		},
	})
	td.policyMapEquals(t, expected, nil, &l480Rule)
}
// TestL3DependentL4EgressFromRequires is the egress counterpart of
// TestL3DependentL4IngressFromRequires: a ToRequires selector is folded into
// both the bar1 port rule (80/TCP) and the wildcard ToEndpoints rule (0/ANY)
// as an additional "id in (bar2)" match requirement.
func TestL3DependentL4EgressFromRequires(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	l480Rule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{
						selBar1,
					},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			// Wildcard destination constrained by the ToRequires selector.
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{
						api.WildcardEndpointSelector,
					},
					ToRequires: []api.EndpointSelector{selBar2},
				},
			},
		},
	}

	// bar1 selector augmented with the "id in (bar2)" requirement.
	expectedSelector := api.NewESFromMatchRequirements(map[string]string{"any.id": "bar1"}, []slim_metav1.LabelSelectorRequirement{
		{
			Key:      "any.id",
			Operator: slim_metav1.LabelSelectorOpIn,
			Values:   []string{"bar2"},
		},
	})
	// Wildcard selector augmented with the same requirement.
	expectedSelector2 := api.NewESFromMatchRequirements(map[string]string{}, []slim_metav1.LabelSelectorRequirement{
		{
			Key:      "any.id",
			Operator: slim_metav1.LabelSelectorOpIn,
			Values:   []string{"bar2"},
		},
	})
	expectedCachedSelector, _ := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, expectedSelector)
	expectedCachedSelector2, _ := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, expectedSelector2)
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"0/ANY": {
			Port:     0,
			Protocol: "ANY",
			U8Proto:  0x0,
			PerSelectorPolicies: L7DataMap{
				expectedCachedSelector2: nil,
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				expectedCachedSelector2: {nil},
			}),
		},
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			PerSelectorPolicies: L7DataMap{
				expectedCachedSelector: nil,
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				expectedCachedSelector: {nil},
			}),
		},
	})
	td.policyMapEquals(t, nil, expected, &l480Rule)
}
// TestWildcardL3RulesEgress verifies how egress rules of different kinds merge
// into one L4 policy map for the same subject:
//   - an L3-only rule (selBar1, no ToPorts) becomes the wildcard "0/ANY" entry;
//   - DNS and HTTP L7 rules from selBar2 each keep their own port-scoped entry
//     with the matching parser and listener priority;
//   - ICMP/ICMPv6 field rules map to type-keyed entries ("8/ICMP", "128/ICMPV6").
//
// Each rule carries a unique label so RuleOrigin can be asserted per filter.
func TestWildcardL3RulesEgress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// One distinct label set per rule, used to track rule origin below.
	labelsL4 := labels.LabelArray{labels.ParseLabel("L4")}
	labelsDNS := labels.LabelArray{labels.ParseLabel("dns")}
	labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}
	labelsICMP := labels.LabelArray{labels.ParseLabel("icmp")}
	labelsICMPv6 := labels.LabelArray{labels.ParseLabel("icmpv6")}

	// L3-only allow to bar1: no port restriction at all.
	l3Rule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar1},
				},
			},
		},
		Labels: labelsL4,
	}

	// DNS L7 rule to bar2 on 53/UDP.
	dnsRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "53", Protocol: api.ProtoUDP},
					},
					Rules: &api.L7Rules{
						DNS: []api.PortRuleDNS{
							{MatchName: "empire.gov"},
						},
					},
				}},
			},
		},
		Labels: labelsDNS,
	}

	// HTTP L7 rule to bar2 on 80/TCP.
	httpRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
		Labels: labelsHTTP,
	}

	// ICMPv4 echo request (type 8) to bar2.
	icmpV4Type := intstr.FromInt(8)
	icmpRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar2},
				},
				ICMPs: api.ICMPRules{{
					Fields: []api.ICMPField{{
						Type: &icmpV4Type,
					}},
				}},
			},
		},
		Labels: labelsICMP,
	}

	// ICMPv6 echo request (type 128) to bar2. Use the api.IPv6Family constant
	// instead of a bare "IPv6" string for consistency with
	// TestWildcardL3RulesIngress.
	icmpV6Type := intstr.FromInt(128)
	icmpV6Rule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar2},
				},
				ICMPs: api.ICMPRules{{
					Fields: []api.ICMPField{{
						Type:   &icmpV6Type,
						Family: api.IPv6Family,
					}},
				}},
			},
		},
		Labels: labelsICMPv6,
	}

	// Traffic to bar1 should not be forwarded to the DNS or HTTP
	// proxy at all, but if it is (e.g., for visibility), the
	// "0/ANY" rule should allow such traffic through.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"53/UDP": {
			Port:     53,
			Protocol: api.ProtoUDP,
			U8Proto:  0x11,
			Ingress:  false,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeDNS,
					Priority: ListenerPriorityDNS,
					L7Rules: api.L7Rules{
						DNS: []api.PortRuleDNS{dnsRule.Egress[0].ToPorts[0].Rules.DNS[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsDNS}}),
		},
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  false,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{httpRule.Egress[0].ToPorts[0].Rules.HTTP[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsHTTP}}),
		},
		// ICMP rules key by type; no L7 policy attaches to them.
		"8/ICMP": {
			Port:     8,
			Protocol: api.ProtoICMP,
			U8Proto:  0x1,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: nil,
			},
			Ingress:    false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsICMP}}),
		},
		"128/ICMPV6": {
			Port:     128,
			Protocol: api.ProtoICMPv6,
			U8Proto:  0x3A,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: nil,
			},
			Ingress:    false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsICMPv6}}),
		},
		// L3-only rule renders as the port/protocol wildcard.
		"0/ANY": {
			Port:     0,
			Protocol: "ANY",
			U8Proto:  0x0,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar1: nil,
			},
			Ingress:    false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar1: {labelsL4}}),
		},
	})
	td.policyMapEquals(t, nil, expected, &l3Rule, &dnsRule, &httpRule, &icmpRule, &icmpV6Rule)
}
// TestWildcardL4RulesEgress verifies merging of L4-only and L7 egress rules on
// the same ports: bar1's port-only rules land as nil (L4 wildcard) entries in
// PerSelectorPolicies, while bar2's DNS/HTTP rules on the same ports keep
// their L7 policy — both selectors share one L4Filter per port.
func TestWildcardL4RulesEgress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// One distinct label set per rule, used to track rule origin below.
	labelsL3DNS := labels.LabelArray{labels.ParseLabel("L3-dns")}
	labelsL7DNS := labels.LabelArray{labels.ParseLabel("dns")}
	labelsL3HTTP := labels.LabelArray{labels.ParseLabel("L3-http")}
	labelsL7HTTP := labels.LabelArray{labels.ParseLabel("http")}

	// L4-only allow to bar1 on 53/UDP (no L7 section).
	l453Rule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar1},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "53", Protocol: api.ProtoUDP},
					},
				}},
			},
		},
		Labels: labelsL3DNS,
	}

	// DNS L7 rule to bar2 on the same port.
	dnsRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "53", Protocol: api.ProtoUDP},
					},
					Rules: &api.L7Rules{
						DNS: []api.PortRuleDNS{
							{MatchName: "empire.gov"},
						},
					},
				}},
			},
		},
		Labels: labelsL7DNS,
	}

	// L4-only allow to bar1 on 80/TCP (no L7 section).
	l480Rule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar1},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
		Labels: labelsL3HTTP,
	}

	// HTTP L7 rule to bar2 on the same port.
	httpRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
		Labels: labelsL7HTTP,
	}

	// Bar1 should not be forwarded to the proxy, but if it is (e.g., for visibility),
	// the L3/L4 allow should pass it without an explicit L7 wildcard.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  false,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar1: nil,
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{httpRule.Egress[0].ToPorts[0].Rules.HTTP[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				td.cachedSelectorBar1: {labelsL3HTTP},
				td.cachedSelectorBar2: {labelsL7HTTP},
			}),
		},
		"53/UDP": {
			Port:     53,
			Protocol: api.ProtoUDP,
			U8Proto:  0x11,
			Ingress:  false,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar1: nil,
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeDNS,
					Priority: ListenerPriorityDNS,
					L7Rules: api.L7Rules{
						DNS: []api.PortRuleDNS{dnsRule.Egress[0].ToPorts[0].Rules.DNS[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				td.cachedSelectorBar1: {labelsL3DNS},
				td.cachedSelectorBar2: {labelsL7DNS},
			}),
		},
	})
	td.policyMapEquals(t, nil, expected, &l453Rule, &dnsRule, &l480Rule, &httpRule)
}
// TestWildcardCIDRRulesEgress verifies that a CIDR-based L3-only rule and an
// HTTP L7 rule to the same CIDR both survive in the computed policy: the L7
// rule keeps its own 80/TCP entry (HTTP rules can have side-effects and must
// not be elided) while the L3-only rule becomes the "0/ANY" wildcard entry.
func TestWildcardCIDRRulesEgress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	labelsL3 := labels.LabelArray{labels.ParseLabel("L3")}
	labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}

	// Pre-register the CIDR selectors in the selector cache so the expected
	// map can reference the same cached selector instances.
	cidrSlice := api.CIDRSlice{"192.0.0.0/3"}
	cidrSelectors := cidrSlice.GetAsEndpointSelectors()
	var cachedSelectors CachedSelectorSlice
	for i := range cidrSelectors {
		c, _ := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, cidrSelectors[i])
		cachedSelectors = append(cachedSelectors, c)
		// Deferred removals run at function exit, releasing the cache entries.
		defer td.sc.RemoveSelector(c, dummySelectorCacheUser)
	}

	// HTTP GET on 80/TCP to the CIDR.
	l480Get := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToCIDR: api.CIDRSlice{"192.0.0.0/3"},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{
							Port:     "80",
							Protocol: api.ProtoTCP,
						},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{
								Headers: []string{"X-My-Header: true"},
								Method:  "GET",
								Path:    "/",
							},
						},
					},
				}},
			},
		},
		Labels: labelsHTTP,
	}

	// L3-only allow to the same CIDR.
	l3Rule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToCIDR: api.CIDRSlice{"192.0.0.0/3"},
				},
			},
		},
		Labels: labelsL3,
	}

	// Port 80 policy does not need the wildcard, as the "0" port policy will allow the traffic.
	// HTTP rules can have side-effects, so they need to be retained even if shadowed by a wildcard.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  false,
			PerSelectorPolicies: L7DataMap{
				cachedSelectors[0]: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{{
							Headers: []string{"X-My-Header: true"},
							Method:  "GET",
							Path:    "/",
						}},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{cachedSelectors[0]: {labelsHTTP}}),
		},
		"0/ANY": {
			Port:     0,
			Protocol: api.ProtoAny,
			U8Proto:  0x0,
			Ingress:  false,
			PerSelectorPolicies: L7DataMap{
				cachedSelectors[0]: nil,
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{cachedSelectors[0]: {labelsL3}}),
		},
	})
	td.policyMapEquals(t, nil, expected, &l480Get, &l3Rule)
}
// TestWildcardL3RulesIngressFromEntities verifies that an entity-based
// L3-only ingress rule (FromEntities: world) expands into the wildcard
// "0/ANY" entry for all three world selectors (dual-stack: world, world-v4,
// world-v6), while endpoint-based Kafka/HTTP L7 rules keep their own
// port-scoped entries.
func TestWildcardL3RulesIngressFromEntities(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// One distinct label set per rule, used to track rule origin below.
	labelsL3 := labels.LabelArray{labels.ParseLabel("L3")}
	labelsKafka := labels.LabelArray{labels.ParseLabel("kafka")}
	labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}

	// L3-only allow from the "world" entity.
	l3Rule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEntities: api.EntitySlice{api.EntityWorld},
				},
			},
		},
		Labels: labelsL3,
	}

	// Kafka L7 rule from bar2 on 9092/TCP.
	kafkaRule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{APIKey: "produce"},
						},
					},
				}},
			},
		},
		Labels: labelsKafka,
	}

	// HTTP L7 rule from bar2 on 80/TCP.
	httpRule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
		Labels: labelsHTTP,
	}

	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		// The world entity expands to three cached selectors, all wildcarded
		// at L4 and all attributed to the L3 rule's label.
		"0/ANY": {
			Port:     0,
			Protocol: "ANY",
			U8Proto:  0x0,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorWorld:   nil,
				td.cachedSelectorWorldV4: nil,
				td.cachedSelectorWorldV6: nil,
			},
			Ingress: true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				td.cachedSelectorWorld:   {labelsL3},
				td.cachedSelectorWorldV4: {labelsL3},
				td.cachedSelectorWorldV6: {labelsL3},
			}),
		},
		"9092/TCP": {
			Port:     9092,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  true,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeKafka,
					Priority: ListenerPriorityKafka,
					L7Rules: api.L7Rules{
						Kafka: []kafka.PortRule{kafkaRule.Ingress[0].ToPorts[0].Rules.Kafka[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsKafka}}),
		},
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  true,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{httpRule.Ingress[0].ToPorts[0].Rules.HTTP[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsHTTP}}),
		},
	})
	td.policyMapEquals(t, expected, nil, &l3Rule, &kafkaRule, &httpRule)
}
// TestWildcardL3RulesEgressToEntities is the egress counterpart of
// TestWildcardL3RulesIngressFromEntities: a ToEntities: world L3-only rule
// expands into the wildcard "0/ANY" entry for all three world selectors,
// while endpoint-based DNS/HTTP L7 rules keep their own port-scoped entries.
func TestWildcardL3RulesEgressToEntities(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// One distinct label set per rule, used to track rule origin below.
	labelsL3 := labels.LabelArray{labels.ParseLabel("L3")}
	labelsDNS := labels.LabelArray{labels.ParseLabel("dns")}
	labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}

	// L3-only allow to the "world" entity.
	l3Rule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEntities: api.EntitySlice{api.EntityWorld},
				},
			},
		},
		Labels: labelsL3,
	}

	// DNS L7 rule to bar2 on 53/UDP.
	dnsRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "53", Protocol: api.ProtoUDP},
					},
					Rules: &api.L7Rules{
						DNS: []api.PortRuleDNS{
							{MatchName: "empire.gov"},
						},
					},
				}},
			},
		},
		Labels: labelsDNS,
	}

	// HTTP L7 rule to bar2 on 80/TCP.
	httpRule := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{selBar2},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
		Labels: labelsHTTP,
	}

	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		// The world entity expands to three cached selectors, all wildcarded
		// at L4 and all attributed to the L3 rule's label.
		"0/ANY": {
			Port:     0,
			Protocol: "ANY",
			U8Proto:  0x0,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorWorld:   nil,
				td.cachedSelectorWorldV4: nil,
				td.cachedSelectorWorldV6: nil,
			},
			Ingress: false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				td.cachedSelectorWorld:   {labelsL3},
				td.cachedSelectorWorldV4: {labelsL3},
				td.cachedSelectorWorldV6: {labelsL3},
			}),
		},
		"53/UDP": {
			Port:     53,
			Protocol: api.ProtoUDP,
			U8Proto:  0x11,
			Ingress:  false,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeDNS,
					Priority: ListenerPriorityDNS,
					L7Rules: api.L7Rules{
						DNS: []api.PortRuleDNS{dnsRule.Egress[0].ToPorts[0].Rules.DNS[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsDNS}}),
		},
		"80/TCP": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  0x6,
			Ingress:  false,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorBar2: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{httpRule.Egress[0].ToPorts[0].Rules.HTTP[0]},
					},
				},
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsHTTP}}),
		},
	})
	td.policyMapEquals(t, nil, expected, &l3Rule, &dnsRule, &httpRule)
}
// TestMinikubeGettingStarted reproduces the policy combination from the
// "Getting Started with Minikube" guide: an L4-only rule plus two HTTP L7
// rules (one of which selects a different subject) merge into a single 80/TCP
// filter whose HTTP rules include an empty ({}) entry — the L7 wildcard added
// because rule1 allowed the port without any L7 restriction.
func TestMinikubeGettingStarted(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// rule1: L4-only allow on 80/TCP from B to subject A.
	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}

	// rule2: HTTP rule for a different subject (id=app1); it must not
	// contribute to A's policy.
	rule2 := api.Rule{
		EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("id=app1")),
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}

	// rule3: HTTP GET / rule for subject A on the same port as rule1.
	rule3 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}

	// NOTE(review): this map key is "TCP/80" while every sibling test uses
	// the "80/TCP" form; NewL4PolicyMapWithValues appears to accept it —
	// confirm whether the key format is significant.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"TCP/80": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					// The trailing {} is the L7 wildcard from rule1's
					// L4-only allow on the same port.
					HTTP: []api.PortRuleHTTP{{Method: "GET", Path: "/"}, {}},
				},
			},
		},
		Ingress:    true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorB: {nil}}),
	}})
	td.policyMapEquals(t, expected, nil, &rule1, &rule2, &rule3)
}
// TestIterate exercises Repository.Iterate: counting rules, mutating rules in
// place through the callback, and observing the effect of ReplaceByLabels.
func TestIterate(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	repo := td.repo

	// Callback counting how many rules still carry an egress section.
	egressCount := 0
	countEgress := func(r *api.Rule) {
		if len(r.Egress) > 0 {
			egressCount++
		}
	}

	// An empty repository yields no egress rules.
	repo.Iterate(countEgress)
	require.Equal(t, 0, egressCount)

	// Populate the repository with numRules egress rules, each tagged with a
	// unique "tag3" label so individual rules can be targeted later.
	numRules := 10
	ruleLbls := make([]labels.Label, 10)
	for i := range numRules {
		name := fmt.Sprintf("baz%d", i)
		sel := api.NewESFromLabels(
			labels.NewLabel(
				"foo",
				name,
				labels.LabelSourceK8s,
			),
		)
		ruleLbls[i] = labels.NewLabel("tag3", name, labels.LabelSourceK8s)
		_, _, err := repo.mustAdd(api.Rule{
			EndpointSelector: sel,
			Labels:           labels.LabelArray{ruleLbls[i]},
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEndpoints: []api.EndpointSelector{
							sel,
						},
					},
				},
			},
		})
		require.NoError(t, err)
	}

	egressCount = 0
	repo.Iterate(countEgress)
	require.Equal(t, numRules, egressCount)

	// Strip the egress section from the rules carrying labels 1 and 3;
	// Iterate must expose rules by pointer so the mutation sticks.
	modified := 0
	stripEgress := func(r *api.Rule) {
		if r.Labels.Contains(labels.LabelArray{ruleLbls[1]}) || r.Labels.Contains(labels.LabelArray{ruleLbls[3]}) {
			r.Egress = nil
			modified++
		}
	}
	repo.Iterate(stripEgress)
	require.Equal(t, 2, modified)

	egressCount = 0
	repo.Iterate(countEgress)
	require.Equal(t, numRules-modified, egressCount)

	// Deleting by label removes one more rule that still had egress.
	_, _, deleted := repo.ReplaceByLabels(nil, []labels.LabelArray{{ruleLbls[0]}})
	require.Equal(t, 1, deleted)

	egressCount = 0
	repo.Iterate(countEgress)
	require.Equal(t, numRules-modified-deleted, egressCount)
}
// TestDefaultAllow covers the defaulting logic in determining an identity's default rule
// in the presence or absence of rules that do not enable default-deny mode.
func TestDefaultAllow(t *testing.T) {
	// Cache the policy enforcement value from when the test started to avoid
	// pollution across tests.
	oldPolicyEnable := GetPolicyEnabled()
	defer SetPolicyEnabled(oldPolicyEnable)
	SetPolicyEnabled(option.DefaultEnforcement)

	fooSelectLabel := labels.ParseSelectLabel("foo")

	// genRule builds a sanitized rule selecting foo: ingress or egress, with
	// default-deny enabled or disabled for that direction.
	genRule := func(ingress, defaultDeny bool) api.Rule {
		name := fmt.Sprintf("%v_%v", ingress, defaultDeny)
		r := api.Rule{
			EndpointSelector: api.NewESFromLabels(fooSelectLabel),
			Labels:           labels.LabelArray{labels.NewLabel(k8sConst.PolicyLabelName, name, labels.LabelSourceAny)},
		}
		if ingress {
			r.Ingress = []api.IngressRule{{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.NewESFromLabels(fooSelectLabel)}}}}
		} else {
			r.Egress = []api.EgressRule{{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{api.NewESFromLabels(fooSelectLabel)}}}}
		}
		if ingress {
			r.EnableDefaultDeny.Ingress = &defaultDeny
		} else {
			r.EnableDefaultDeny.Egress = &defaultDeny
		}
		require.NoError(t, r.Sanitize())
		return r
	}

	iDeny := genRule(true, true)    // ingress default deny
	iAllow := genRule(true, false)  // ingress default allow
	eDeny := genRule(false, true)   // egress default deny
	eAllow := genRule(false, false) // egress default allow

	type testCase struct {
		rules           []api.Rule
		ingress, egress bool
		ruleC           int // count of rules; indicates wildcard
	}

	ingressCases := []testCase{
		{
			rules: nil, // default case, everything disabled
		},
		{
			rules:   []api.Rule{iDeny},
			ingress: true,
			ruleC:   1,
		},
		{
			rules:   []api.Rule{iAllow}, // Just a default-allow rule
			ingress: true,
			ruleC:   2, // wildcard must be added
		},
		{
			rules:   []api.Rule{iDeny, iAllow}, // default-deny takes precedence, no wildcard
			ingress: true,
			ruleC:   2,
		},
	}

	egressCases := []testCase{
		{
			rules: nil, // default case, everything disabled
		},
		{
			rules:  []api.Rule{eDeny},
			egress: true,
			ruleC:  1,
		},
		{
			rules:  []api.Rule{eAllow}, // Just a default-allow rule
			egress: true,
			ruleC:  2, // wildcard must be added
		},
		{
			rules:  []api.Rule{eDeny, eAllow}, // default-deny takes precedence, no wildcard
			egress: true,
			ruleC:  2,
		},
	}

	// three test runs: ingress, egress, and ingress + egress cartesian
	for i, tc := range ingressCases {
		// Fresh repository per case so rule sets do not accumulate.
		td := newTestData(hivetest.Logger(t))
		td.addIdentity(fooIdentity)
		repo := td.repo

		for _, rule := range tc.rules {
			_, _, err := repo.mustAdd(rule)
			require.NoError(t, err, "unable to add rule to policy repository")
		}
		ing, egr, _, _, matchingRules := repo.computePolicyEnforcementAndRules(fooIdentity)
		require.Equal(t, tc.ingress, ing, "case %d: ingress should match", i)
		require.Equal(t, tc.egress, egr, "case %d: egress should match", i)
		require.Len(t, matchingRules, tc.ruleC, "case %d: rule count should match", i)
	}

	for i, tc := range egressCases {
		td := newTestData(hivetest.Logger(t))
		td.addIdentity(fooIdentity)
		repo := td.repo

		for _, rule := range tc.rules {
			_, _, err := repo.mustAdd(rule)
			require.NoError(t, err, "unable to add rule to policy repository")
		}
		ing, egr, _, _, matchingRules := repo.computePolicyEnforcementAndRules(fooIdentity)
		require.Equal(t, tc.ingress, ing, "case %d: ingress should match", i)
		require.Equal(t, tc.egress, egr, "case %d: egress should match", i)
		require.Len(t, matchingRules, tc.ruleC, "case %d: rule count should match", i)
	}

	// test all combinations of ingress + egress cases
	for e, etc := range egressCases {
		for i, itc := range ingressCases {
			td := newTestData(hivetest.Logger(t))
			td.addIdentity(fooIdentity)
			repo := td.repo

			for _, rule := range etc.rules {
				_, _, err := repo.mustAdd(rule)
				require.NoError(t, err, "unable to add rule to policy repository")
			}
			for _, rule := range itc.rules {
				_, _, err := repo.mustAdd(rule)
				require.NoError(t, err, "unable to add rule to policy repository")
			}
			ing, egr, _, _, matchingRules := repo.computePolicyEnforcementAndRules(fooIdentity)
			require.Equal(t, itc.ingress, ing, "case ingress %d + egress %d: ingress should match", i, e)
			require.Equal(t, etc.egress, egr, "case ingress %d + egress %d: egress should match", i, e)
			// Matching-rule counts are additive across directions.
			require.Len(t, matchingRules, itc.ruleC+etc.ruleC, "case ingress %d + egress %d: rule count should match", i, e)
		}
	}
}
// TestReplaceByResource verifies resource-scoped rule replacement in the
// policy repository: insertion, replacement, and deletion of per-resource
// rule sets, and the resulting bookkeeping in both the repository and the
// selector cache.
func TestReplaceByResource(t *testing.T) {
	// don't use the full testdata() here, since we want to watch
	// selectorcache changes carefully
	repo := NewPolicyRepository(hivetest.Logger(t), nil, nil, nil, nil, testpolicy.NewPolicyMetricsNoop())
	sc := testNewSelectorCache(hivetest.Logger(t), nil)
	repo.selectorCache = sc
	assert.Empty(t, sc.selectors)

	// create 10 rules, each with a subject selector that selects one identity.
	numRules := 10
	rules := make(api.Rules, 0, numRules)
	ids := identity.IdentityMap{}
	// share the dest selector
	destSelector := api.NewESFromLabels(labels.NewLabel("peer", "pod", "k8s"))
	for i := range numRules {
		it := fmt.Sprintf("num-%d", i)
		ids[identity.NumericIdentity(i+100)] = labels.LabelArray{labels.Label{
			Source: labels.LabelSourceK8s,
			Key:    "subject-pod",
			Value:  it,
		}}
		epSelector := api.NewESFromLabels(
			labels.NewLabel(
				"subject-pod",
				it,
				labels.LabelSourceK8s,
			),
		)
		lbl := labels.NewLabel("policy-label", it, labels.LabelSourceK8s)
		rule := &api.Rule{
			EndpointSelector: epSelector,
			Labels:           labels.LabelArray{lbl},
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEndpoints: []api.EndpointSelector{
							destSelector,
						},
					},
				},
			},
		}
		require.NoError(t, rule.Sanitize())
		rules = append(rules, rule)
	}
	sc.UpdateIdentities(ids, nil, &sync.WaitGroup{})

	// rulesMatch asserts that ruleSlice 's' holds exactly the rules in 'rs'.
	rulesMatch := func(s ruleSlice, rs api.Rules) {
		t.Helper()
		ss := make(api.Rules, 0, len(s))
		for _, rule := range s {
			ss = append(ss, &rule.Rule)
		}
		assert.ElementsMatch(t, ss, rs)
	}
	// toSlice flattens a per-resource rule map into a ruleSlice.
	toSlice := func(m map[ruleKey]*rule) ruleSlice {
		out := ruleSlice{}
		for _, v := range m {
			out = append(out, v)
		}
		return out
	}

	rID1 := ipcachetypes.ResourceID("res1")
	rID2 := ipcachetypes.ResourceID("res2")

	// add first resource with rule 0
	affectedIDs, rev, oldRuleCnt := repo.ReplaceByResource(rules[0:1], rID1)
	assert.ElementsMatch(t, []identity.NumericIdentity{100}, affectedIDs.AsSlice())
	assert.EqualValues(t, 2, rev)
	assert.Equal(t, 0, oldRuleCnt)

	// check basic bookkeeping
	assert.Len(t, repo.rules, 1)
	assert.Len(t, repo.rulesByResource, 1)
	assert.Len(t, repo.rulesByResource[rID1], 1)
	rulesMatch(toSlice(repo.rulesByResource[rID1]), rules[0:1])

	// Check that the selectorcache is sane
	// It should have one selector: the subject pod for rule 0
	assert.Len(t, sc.selectors, 1)

	// add second resource with rules 1, 2
	affectedIDs, rev, oldRuleCnt = repo.ReplaceByResource(rules[1:3], rID2)
	assert.ElementsMatch(t, []identity.NumericIdentity{101, 102}, affectedIDs.AsSlice())
	assert.EqualValues(t, 3, rev)
	assert.Equal(t, 0, oldRuleCnt)

	// check basic bookkeeping
	assert.Len(t, repo.rules, 3)
	assert.Len(t, repo.rulesByResource, 2)
	assert.Len(t, repo.rulesByResource[rID1], 1)
	assert.Len(t, repo.rulesByResource[rID2], 2)
	assert.Len(t, sc.selectors, 3)

	// replace rid1 with rules 3, 4.
	// affected IDs should be 100, 103, 104 (for outgoing)
	affectedIDs, rev, oldRuleCnt = repo.ReplaceByResource(rules[3:5], rID1)
	assert.ElementsMatch(t, []identity.NumericIdentity{100, 103, 104}, affectedIDs.AsSlice())
	assert.EqualValues(t, 4, rev)
	assert.Equal(t, 1, oldRuleCnt)

	// check basic bookkeeping
	assert.Len(t, repo.rules, 4)
	assert.Len(t, repo.rulesByResource, 2)
	assert.Len(t, repo.rulesByResource[rID1], 2)
	assert.Len(t, repo.rulesByResource[rID2], 2)
	assert.Len(t, sc.selectors, 4)
	rulesMatch(toSlice(repo.rulesByResource[rID1]), rules[3:5])
	// assert.Equal takes (expected, actual): rules[3] is the expectation.
	assert.Equal(t, *rules[3], repo.rules[ruleKey{
		resource: rID1,
		idx:      0,
	}].Rule)

	// delete rid1
	affectedIDs, _, oldRuleCnt = repo.ReplaceByResource(nil, rID1)
	assert.Len(t, repo.rules, 2)
	assert.Len(t, repo.rulesByResource, 1)
	assert.Len(t, repo.rulesByResource[rID2], 2)
	assert.Len(t, sc.selectors, 2)
	assert.Equal(t, 2, oldRuleCnt)
	assert.ElementsMatch(t, []identity.NumericIdentity{103, 104}, affectedIDs.AsSlice())

	// delete rid1 again (noop)
	affectedIDs, _, oldRuleCnt = repo.ReplaceByResource(nil, rID1)
	assert.Empty(t, affectedIDs.AsSlice())
	assert.Len(t, repo.rules, 2)
	assert.Len(t, repo.rulesByResource, 1)
	assert.Len(t, repo.rulesByResource[rID2], 2)
	assert.Len(t, sc.selectors, 2)
	assert.Equal(t, 0, oldRuleCnt)

	// delete rid2
	affectedIDs, _, oldRuleCnt = repo.ReplaceByResource(nil, rID2)
	assert.ElementsMatch(t, []identity.NumericIdentity{101, 102}, affectedIDs.AsSlice())
	assert.Empty(t, repo.rules)
	assert.Empty(t, repo.rulesByResource)
	assert.Empty(t, sc.selectors)
	assert.Equal(t, 2, oldRuleCnt)
}
// TestReplaceByLabels verifies label-keyed rule replacement in the policy
// repository: insertion, in-place replacement, addition, and deletion, and
// the resulting bookkeeping in both the repository and the selector cache.
//
// Compared to the original, the unused rulesMatch/toSlice helper closures
// (previously kept alive with blank-identifier assignments) are removed.
func TestReplaceByLabels(t *testing.T) {
	// don't use the full testdata() here, since we want to watch
	// selectorcache changes carefully
	repo := NewPolicyRepository(hivetest.Logger(t), nil, nil, nil, nil, testpolicy.NewPolicyMetricsNoop())
	sc := testNewSelectorCache(hivetest.Logger(t), nil)
	repo.selectorCache = sc
	assert.Empty(t, sc.selectors)

	// create 10 rules, each with a subject selector that selects one identity.
	numRules := 10
	rules := make(api.Rules, 0, numRules)
	ids := identity.IdentityMap{}
	ruleLabels := make([]labels.LabelArray, 0, numRules)
	// share the dest selector
	destSelector := api.NewESFromLabels(labels.NewLabel("peer", "pod", "k8s"))
	for i := range numRules {
		it := fmt.Sprintf("num-%d", i)
		ids[identity.NumericIdentity(i+100)] = labels.LabelArray{labels.Label{
			Source: labels.LabelSourceK8s,
			Key:    "subject-pod",
			Value:  it,
		}}
		epSelector := api.NewESFromLabels(
			labels.NewLabel(
				"subject-pod",
				it,
				labels.LabelSourceK8s,
			),
		)
		lbl := labels.NewLabel("policy-label", it, labels.LabelSourceK8s)
		rule := &api.Rule{
			EndpointSelector: epSelector,
			Labels:           labels.LabelArray{lbl},
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEndpoints: []api.EndpointSelector{
							destSelector,
						},
					},
				},
			},
		}
		require.NoError(t, rule.Sanitize())
		rules = append(rules, rule)
		ruleLabels = append(ruleLabels, rule.Labels)
	}
	sc.UpdateIdentities(ids, nil, &sync.WaitGroup{})

	// Add rule 0
	affectedIDs, rev, oldRuleCnt := repo.ReplaceByLabels(rules[0:1], ruleLabels[0:1])
	assert.ElementsMatch(t, []identity.NumericIdentity{100}, affectedIDs.AsSlice())
	assert.EqualValues(t, 2, rev)
	assert.Equal(t, 0, oldRuleCnt)

	// check basic bookkeeping
	assert.Len(t, repo.rules, 1)
	assert.Len(t, sc.selectors, 1)

	// Replace rule 0 with rule 1
	affectedIDs, rev, oldRuleCnt = repo.ReplaceByLabels(rules[1:2], ruleLabels[0:1])
	assert.ElementsMatch(t, []identity.NumericIdentity{100, 101}, affectedIDs.AsSlice())
	assert.EqualValues(t, 3, rev)
	assert.Equal(t, 1, oldRuleCnt)

	// check basic bookkeeping
	assert.Len(t, repo.rules, 1)
	assert.Len(t, sc.selectors, 1)

	// Add rules 2, 3
	affectedIDs, rev, oldRuleCnt = repo.ReplaceByLabels(rules[2:4], ruleLabels[2:4])
	assert.ElementsMatch(t, []identity.NumericIdentity{102, 103}, affectedIDs.AsSlice())
	assert.EqualValues(t, 4, rev)
	assert.Equal(t, 0, oldRuleCnt)

	// check basic bookkeeping
	assert.Len(t, repo.rules, 3)
	assert.Len(t, sc.selectors, 3)

	// Delete rules 2, 3
	affectedIDs, rev, oldRuleCnt = repo.ReplaceByLabels(nil, ruleLabels[2:4])
	assert.ElementsMatch(t, []identity.NumericIdentity{102, 103}, affectedIDs.AsSlice())
	assert.EqualValues(t, 5, rev)
	assert.Equal(t, 2, oldRuleCnt)

	// check basic bookkeeping
	assert.Len(t, repo.rules, 1)
	assert.Len(t, sc.selectors, 1)

	// delete rules 2, 3 again (noop)
	affectedIDs, _, oldRuleCnt = repo.ReplaceByLabels(nil, ruleLabels[2:4])
	assert.Empty(t, affectedIDs.AsSlice())
	assert.Equal(t, 0, oldRuleCnt)

	// check basic bookkeeping
	assert.Len(t, repo.rules, 1)
	assert.Len(t, sc.selectors, 1)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"context"
"errors"
"fmt"
"iter"
"log/slog"
"runtime"
"strings"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/endpoint/regeneration"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/u8proto"
)
// PolicyContext is an interface policy resolution functions use to access the Repository.
// This way testing code can run without mocking a full Repository.
type PolicyContext interface {
	// GetNamespace returns the namespace in which the policy rule is being resolved.
	GetNamespace() string

	// GetSelectorCache returns the SelectorCache.
	GetSelectorCache() *SelectorCache

	// GetTLSContext resolves the given 'api.TLSContext' into CA
	// certs and the public and private keys, using secrets from
	// k8s or from the local file system.
	GetTLSContext(tls *api.TLSContext) (ca, public, private string, inlineSecrets bool, err error)

	// GetEnvoyHTTPRules translates the given 'api.L7Rules' into
	// the protobuf representation the Envoy can consume. The bool
	// return parameter tells whether the rule enforcement can
	// be short-circuited upon the first allowing rule. This is
	// false if any of the rules has side-effects, requiring all
	// such rules being evaluated.
	GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool)

	// IsDeny returns true if the policy computation should be done for the
	// policy deny case. This function returns different values depending on the
	// code path as it can be changed during the policy calculation.
	IsDeny() bool

	// SetDeny sets the Deny field of the PolicyContext and returns the old
	// value stored.
	SetDeny(newValue bool) (oldValue bool)

	// DefaultDenyIngress returns true if default deny is enabled for ingress.
	DefaultDenyIngress() bool

	// DefaultDenyEgress returns true if default deny is enabled for egress.
	DefaultDenyEgress() bool

	// SetOrigin records the origin of the rule currently being resolved.
	SetOrigin(ruleOrigin)

	// Origin returns the origin recorded by SetOrigin.
	Origin() ruleOrigin

	// GetLogger returns the logger to use during policy resolution.
	GetLogger() *slog.Logger

	// PolicyTrace logs a formatted policy-trace message when tracing is enabled.
	PolicyTrace(format string, a ...any)
}
// policyContext is the concrete PolicyContext implementation backed by a
// policy Repository.
type policyContext struct {
	repo *Repository
	// ns is the namespace in which the policy rule is being resolved.
	ns string
	// isDeny this field is set to true if the given policy computation should
	// be done for the policy deny.
	isDeny bool
	// defaultDenyIngress records whether default deny is enabled for ingress.
	defaultDenyIngress bool
	// defaultDenyEgress records whether default deny is enabled for egress.
	defaultDenyEgress bool
	// origin is the origin of the rule currently being resolved.
	origin ruleOrigin
	// logger and traceEnabled together gate PolicyTrace output: tracing only
	// happens when logger is non-nil and traceEnabled is true.
	logger       *slog.Logger
	traceEnabled bool
}

// Compile-time check that policyContext implements PolicyContext.
var _ PolicyContext = &policyContext{}
// GetNamespace returns the namespace in which the policy rule is being resolved.
func (pc *policyContext) GetNamespace() string {
	return pc.ns
}
// GetSelectorCache returns the selector cache used by the Repository.
func (pc *policyContext) GetSelectorCache() *SelectorCache {
	return pc.repo.GetSelectorCache()
}
// GetTLSContext resolves the given TLS context into CA certs and key material
// via the Repository's CertificateManager, scoped to this context's namespace.
// It fails when no CertificateManager is configured.
func (pc *policyContext) GetTLSContext(tls *api.TLSContext) (ca, public, private string, inlineSecrets bool, err error) {
	if pc.repo.certManager == nil {
		return "", "", "", false, fmt.Errorf("No Certificate Manager set on Policy Repository")
	}
	return pc.repo.certManager.GetTLSContext(context.TODO(), tls, pc.ns)
}
// GetEnvoyHTTPRules translates the given L7 rules into Envoy's protobuf
// representation, scoped to this context's namespace.
func (pc *policyContext) GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool) {
	return pc.repo.GetEnvoyHTTPRules(l7Rules, pc.ns)
}
// IsDeny returns true if the policy computation should be done for the
// policy deny case. The value may change during policy calculation via
// SetDeny.
func (pc *policyContext) IsDeny() bool {
	return pc.isDeny
}
// SetDeny sets the deny flag of the PolicyContext and returns the value it
// replaced.
func (pc *policyContext) SetDeny(deny bool) bool {
	old := pc.isDeny
	pc.isDeny = deny
	return old
}
// DefaultDenyIngress returns true if default deny is enabled for ingress.
func (pc *policyContext) DefaultDenyIngress() bool {
	return pc.defaultDenyIngress
}
// DefaultDenyEgress returns true if default deny is enabled for egress.
func (pc *policyContext) DefaultDenyEgress() bool {
	return pc.defaultDenyEgress
}
// SetOrigin records the origin of the rule currently being resolved.
func (pc *policyContext) SetOrigin(ro ruleOrigin) {
	pc.origin = ro
}
// Origin returns the rule origin recorded by SetOrigin.
func (pc *policyContext) Origin() ruleOrigin {
	return pc.origin
}
// GetLogger returns the logger associated with this policy context.
func (pc *policyContext) GetLogger() *slog.Logger {
	return pc.logger
}
// PolicyTrace logs a formatted trace message at Info level, with trailing
// whitespace stripped. It is a no-op when tracing is disabled or no logger
// is set.
func (pc *policyContext) PolicyTrace(format string, a ...any) {
	if pc.logger == nil || !pc.traceEnabled {
		return
	}
	pc.logger.Info(fmt.Sprintf(strings.TrimRight(format, " \t\n"), a...))
}
// SelectorPolicy represents a selectorPolicy, previously resolved from
// the policy repository and ready to be distilled against a set of identities
// to compute datapath-level policy configuration.
type SelectorPolicy interface {
	// RedirectFilters returns an iterator over each L4Filter (paired with its
	// per-selector policy) that requires a redirect, so the endpoint can
	// ensure it has created all the needed redirects before a new
	// EndpointPolicy is created.
	RedirectFilters() iter.Seq2[*L4Filter, PerSelectorPolicyTuple]

	// DistillPolicy returns the policy in terms of connectivity to peer
	// Identities.
	DistillPolicy(logger *slog.Logger, owner PolicyOwner, redirects map[string]uint16) *EndpointPolicy
}
// selectorPolicy is a structure which contains the resolved policy for a
// particular Identity across all layers (L3, L4, and L7), with the policy
// still determined in terms of EndpointSelectors.
type selectorPolicy struct {
	// Revision is the revision of the policy repository used to generate
	// this selectorPolicy.
	Revision uint64

	// SelectorCache managing selectors in L4Policy.
	SelectorCache *SelectorCache

	// L4Policy contains the computed L4 and L7 policy.
	L4Policy L4Policy

	// IngressPolicyEnabled specifies whether this policy contains any policy
	// at ingress. When false, DistillPolicy allows all identities at ingress.
	IngressPolicyEnabled bool

	// EgressPolicyEnabled specifies whether this policy contains any policy
	// at egress. When false, DistillPolicy allows all identities at egress.
	EgressPolicyEnabled bool
}
// Attach attaches the policy context to the computed L4Policy.
func (sp *selectorPolicy) Attach(ctx PolicyContext) {
	sp.L4Policy.Attach(ctx)
}
// EndpointPolicy is a structure which contains the resolved policy across all
// layers (L3, L4, and L7), distilled against a set of identities.
type EndpointPolicy struct {
	// Note that all Endpoints sharing the same identity will be
	// referring to a shared selectorPolicy!
	*selectorPolicy

	// VersionHandle represents the version of the SelectorCache 'policyMapState' was generated
	// from.
	// Changes after this version appear in 'policyMapChanges'.
	// This is updated when incremental changes are applied.
	VersionHandle *versioned.VersionHandle

	// policyMapState contains the state of this policy as it relates to the
	// datapath. In the future, this will be factored out of this object to
	// decouple the policy as it relates to the datapath vs. its userspace
	// representation.
	// It maps each Key to the proxy port if proxy redirection is needed.
	// Proxy port 0 indicates no proxy redirection.
	// All fields within the Key and the proxy port must be in host byte-order.
	// Must only be accessed with PolicyOwner (aka Endpoint) lock taken.
	policyMapState mapState

	// policyMapChanges collects pending changes to the PolicyMapState.
	// Consumed via ConsumeMapChanges.
	policyMapChanges MapChanges

	// PolicyOwner describes any type which consumes this EndpointPolicy object.
	PolicyOwner PolicyOwner

	// Redirects contains the proxy ports needed for this EndpointPolicy.
	// If any redirects are missing a new policy will be computed to rectify it, so this is
	// constant for the lifetime of this EndpointPolicy.
	Redirects map[string]uint16
}
// LookupRedirectPort returns the redirect L4 proxy port for the given input parameters.
// Returns an error if no redirect port is known for the computed proxy ID.
// This is called when accumulating incremental map changes, endpoint lock must not be taken.
func (ep *EndpointPolicy) LookupRedirectPort(ingress bool, protocol string, port uint16, listener string) (uint16, error) {
	proxyID := ProxyID(uint16(ep.PolicyOwner.GetID()), ingress, protocol, port, listener)
	proxyPort, exists := ep.Redirects[proxyID]
	if !exists {
		return 0, fmt.Errorf("Proxy port for redirect %q not found", proxyID)
	}
	return proxyPort, nil
}
// Lookup finds the policy verdict applicable to the given 'key' using the same precedence logic
// between L3 and L4-only policies like the bpf datapath when both match the given 'key'.
// To be used in testing in place of the bpf datapath when full integration testing is not desired.
// Returns the closest matching covering policy entry, the labels of the rules that contributed to
// that verdict, and 'true' if found.
// Returns a deny entry when a match is not found, mirroring the datapath default deny behavior.
// 'key' must not have a wildcard identity or port.
func (ep *EndpointPolicy) Lookup(key Key) (MapStateEntry, RuleMeta, bool) {
	e, ok := ep.policyMapState.lookup(key)
	return e.MapStateEntry, e.derivedFromRules.Value(), ok
}
// CopyMapStateFrom copies every policy map entry of 'm' into this policy's
// map state.
func (ep *EndpointPolicy) CopyMapStateFrom(m MapStateMap) {
	for k, e := range m {
		ep.policyMapState.entries[k] = NewMapStateEntry(e)
	}
}
// PolicyOwner is anything which consumes an EndpointPolicy.
type PolicyOwner interface {
	// GetID returns the owner's numeric ID, used e.g. to compute proxy IDs.
	GetID() uint64
	// GetNamedPort resolves a named port to a port number for the given
	// direction and protocol.
	GetNamedPort(ingress bool, name string, proto u8proto.U8proto) uint16
	// PolicyDebug emits a debug message with structured attributes.
	PolicyDebug(msg string, attrs ...any)
	// IsHost reports whether the owner is the host endpoint.
	IsHost() bool
	// MapStateSize returns a size hint used when allocating the policy map state.
	MapStateSize() int
	// RegenerateIfAlive requests a regeneration of the owner if it is still
	// alive; the returned channel reports completion.
	RegenerateIfAlive(regenMetadata *regeneration.ExternalRegenerationMetadata) <-chan bool
}
// newSelectorPolicy returns an empty selectorPolicy stub at revision 0,
// bound to the given selector cache.
func newSelectorPolicy(selectorCache *SelectorCache) *selectorPolicy {
	sp := &selectorPolicy{
		SelectorCache: selectorCache,
		L4Policy:      NewL4Policy(0),
	}
	return sp
}
// insertUser adds a user to the L4Policy so that incremental
// updates of the L4Policy may be forwarded.
func (sp *selectorPolicy) insertUser(user *EndpointPolicy) {
	sp.L4Policy.insertUser(user)
}
// removeUser removes a user from the L4Policy so the EndpointPolicy
// can be freed when it is no longer needed.
func (sp *selectorPolicy) removeUser(user *EndpointPolicy) {
	sp.L4Policy.removeUser(user)
}
// detach releases resources held by a selectorPolicy to enable
// successful eventual GC. Note that the selectorPolicy itself is not
// modified in any way, so that it can be used concurrently.
// The endpointID argument is only necessary if isDelete is false.
// It ensures that detach does not call a regeneration trigger on
// the same endpoint that initiated a selector policy update.
func (sp *selectorPolicy) detach(isDelete bool, endpointID uint64) {
	sp.L4Policy.detach(sp.SelectorCache, isDelete, endpointID)
}
// DistillPolicy filters down the specified selectorPolicy (which acts
// upon selectors) into a set of concrete map entries based on the
// SelectorCache. These can subsequently be plumbed into the datapath.
//
// Called without holding the Selector cache or Repository locks.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (p *selectorPolicy) DistillPolicy(logger *slog.Logger, policyOwner PolicyOwner, redirects map[string]uint16) *EndpointPolicy {
	var calculatedPolicy *EndpointPolicy

	// EndpointPolicy is initialized while 'GetCurrentVersionHandleFunc' keeps the selector
	// cache write locked. This synchronizes the SelectorCache handle creation and the insertion
	// of the new policy to the selectorPolicy before any new incremental updates can be
	// generated.
	//
	// With this we have the following guarantees:
	// - Selections seen with the 'version' are the ones available at the time of the 'version'
	//   creation, and the IDs therein have been applied to all Selectors cached at the time.
	// - All further incremental updates are delivered to 'policyMapChanges' as whole
	//   transactions, i.e, changes to all selectors due to addition or deletion of new/old
	//   identities are visible in the set of changes processed and returned by
	//   ConsumeMapChanges().
	p.SelectorCache.GetVersionHandleFunc(func(version *versioned.VersionHandle) {
		calculatedPolicy = &EndpointPolicy{
			selectorPolicy: p,
			VersionHandle:  version,
			policyMapState: newMapState(logger, policyOwner.MapStateSize()),
			policyMapChanges: MapChanges{
				logger:       logger,
				firstVersion: version.Version(),
			},
			PolicyOwner: policyOwner,
			Redirects:   redirects,
		}
		// Register the new EndpointPolicy as a receiver of incremental
		// updates before selector cache lock is released by 'GetCurrentVersionHandleFunc'.
		p.insertUser(calculatedPolicy)
	})

	// When policy is disabled for a direction, allow all identities in that
	// direction (default allow).
	if !p.IngressPolicyEnabled || !p.EgressPolicyEnabled {
		calculatedPolicy.policyMapState.allowAllIdentities(
			!p.IngressPolicyEnabled, !p.EgressPolicyEnabled)
	}

	// Must come after the 'insertUser()' above to guarantee
	// PolicyMapChanges will contain all changes that are applied
	// after the computation of PolicyMapState has started.
	calculatedPolicy.toMapState(logger)
	if !policyOwner.IsHost() {
		calculatedPolicy.policyMapState.determineAllowLocalhostIngress()
	}

	return calculatedPolicy
}
// Ready releases the handle on a selector cache version so that stale state
// can be released. This should be called when the policy has been realized.
// Returns the error from closing the handle, if any.
func (ep *EndpointPolicy) Ready() error {
	// Release resources held for this version and drop the handle.
	err := ep.VersionHandle.Close()
	ep.VersionHandle = nil
	return err
}
// Detach removes EndpointPolicy references from selectorPolicy
// to allow the EndpointPolicy to be GC'd.
// PolicyOwner (aka Endpoint) is also locked during this call.
func (ep *EndpointPolicy) Detach(logger *slog.Logger) {
	ep.selectorPolicy.removeUser(ep)

	// Catch a Ready() call that was missed previously: if Ready() succeeds
	// here, the handle was still open and we warn about the caller.
	if ep.Ready() == nil {
		_, file, line, _ := runtime.Caller(1)
		logger.Warn(
			"Detach: EndpointPolicy was not marked as Ready",
			logfields.File, file,
			logfields.Line, line,
		)
	}

	// Also release the version handle held for incremental updates, if any.
	// This must be done after the removeUser() call above, so that we do not
	// get any new version handles any more!
	ep.policyMapChanges.detach()
}
// Len returns the number of entries in the computed policy map.
func (ep *EndpointPolicy) Len() int {
	return ep.policyMapState.Len()
}
// Get returns the policy map entry for 'key', and whether it exists.
func (ep *EndpointPolicy) Get(key Key) (MapStateEntry, bool) {
	return ep.policyMapState.Get(key)
}
// errMissingKey is returned by GetRuleMeta when the key has no entry.
var errMissingKey = errors.New("Key not found")

// GetRuleMeta returns the list of labels of the rules that contributed
// to the entry at this key.
// The returned string is the string representation of a LabelArrayList.
func (ep *EndpointPolicy) GetRuleMeta(k Key) (RuleMeta, error) {
	e, ok := ep.policyMapState.get(k)
	if !ok {
		return RuleMeta{}, errMissingKey
	}
	return e.derivedFromRules.Value(), nil
}
// Entries returns an iterator over all key/entry pairs of the computed
// policy map.
func (ep *EndpointPolicy) Entries() iter.Seq2[Key, MapStateEntry] {
	return func(yield func(Key, MapStateEntry) bool) {
		ep.policyMapState.ForEach(yield)
	}
}
// Equals reports whether the computed policy map equals 'other'.
func (ep *EndpointPolicy) Equals(other MapStateMap) bool {
	return ep.policyMapState.Equals(other)
}
// Diff returns the difference between the computed policy map and 'expected'
// as a string.
func (ep *EndpointPolicy) Diff(expected MapStateMap) string {
	return ep.policyMapState.Diff(expected)
}
// Empty reports whether the computed policy map has no entries.
func (ep *EndpointPolicy) Empty() bool {
	return ep.policyMapState.Empty()
}
// Updated returns an iterator for all key/entry pairs in 'p' that are either new or updated
// compared to the entries in 'realized'.
// Here 'realized' is another EndpointPolicy.
// This can be used to figure out which entries need to be added to or updated in 'realized'.
func (ep *EndpointPolicy) Updated(realized *EndpointPolicy) iter.Seq2[Key, MapStateEntry] {
	return func(yield func(Key, MapStateEntry) bool) {
		ep.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool {
			old, ok := realized.policyMapState.Get(key)
			if ok && old == entry {
				// Unchanged entry; skip it.
				return true
			}
			return yield(key, entry)
		})
	}
}
// Missing returns an iterator for all key/entry pairs in 'realized' that are missing from 'p'.
// Here 'realized' is another EndpointPolicy.
// This can be used to figure out which entries in 'realized' need to be deleted.
func (ep *EndpointPolicy) Missing(realized *EndpointPolicy) iter.Seq2[Key, MapStateEntry] {
	return func(yield func(Key, MapStateEntry) bool) {
		realized.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool {
			// Keys still present in the desired state are not missing.
			if _, ok := ep.policyMapState.Get(key); ok {
				return true
			}
			return yield(key, entry)
		})
	}
}
// UpdatedMap returns an iterator for all key/entry pairs in 'p' that are either new or updated
// compared to the entries in 'realized'.
// Here 'realized' is a MapStateMap.
// This can be used to figure out which entries need to be added to or updated in 'realized'.
func (ep *EndpointPolicy) UpdatedMap(realized MapStateMap) iter.Seq2[Key, MapStateEntry] {
	return func(yield func(Key, MapStateEntry) bool) {
		ep.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool {
			old, ok := realized[key]
			if ok && old == entry {
				// Unchanged entry; skip it.
				return true
			}
			return yield(key, entry)
		})
	}
}
// MissingMap returns an iterator for all key/entry pairs in 'realized' that are missing from 'p'.
// Here 'realized' is a MapStateMap.
// This can be used to figure out which entries in 'realized' need to be deleted.
func (ep *EndpointPolicy) MissingMap(realized MapStateMap) iter.Seq2[Key, MapStateEntry] {
	return func(yield func(Key, MapStateEntry) bool) {
		for k, v := range realized {
			// Keys still present in the desired state are not missing.
			if _, ok := ep.policyMapState.Get(k); ok {
				continue
			}
			if !yield(k, v) {
				return
			}
		}
	}
}
// RevertChanges undoes the given ChangeState on the computed policy map.
// SelectorCache used as Identities interface which only has GetPrefix() that needs no lock
func (ep *EndpointPolicy) RevertChanges(changes ChangeState) {
	ep.policyMapState.revertChanges(changes)
}
// toMapState transforms the EndpointPolicy.L4Policy into
// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
//
// Called with selectorcache locked for reading, without holding the
// Repository lock. PolicyOwner (aka Endpoint) is also unlocked during this
// call, but the Endpoint's build mutex is held.
func (ep *EndpointPolicy) toMapState(logger *slog.Logger) {
	ep.L4Policy.Ingress.toMapState(logger, ep)
	ep.L4Policy.Egress.toMapState(logger, ep)
}
// toMapState transforms this direction's L4 policy into the
// datapath-friendly format inside EndpointPolicy.PolicyMapState.
//
// Called with selectorcache locked for reading, without holding the
// Repository lock. PolicyOwner (aka Endpoint) is also unlocked during this
// call, but the Endpoint's build mutex is held.
func (dp L4DirectionPolicy) toMapState(logger *slog.Logger, ep *EndpointPolicy) {
	dp.PortRules.ForEach(func(filter *L4Filter) bool {
		filter.toMapState(logger, ep, dp.features, ChangeState{})
		return true
	})
}
// PerSelectorPolicyTuple pairs a PerSelectorPolicy with the CachedSelector
// it applies to, as yielded by RedirectFilters.
type PerSelectorPolicyTuple struct {
	Policy   *PerSelectorPolicy
	Selector CachedSelector
}
// RedirectFilters returns an iterator for each L4Filter with a redirect in
// the policy, visiting ingress filters before egress filters.
func (sp *selectorPolicy) RedirectFilters() iter.Seq2[*L4Filter, PerSelectorPolicyTuple] {
	return func(yield func(*L4Filter, PerSelectorPolicyTuple) bool) {
		// Only continue to egress if ingress iteration was not stopped.
		if sp.L4Policy.Ingress.forEachRedirectFilter(yield) {
			sp.L4Policy.Egress.forEachRedirectFilter(yield)
		}
	}
}
// forEachRedirectFilter calls 'yield' for each L4Filter/per-selector-policy
// pair in this direction that requires a redirect. It returns false when
// 'yield' has returned false, and true otherwise.
//
// Bug fix: once 'yield' returns false, iteration must stop immediately.
// The previous implementation kept ranging over l4.PerSelectorPolicies and
// could invoke 'yield' again after it had returned false, which panics when
// 'yield' comes from a range-over-func loop (iterator contract violation).
func (l4policy L4DirectionPolicy) forEachRedirectFilter(yield func(*L4Filter, PerSelectorPolicyTuple) bool) bool {
	ok := true
	l4policy.PortRules.ForEach(func(l4 *L4Filter) bool {
		for cs, ps := range l4.PerSelectorPolicies {
			if ps != nil && ps.IsRedirect() {
				ok = yield(l4, PerSelectorPolicyTuple{ps, cs})
				if !ok {
					// Stop: calling yield again after it returned false
					// would panic in a range-over-func consumer.
					break
				}
			}
		}
		return ok
	})
	return ok
}
// ConsumeMapChanges applies accumulated MapChanges to EndpointPolicy 'p' and returns a summary of changes.
// Caller is responsible for calling the returned 'closer' to release resources held for the new version!
// 'closer' may not be called while selector cache is locked!
func (p *EndpointPolicy) ConsumeMapChanges() (closer func(), changes ChangeState) {
	features := p.selectorPolicy.L4Policy.Ingress.features | p.selectorPolicy.L4Policy.Egress.features
	version, changes := p.policyMapChanges.consumeMapChanges(p, features)

	// Default closer is a no-op; it is replaced below only when a new
	// version handle needs to be released by the caller.
	closer = func() {}
	if version.IsValid() {
		var msg string
		// update the version handle in p.VersionHandle so that any follow-on processing
		// acts on the basis of the new version
		if p.VersionHandle.IsValid() {
			p.VersionHandle.Close()
			msg = "ConsumeMapChanges: updated valid version"
		} else {
			closer = func() {
				// p.VersionHandle was not valid, close it
				p.Ready()
			}
			msg = "ConsumeMapChanges: new incremental version"
		}
		p.VersionHandle = version

		p.PolicyOwner.PolicyDebug(msg,
			logfields.Version, version,
			logfields.Changes, changes,
		)
	}
	return closer, changes
}
// NewEndpointPolicy returns an empty EndpointPolicy stub backed by the
// repository's selector cache.
func NewEndpointPolicy(logger *slog.Logger, repo PolicyRepository) *EndpointPolicy {
	sp := newSelectorPolicy(repo.GetSelectorCache())
	return &EndpointPolicy{
		selectorPolicy: sp,
		policyMapState: emptyMapState(logger),
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"net/netip"
"sync"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
k8stypes "k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
)
// GenerateL3IngressDenyRules creates 'numRules' identical L3 ingress-deny
// rules (subject "k8s:foo", peer "bar") together with a generated identity
// map, for use in benchmarks and tests.
func GenerateL3IngressDenyRules(numRules int) (api.Rules, identity.IdentityMap) {
	subjectSelector := api.NewESFromLabels(labels.ParseSelectLabel("k8s:foo"))
	peerSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))

	// Change ingDenyRule and the rule in the loop below to change what type
	// of rules are added into the policy repository.
	ingDenyRule := api.IngressDenyRule{
		IngressCommonRule: api.IngressCommonRule{
			FromEndpoints: []api.EndpointSelector{peerSelector},
		},
	}

	rules := make(api.Rules, 0, numRules)
	for range numRules {
		r := api.Rule{
			EndpointSelector: subjectSelector,
			IngressDeny:      []api.IngressDenyRule{ingDenyRule},
		}
		r.Sanitize() // error deliberately ignored; input is statically valid
		rules = append(rules, &r)
	}
	return rules, generateNumIdentities(3000)
}
// generateCIDRIdentities generates one local-scope CIDR identity for each
// unique CIDR referenced by the egress/ingress allow and deny sections of
// 'rules'.
func generateCIDRIdentities(rules api.Rules) identity.IdentityMap {
	c := make(identity.IdentityMap, len(rules))
	prefixes := make(map[string]identity.NumericIdentity)
	id := identity.IdentityScopeLocal

	// addPrefix allocates the next local identity for a prefix not yet seen.
	addPrefix := func(prefix string) {
		if _, seen := prefixes[prefix]; seen {
			return
		}
		id++
		c[id] = labels.GetCIDRLabels(netip.MustParsePrefix(prefix)).LabelArray()
		prefixes[prefix] = id
	}

	for _, r := range rules {
		for _, eg := range r.Egress {
			for _, cidr := range eg.ToCIDR {
				addPrefix(string(cidr))
			}
		}
		for _, eg := range r.EgressDeny {
			for _, cidr := range eg.ToCIDR {
				addPrefix(string(cidr))
			}
		}
		for _, in := range r.Ingress {
			for _, cidr := range in.FromCIDR {
				addPrefix(string(cidr))
			}
		}
		for _, in := range r.IngressDeny {
			for _, cidr := range in.FromCIDR {
				addPrefix(string(cidr))
			}
		}
	}
	return c
}
// generateCIDREgressRule deterministically derives an egress allow rule from
// 'i': a pseudo-random CIDR (prefix length cycling over /8../32) and a TCP
// destination port in [80, 176].
func generateCIDREgressRule(i int) api.EgressRule {
	port := fmt.Sprintf("%d", 80+i%97)
	prefix := []string{"8", "16", "24", "28", "32"}[i%5]

	var base string
	switch prefix {
	case "8":
		base = []string{"10.0.0.0", "192.0.0.0", "244.0.0.0"}[i%3]
	case "16":
		base = fmt.Sprintf([]string{"10.%d.0.0", "192.%d.0.0", "244.%d.0.0"}[i%3], i%17)
	case "24":
		base = fmt.Sprintf([]string{"10.%d.%d.0", "192.%d.%d.0", "244.%d.%d.0"}[i%3], i%17, i%121)
	case "28":
		base = fmt.Sprintf([]string{"10.%d.%d.%d", "192.%d.%d.%d", "244.%d.%d.%d"}[i%3], i%17, i%121, i%16<<4)
	case "32":
		base = fmt.Sprintf([]string{"10.%d.%d.%d", "192.%d.%d.%d", "244.%d.%d.%d"}[i%3], i%17, i%121, i%255)
	}

	return api.EgressRule{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDR: []api.CIDR{api.CIDR(base + "/" + prefix)},
		},
		ToPorts: []api.PortRule{
			{
				Ports: []api.PortProtocol{
					{
						Port:     port,
						Protocol: api.ProtoTCP,
					},
				},
			},
		},
	}
}
// generateCIDREgressDenyRule deterministically builds the i'th CIDR egress
// deny rule. It mirrors generateCIDREgressRule but with different moduli and
// an offset prefix-length cycle so allow and deny rules do not coincide.
func generateCIDREgressDenyRule(i int) api.EgressDenyRule {
	dstPort := fmt.Sprintf("%d", 80+i%131)
	maskBits := []string{"8", "16", "24", "28", "32"}[(i+21)%5]
	var addr string
	switch maskBits {
	case "8":
		addr = []string{"10.0.0.0", "192.0.0.0", "244.0.0.0"}[i%3]
	case "16":
		tmpl := []string{"10.%d.0.0", "192.%d.0.0", "244.%d.0.0"}[i%3]
		addr = fmt.Sprintf(tmpl, i%23)
	case "24":
		tmpl := []string{"10.%d.%d.0", "192.%d.%d.0", "244.%d.%d.0"}[i%3]
		addr = fmt.Sprintf(tmpl, i%23, i%119)
	case "28":
		tmpl := []string{"10.%d.%d.%d", "192.%d.%d.%d", "244.%d.%d.%d"}[i%3]
		addr = fmt.Sprintf(tmpl, i%23, i%119, i%15<<4)
	case "32":
		tmpl := []string{"10.%d.%d.%d", "192.%d.%d.%d", "244.%d.%d.%d"}[i%3]
		addr = fmt.Sprintf(tmpl, i%23, i%119, i%253)
	}
	return api.EgressDenyRule{
		EgressCommonRule: api.EgressCommonRule{
			ToCIDR: []api.CIDR{api.CIDR(addr + "/" + maskBits)},
		},
		ToPorts: []api.PortDenyRule{
			{
				Ports: []api.PortProtocol{
					{
						Port:     dstPort,
						Protocol: api.ProtoTCP,
					},
				},
			},
		},
	}
}
// GenerateCIDRDenyRules generates numRules rules, each combining one CIDR
// egress allow rule with one CIDR egress deny rule, plus the CIDR identities
// that the generated prefixes resolve to.
func GenerateCIDRDenyRules(numRules int) (api.Rules, identity.IdentityMap) {
	fooSelector := api.NewESFromLabels(labels.ParseSelectLabel("k8s:foo"))
	rules := make(api.Rules, 0, numRules)
	for i := 1; i <= numRules; i++ {
		uuid := k8stypes.UID(fmt.Sprintf("12bba160-ddca-13e8-%04x-0800273b04ff", i))
		rule := api.Rule{
			EndpointSelector: fooSelector,
			Egress:           []api.EgressRule{generateCIDREgressRule(i)},
			// Offset the deny-rule index so denies do not simply mirror allows.
			EgressDeny: []api.EgressDenyRule{generateCIDREgressDenyRule(i + 773)},
			Labels:     utils.GetPolicyLabels("default", fmt.Sprintf("cidr-%d", i), uuid, utils.ResourceTypeCiliumNetworkPolicy),
		}
		rule.Sanitize()
		rules = append(rules, &rule)
	}
	return rules, generateCIDRIdentities(rules)
}
// BenchmarkRegenerateCIDRDenyPolicyRules measures distilling an endpoint
// policy for fooIdentity from a repository preloaded with 1000 CIDR deny
// rules, and reports the resulting policy map size.
func BenchmarkRegenerateCIDRDenyPolicyRules(b *testing.B) {
	logger := hivetest.Logger(b)
	td := newTestData(logger)
	td.bootstrapRepo(GenerateCIDRDenyRules, 1000, b)
	ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
	owner := DummyOwner{logger: logger}
	b.ReportAllocs()
	for b.Loop() {
		epPolicy := ip.DistillPolicy(logger, owner, nil)
		// Remember the map size so it can be logged after the timed loop.
		owner.mapStateSize = epPolicy.policyMapState.Len()
		epPolicy.Ready()
	}
	// Release the selector policy's references into the selector cache.
	ip.detach(true, 0)
	b.Logf("Number of MapState entries: %d\n", owner.mapStateSize)
}
// TestRegenerateCIDRDenyPolicyRules is a smoke test: distilling policy for
// fooIdentity over 10 CIDR deny rules must yield a non-empty policy map.
func TestRegenerateCIDRDenyPolicyRules(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	td.bootstrapRepo(GenerateCIDRDenyRules, 10, t)
	ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
	epPolicy := ip.DistillPolicy(logger, DummyOwner{logger: logger}, nil)
	n := epPolicy.policyMapState.Len()
	epPolicy.Ready()
	ip.detach(true, 0)
	assert.Positive(t, n)
}
// TestL3WithIngressDenyWildcard checks that an L4-only ingress deny rule
// (port 80/TCP with no L3 selector) distills into a single wildcard-selector
// deny filter in the resulting selector policy.
func TestL3WithIngressDenyWildcard(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	repo := td.repo
	td.bootstrapRepo(GenerateL3IngressDenyRules, 1000, t)
	// Build and register the identity ("id=foo") the policy is resolved for.
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	td.addIdentity(fooIdentity)
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	// Deny port 80/TCP with no FromEndpoints, i.e. from any peer (L3 wildcard).
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		IngressDeny: []api.IngressDenyRule{
			{
				ToPorts: []api.PortDenyRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.mustAdd(rule1)
	require.NoError(t, err)
	repo.mutex.RLock()
	defer repo.mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	require.NoError(t, err)
	policy := selPolicy.DistillPolicy(hivetest.Logger(t), DummyOwner{logger: hivetest.Logger(t)}, nil)
	policy.Ready()
	// The expected policy: one ingress filter on 80/TCP, denying via the
	// wildcard cached selector; egress stays empty.
	expectedEndpointPolicy := EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: td.wildcardCachedSelector,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							td.wildcardCachedSelector: &PerSelectorPolicy{IsDeny: true},
						},
						RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
					},
				}),
					features: denyRules,
				},
				Egress: newL4DirectionPolicy(),
			},
			IngressPolicyEnabled: true,
		},
		PolicyOwner: DummyOwner{logger: hivetest.Logger(t)},
		// inherit this from the result as it is outside of the scope
		// of this test
		policyMapState:   policy.policyMapState,
		policyMapChanges: MapChanges{logger: logger},
	}
	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.detach(true, 0)
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	policy.policyMapChanges.firstVersion = 0
	// policyMapState cannot be compared via DeepEqual
	require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
	policy.policyMapState = mapState{}
	expectedEndpointPolicy.policyMapState = mapState{}
	require.Equal(t, &expectedEndpointPolicy, policy)
}
// TestL3WithLocalHostWildcardd checks that an L4-only ingress deny on 80/TCP
// still distills to a single wildcard deny filter when Kubernetes-style
// allow-from-localhost is enabled.
// NOTE(review): the trailing "dd" in the name looks like a typo
// ("Wildcardd"); renaming would change the test ID used by -run filters, so
// it is left as-is here.
func TestL3WithLocalHostWildcardd(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	td.addIdentitySelector(hostSelector)
	repo := td.repo
	td.bootstrapRepo(GenerateL3IngressDenyRules, 1000, t)
	// Build and register the identity ("id=foo") the policy is resolved for.
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	td.addIdentity(fooIdentity)
	// Emulate Kubernetes mode with allow from localhost
	oldLocalhostOpt := option.Config.AllowLocalhost
	option.Config.AllowLocalhost = option.AllowLocalhostAlways
	defer func() { option.Config.AllowLocalhost = oldLocalhostOpt }()
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	// Deny port 80/TCP from any peer (no L3 selector => wildcard).
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		IngressDeny: []api.IngressDenyRule{
			{
				ToPorts: []api.PortDenyRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.mustAdd(rule1)
	require.NoError(t, err)
	repo.mutex.RLock()
	defer repo.mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	require.NoError(t, err)
	policy := selPolicy.DistillPolicy(logger, DummyOwner{logger: logger}, nil)
	policy.Ready()
	// The host selector must be present in the selector cache.
	cachedSelectorHost := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameHost])
	require.NotNil(t, cachedSelectorHost)
	expectedEndpointPolicy := EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: td.wildcardCachedSelector,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							td.wildcardCachedSelector: &PerSelectorPolicy{IsDeny: true},
						},
						RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
					},
				}),
					features: denyRules,
				},
				Egress: newL4DirectionPolicy(),
			},
			IngressPolicyEnabled: true,
		},
		PolicyOwner: DummyOwner{logger: logger},
		// inherit this from the result as it is outside of the scope
		// of this test
		policyMapState:   policy.policyMapState,
		policyMapChanges: MapChanges{logger: logger},
	}
	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.detach(true, 0)
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	policy.policyMapChanges.firstVersion = 0
	// policyMapState cannot be compared via DeepEqual
	require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
	policy.policyMapState = mapState{}
	expectedEndpointPolicy.policyMapState = mapState{}
	require.Equal(t, &expectedEndpointPolicy, policy)
}
// TestMapStateWithIngressDenyWildcard checks the policy MapState produced by
// a wildcard ingress deny on 80/TCP: one deny entry keyed on the port plus
// the implicit allow-any-egress entry, and that an unrelated identity update
// produces no accumulated map changes.
func TestMapStateWithIngressDenyWildcard(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	repo := td.repo
	td.bootstrapRepo(GenerateL3IngressDenyRules, 1000, t)
	ruleLabel := labels.ParseLabelArray("rule-foo-allow-port-80")
	ruleLabelAllowAnyEgress := labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
	}
	// Build and register the identity ("id=foo") the policy is resolved for.
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	td.addIdentity(fooIdentity)
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Labels:           ruleLabel,
		IngressDeny: []api.IngressDenyRule{
			{
				ToPorts: []api.PortDenyRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.mustAdd(rule1)
	require.NoError(t, err)
	repo.mutex.RLock()
	defer repo.mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	require.NoError(t, err)
	policy := selPolicy.DistillPolicy(logger, DummyOwner{logger: logger}, nil)
	policy.Ready()
	rule1MapStateEntry := denyEntry().withLabels(labels.LabelArrayList{ruleLabel})
	allowEgressMapStateEntry := newAllowEntryWithLabels(ruleLabelAllowAnyEgress)
	expectedEndpointPolicy := EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: td.wildcardCachedSelector,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							td.wildcardCachedSelector: &PerSelectorPolicy{IsDeny: true},
						},
						RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {ruleLabel}}),
					},
				}),
					features: denyRules,
				},
				Egress: newL4DirectionPolicy(),
			},
			IngressPolicyEnabled: true,
		},
		PolicyOwner: DummyOwner{logger: logger},
		policyMapState: emptyMapState(logger).withState(mapStateMap{
			// Although we have calculated deny policies, the overall policy
			// will still allow egress to world.
			EgressKey():                  allowEgressMapStateEntry,
			IngressKey().WithTCPPort(80): rule1MapStateEntry,
		}),
		policyMapChanges: MapChanges{logger: logger},
	}
	// Add new identity to test accumulation of MapChanges
	added1 := identity.IdentityMap{
		identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1"),
	}
	wg := &sync.WaitGroup{}
	td.sc.UpdateIdentities(added1, nil, wg)
	// Cleanup the identities from the testSelectorCache
	defer td.sc.UpdateIdentities(nil, added1, wg)
	wg.Wait()
	// The wildcard deny filter does not select the new identity, so no
	// incremental changes should accumulate.
	require.Empty(t, policy.policyMapChanges.synced)
	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.detach(true, 0)
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	policy.policyMapChanges.firstVersion = 0
	// policyMapState cannot be compared via DeepEqual
	require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
	policy.policyMapState = mapState{}
	expectedEndpointPolicy.policyMapState = mapState{}
	require.Equal(t, &expectedEndpointPolicy, policy)
}
// TestMapStateWithIngressDeny checks incremental MapState updates for an
// ingress deny rule selecting world entities and "id=resolve_test_1"
// endpoints: identity additions/deletions must accumulate as map changes, and
// the consumed changes must contain exactly the surviving identities.
func TestMapStateWithIngressDeny(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	repo := td.repo
	td.bootstrapRepo(GenerateL3IngressDenyRules, 1000, t)
	ruleLabel := labels.ParseLabelArray("rule-deny-port-80-world-and-test")
	ruleLabelAllowAnyEgress := labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
	}
	// Build and register the identity ("id=foo") the policy is resolved for.
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	td.addIdentity(fooIdentity)
	lblTest := labels.ParseLabel("id=resolve_test_1")
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	// Two deny clauses on 80/TCP: one from world entities, one from
	// endpoints labeled id=resolve_test_1.
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Labels:           ruleLabel,
		IngressDeny: []api.IngressDenyRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEntities: []api.Entity{api.EntityWorld},
				},
				ToPorts: []api.PortDenyRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(lblTest),
					},
				},
				ToPorts: []api.PortDenyRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.mustAdd(rule1)
	require.NoError(t, err)
	repo.mutex.RLock()
	defer repo.mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	require.NoError(t, err)
	policy := selPolicy.DistillPolicy(logger, DummyOwner{logger: logger}, nil)
	policy.Ready()
	// Add new identity to test accumulation of MapChanges
	added1 := identity.IdentityMap{
		identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1", "num=1"),
		identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
		identity.NumericIdentity(194): labels.ParseSelectLabelArray("id=resolve_test_1", "num=3"),
	}
	wg := &sync.WaitGroup{}
	td.sc.UpdateIdentities(added1, nil, wg)
	wg.Wait()
	// Three additions selected by the test selector => three queued changes.
	require.Len(t, policy.policyMapChanges.synced, 3)
	deleted1 := identity.IdentityMap{
		identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
	}
	wg = &sync.WaitGroup{}
	td.sc.UpdateIdentities(nil, deleted1, wg)
	wg.Wait()
	// The deletion queues a fourth change.
	require.Len(t, policy.policyMapChanges.synced, 4)
	cachedSelectorWorld := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorld])
	require.NotNil(t, cachedSelectorWorld)
	cachedSelectorWorldV4 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv4])
	require.NotNil(t, cachedSelectorWorldV4)
	cachedSelectorWorldV6 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv6])
	require.NotNil(t, cachedSelectorWorldV6)
	cachedSelectorTest := td.sc.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
	require.NotNil(t, cachedSelectorTest)
	rule1MapStateEntry := denyEntry().withLabels(labels.LabelArrayList{ruleLabel})
	allowEgressMapStateEntry := newAllowEntryWithLabels(ruleLabelAllowAnyEgress)
	expectedEndpointPolicy := EndpointPolicy{
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							cachedSelectorWorld:   &PerSelectorPolicy{IsDeny: true},
							cachedSelectorWorldV4: &PerSelectorPolicy{IsDeny: true},
							cachedSelectorWorldV6: &PerSelectorPolicy{IsDeny: true},
							cachedSelectorTest:    &PerSelectorPolicy{IsDeny: true},
						},
						RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
							cachedSelectorWorld:   {ruleLabel},
							cachedSelectorWorldV4: {ruleLabel},
							cachedSelectorWorldV6: {ruleLabel},
							cachedSelectorTest:    {ruleLabel},
						}),
					},
				}),
					features: denyRules,
				},
				Egress: newL4DirectionPolicy(),
			},
			IngressPolicyEnabled: true,
		},
		PolicyOwner: DummyOwner{logger: logger},
		policyMapState: emptyMapState(logger).withState(mapStateMap{
			// Although we have calculated deny policies, the overall policy
			// will still allow egress to world.
			EgressKey(): allowEgressMapStateEntry,
			IngressKey().WithIdentity(identity.ReservedIdentityWorld).WithTCPPort(80):     rule1MapStateEntry,
			IngressKey().WithIdentity(identity.ReservedIdentityWorldIPv4).WithTCPPort(80): rule1MapStateEntry,
			IngressKey().WithIdentity(identity.ReservedIdentityWorldIPv6).WithTCPPort(80): rule1MapStateEntry,
			IngressKey().WithIdentity(192).WithTCPPort(80):                                rule1MapStateEntry,
			IngressKey().WithIdentity(194).WithTCPPort(80):                                rule1MapStateEntry,
		}),
		policyMapChanges: MapChanges{logger: logger},
	}
	closer, changes := policy.ConsumeMapChanges()
	closer()
	// maps on the policy got cleared
	// Identity 193 was added then deleted, so only 192 and 194 survive.
	require.Equal(t, Keys{
		ingressKey(192, 6, 80, 0): {},
		ingressKey(194, 6, 80, 0): {},
	}, changes.Adds)
	require.Equal(t, Keys{}, changes.Deletes)
	// Have to remove circular reference before testing for Equality to avoid an infinite loop
	policy.selectorPolicy.detach(true, 0)
	// Verify that cached selector is not found after Detach().
	// Note that this depends on the other tests NOT using the same selector concurrently!
	cachedSelectorTest = td.sc.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
	require.Nil(t, cachedSelectorTest)
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	policy.policyMapChanges.firstVersion = 0
	// policyMapState cannot be compared via DeepEqual
	require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
	policy.policyMapState = mapState{}
	expectedEndpointPolicy.policyMapState = mapState{}
	require.Equal(t, &expectedEndpointPolicy, policy)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"log/slog"
"sync"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/stretchr/testify/require"
k8stypes "k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/pkg/endpoint/regeneration"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/u8proto"
)
var (
	// fooLabel and lbls form the label set of the fixed test identity below.
	fooLabel = labels.NewLabel("k8s:foo", "", "")
	lbls     = labels.Labels{
		"foo": fooLabel,
	}
	// fooIdentity is the identity these tests and benchmarks resolve policy
	// for; 303 is an arbitrary non-reserved numeric identity.
	fooIdentity = &identity.Identity{
		ID:         303,
		Labels:     lbls,
		LabelArray: lbls.LabelArray(),
	}
)

// testRedirects maps proxy redirect keys of the form
// "<endpointID>:<direction>:<proto>:<port>:" to proxy ports, for tests that
// exercise L7 redirect policies.
var testRedirects = map[string]uint16{
	"1234:ingress:TCP:80:": 1,
}
// generateNumIdentities returns an IdentityMap holding numIdentities
// synthetic identities, numbered from 1000 upward, each carrying a unique
// "foo<i>" label plus a fixed set of k8s-style labels.
func generateNumIdentities(numIdentities int) identity.IdentityMap {
	identities := make(identity.IdentityMap, numIdentities)
	for i := range numIdentities {
		uniqueLabel := labels.NewLabel(fmt.Sprintf("k8s:foo%d", i), "", "")
		identityLabels := labels.Labels{
			fmt.Sprintf("foo%d", i):                           uniqueLabel,
			"k8s:io.cilium.k8s.policy.cluster=default":        labels.NewLabel("io.cilium.k8s.policy.cluster=default", "", labels.LabelSourceK8s),
			"k8s:io.cilium.k8s.policy.serviceaccount=default": labels.NewLabel("io.cilium.k8s.policy.serviceaccount=default", "", labels.LabelSourceK8s),
			"k8s:io.kubernetes.pod.namespace=monitoring":      labels.NewLabel("io.kubernetes.pod.namespace=monitoring", "", labels.LabelSourceK8s),
			"k8s:app=analytics-erneh":                         labels.NewLabel("app=analytics-erneh", "", labels.LabelSourceK8s),
		}
		// Offset by 1000 to stay clear of low/reserved identity numbers.
		identities[identity.NumericIdentity(i+1000)] = identityLabels.LabelArray()
	}
	return identities
}
// GenerateL3IngressRules generates numRules identical L3 ingress allow rules
// selecting endpoints labeled "k8s:foo" and allowing traffic from endpoints
// labeled "bar", plus a map of 3000 synthetic identities.
func GenerateL3IngressRules(numRules int) (api.Rules, identity.IdentityMap) {
	fooSelector := api.NewESFromLabels(labels.ParseSelectLabel("k8s:foo"))
	barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))

	// Change ingRule and rule in the for-loop below to change what type of
	// rules are added into the policy repository.
	ingRule := api.IngressRule{
		IngressCommonRule: api.IngressCommonRule{
			FromEndpoints: []api.EndpointSelector{barSelector},
		},
	}

	rules := make(api.Rules, 0, numRules)
	uuid := k8stypes.UID("11bba160-ddca-13e8-b697-0800273b04ff")
	for range numRules {
		rule := api.Rule{
			EndpointSelector: fooSelector,
			Ingress:          []api.IngressRule{ingRule},
			Labels:           utils.GetPolicyLabels("default", "l3-ingress", uuid, utils.ResourceTypeCiliumNetworkPolicy),
		}
		rule.Sanitize()
		rules = append(rules, &rule)
	}
	return rules, generateNumIdentities(3000)
}
// GenerateL3EgressRules generates numRules identical L3 egress allow rules
// selecting endpoints labeled "k8s:foo" and allowing traffic to endpoints
// labeled "bar", plus a map of 3000 synthetic identities.
func GenerateL3EgressRules(numRules int) (api.Rules, identity.IdentityMap) {
	fooSelector := api.NewESFromLabels(labels.ParseSelectLabel("k8s:foo"))
	barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))

	// Change egRule and rule in the for-loop below to change what type of
	// rules are added into the policy repository.
	egRule := api.EgressRule{
		EgressCommonRule: api.EgressCommonRule{
			ToEndpoints: []api.EndpointSelector{barSelector},
		},
	}

	rules := make(api.Rules, 0, numRules)
	uuid := k8stypes.UID("13bba160-ddca-13e8-b697-0800273b04ff")
	for range numRules {
		rule := api.Rule{
			EndpointSelector: fooSelector,
			Egress:           []api.EgressRule{egRule},
			Labels:           utils.GetPolicyLabels("default", "l3-egress", uuid, utils.ResourceTypeCiliumNetworkPolicy),
		}
		rule.Sanitize()
		rules = append(rules, &rule)
	}
	return rules, generateNumIdentities(3000)
}
// GenerateCIDRRules generates numRules rules, each with one CIDR egress allow
// rule, plus the CIDR identities the generated prefixes resolve to.
func GenerateCIDRRules(numRules int) (api.Rules, identity.IdentityMap) {
	fooSelector := api.NewESFromLabels(labels.ParseSelectLabel("k8s:foo"))
	rules := make(api.Rules, 0, numRules)
	uuid := k8stypes.UID("12bba160-ddca-13e8-b697-0800273b04ff")
	for i := 1; i <= numRules; i++ {
		rule := api.Rule{
			EndpointSelector: fooSelector,
			Egress:           []api.EgressRule{generateCIDREgressRule(i)},
			Labels:           utils.GetPolicyLabels("default", "cidr", uuid, utils.ResourceTypeCiliumNetworkPolicy),
		}
		rule.Sanitize()
		rules = append(rules, &rule)
	}
	return rules, generateCIDRIdentities(rules)
}
// DummyOwner is a minimal policy-owner implementation used by the tests and
// benchmarks in this file.
type DummyOwner struct {
	logger *slog.Logger
	// mapStateSize records the size of the last distilled policy map so
	// benchmarks can report it after the timed loop.
	mapStateSize int
}
// CreateRedirects is a no-op for the dummy owner.
func (d DummyOwner) CreateRedirects(*L4Filter) {
}
// GetNamedPort resolves every named port to 80 for test purposes.
func (d DummyOwner) GetNamedPort(ingress bool, name string, proto u8proto.U8proto) uint16 {
	return 80
}
// GetNamedPortLocked resolves every named port to 80 for test purposes.
func (d DummyOwner) GetNamedPortLocked(ingress bool, name string, proto u8proto.U8proto) uint16 {
	return 80
}
// GetID returns a fixed endpoint ID (matches the key used in testRedirects).
func (d DummyOwner) GetID() uint64 {
	return 1234
}
// IsHost reports that the dummy owner is never the host endpoint.
func (d DummyOwner) IsHost() bool {
	return false
}
// MapStateSize returns the recorded size of the last distilled policy map.
func (d DummyOwner) MapStateSize() int {
	return d.mapStateSize
}
// RegenerateIfAlive returns an already-closed channel so callers waiting on
// regeneration never block. Receiver renamed from "_" to "d" for consistency
// with the other DummyOwner methods.
func (d DummyOwner) RegenerateIfAlive(_ *regeneration.ExternalRegenerationMetadata) <-chan bool {
	ch := make(chan bool)
	close(ch)
	return ch
}
// PolicyDebug forwards policy debug messages to the owner's logger.
func (d DummyOwner) PolicyDebug(msg string, attrs ...any) {
	d.logger.Debug(msg, attrs...)
}
// bootstrapRepo seeds the selector cache with the reserved identities, the
// fixed fooIdentity, and the identities produced by ruleGenFunc, then loads
// the generated rules into the policy repository.
func (td *testData) bootstrapRepo(ruleGenFunc func(int) (api.Rules, identity.IdentityMap), numRules int, tb testing.TB) {
	SetPolicyEnabled(option.DefaultEnforcement)
	var wait sync.WaitGroup
	// Load in the standard reserved identities plus the fixed test identity.
	seed := identity.IdentityMap{
		fooIdentity.ID: fooIdentity.LabelArray,
	}
	identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
		seed[ni] = id.Labels.LabelArray()
	})
	td.sc.UpdateIdentities(seed, nil, &wait)
	apiRules, ids := ruleGenFunc(numRules)
	td.sc.UpdateIdentities(ids, nil, &wait)
	wait.Wait()
	td.repo.MustAddList(apiRules)
}
func BenchmarkRegenerateCIDRPolicyRules(b *testing.B) {
td := newTestData(hivetest.Logger(b))
td.bootstrapRepo(GenerateCIDRRules, 1000, b)
ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
owner := DummyOwner{logger: hivetest.Logger(b)}
b.ReportAllocs()
for b.Loop() {
epPolicy := ip.DistillPolicy(hivetest.Logger(b), owner, nil)
owner.mapStateSize = epPolicy.policyMapState.Len()
epPolicy.Ready()
}
ip.detach(true, 0)
b.Logf("Number of MapState entries: %d\n", owner.mapStateSize)
}
func BenchmarkRegenerateL3IngressPolicyRules(b *testing.B) {
td := newTestData(hivetest.Logger(b))
td.bootstrapRepo(GenerateL3IngressRules, 1000, b)
for b.Loop() {
ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
policy := ip.DistillPolicy(hivetest.Logger(b), DummyOwner{logger: hivetest.Logger(b)}, nil)
policy.Ready()
ip.detach(true, 0)
}
}
func BenchmarkRegenerateL3EgressPolicyRules(b *testing.B) {
td := newTestData(hivetest.Logger(b))
td.bootstrapRepo(GenerateL3EgressRules, 1000, b)
for b.Loop() {
ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
policy := ip.DistillPolicy(hivetest.Logger(b), DummyOwner{logger: hivetest.Logger(b)}, nil)
policy.Ready()
ip.detach(true, 0)
}
}
// TestL7WithIngressWildcard checks that an L7 HTTP rule on 80/TCP with no L3
// selector distills into a wildcard-selector filter carrying the HTTP rule
// and an Envoy redirect.
func TestL7WithIngressWildcard(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	repo := td.repo
	td.bootstrapRepo(GenerateL3IngressRules, 1000, t)
	// Build and register the identity ("id=foo") the policy is resolved for.
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	td.addIdentity(fooIdentity)
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	// Allow GET /good on 80/TCP from any peer (no L3 selector => wildcard).
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/good"},
						},
					},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.mustAdd(rule1)
	require.NoError(t, err)
	repo.mutex.RLock()
	defer repo.mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	require.NoError(t, err)
	// The L7 rule must mark the policy as requiring an Envoy redirect.
	require.Equal(t, redirectTypeEnvoy, selPolicy.L4Policy.redirectTypes)
	policy := selPolicy.DistillPolicy(logger, DummyOwner{logger: logger}, testRedirects)
	policy.Ready()
	expectedEndpointPolicy := EndpointPolicy{
		Redirects: testRedirects,
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: td.wildcardCachedSelector,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							td.wildcardCachedSelector: &PerSelectorPolicy{
								L7Parser: ParserTypeHTTP,
								Priority: ListenerPriorityHTTP,
								L7Rules: api.L7Rules{
									HTTP: []api.PortRuleHTTP{{Method: "GET", Path: "/good"}},
								},
								CanShortCircuit: true,
							},
						},
						RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
					},
				}),
					features: redirectRules,
				},
				Egress:        newL4DirectionPolicy(),
				redirectTypes: redirectTypeEnvoy,
			},
			IngressPolicyEnabled: true,
			EgressPolicyEnabled:  false,
		},
		PolicyOwner: DummyOwner{logger: logger},
		// inherit this from the result as it is outside of the scope
		// of this test
		policyMapState:   policy.policyMapState,
		policyMapChanges: MapChanges{logger: logger},
	}
	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.detach(true, 0)
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	policy.policyMapChanges.firstVersion = 0
	// policyMapState cannot be compared via DeepEqual
	require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
	policy.policyMapState = mapState{}
	expectedEndpointPolicy.policyMapState = mapState{}
	// reset cached envoy http rules to avoid unnecessary diff
	resetCachedEnvoyHTTPRules(policy)
	require.Equal(t, &expectedEndpointPolicy, policy)
}
// TestL7WithLocalHostWildcard checks that with Kubernetes-style
// allow-from-localhost enabled, the wildcard L7 filter additionally carries
// the host selector with a nil (allow-all, no L7) per-selector policy.
func TestL7WithLocalHostWildcard(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	repo := td.repo
	td.bootstrapRepo(GenerateL3IngressRules, 1000, t)
	// Build and register the identity ("id=foo") the policy is resolved for.
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	td.addIdentity(fooIdentity)
	// Emulate Kubernetes mode with allow from localhost
	oldLocalhostOpt := option.Config.AllowLocalhost
	option.Config.AllowLocalhost = option.AllowLocalhostAlways
	defer func() { option.Config.AllowLocalhost = oldLocalhostOpt }()
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	// Allow GET /good on 80/TCP from any peer (no L3 selector => wildcard).
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/good"},
						},
					},
				}},
			},
		},
	}
	rule1.Sanitize()
	_, _, err := repo.mustAdd(rule1)
	require.NoError(t, err)
	repo.mutex.RLock()
	defer repo.mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	require.NoError(t, err)
	policy := selPolicy.DistillPolicy(logger, DummyOwner{logger: logger}, testRedirects)
	policy.Ready()
	// The host selector must be present in the selector cache.
	cachedSelectorHost := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameHost])
	require.NotNil(t, cachedSelectorHost)
	expectedEndpointPolicy := EndpointPolicy{
		Redirects: testRedirects,
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: td.wildcardCachedSelector,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							td.wildcardCachedSelector: &PerSelectorPolicy{
								L7Parser: ParserTypeHTTP,
								Priority: ListenerPriorityHTTP,
								L7Rules: api.L7Rules{
									HTTP: []api.PortRuleHTTP{{Method: "GET", Path: "/good"}},
								},
								CanShortCircuit: true,
							},
							// nil policy: host traffic is allowed without L7 enforcement.
							cachedSelectorHost: nil,
						},
						RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
					},
				}),
					features: redirectRules,
				},
				Egress:        newL4DirectionPolicy(),
				redirectTypes: redirectTypeEnvoy,
			},
			IngressPolicyEnabled: true,
			EgressPolicyEnabled:  false,
		},
		PolicyOwner: DummyOwner{logger: logger},
		// inherit this from the result as it is outside of the scope
		// of this test
		policyMapState:   policy.policyMapState,
		policyMapChanges: MapChanges{logger: logger},
	}
	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.detach(true, 0)
	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	policy.policyMapChanges.firstVersion = 0
	// policyMapState cannot be compared via DeepEqual
	require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
	policy.policyMapState = mapState{}
	expectedEndpointPolicy.policyMapState = mapState{}
	// reset cached envoy http rules to avoid unnecessary diff
	resetCachedEnvoyHTTPRules(policy)
	require.Equal(t, &expectedEndpointPolicy, policy)
}
// resetCachedEnvoyHTTPRules clears the cached Envoy HTTP rules from every
// non-nil per-selector policy in both the ingress and egress port-rule maps,
// so that test comparisons do not diff on the cached representation.
func resetCachedEnvoyHTTPRules(policy *EndpointPolicy) {
	clearRules := func(filter *L4Filter) bool {
		for _, psp := range filter.PerSelectorPolicies {
			if psp == nil {
				continue
			}
			psp.EnvoyHTTPRules = nil
		}
		return true
	}
	policy.L4Policy.Ingress.PortRules.ForEach(clearRules)
	policy.L4Policy.Egress.PortRules.ForEach(clearRules)
}
// TestMapStateWithIngressWildcard verifies that an L4-only ingress rule (no
// L3 peer selector) distills into an EndpointPolicy whose map state contains
// a wildcard-identity ingress entry for port 80/TCP plus the implicit
// allow-all egress entry, and that an unrelated identity addition produces no
// pending map changes.
func TestMapStateWithIngressWildcard(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	repo := td.repo
	td.bootstrapRepo(GenerateL3IngressRules, 1000, t)

	ruleLabel := labels.ParseLabelArray("rule-foo-allow-port-80")
	ruleLabelAllowAnyEgress := labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
	}

	// Build the subject identity selected by the rule below.
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	td.addIdentity(fooIdentity)

	// Ingress rule allowing port 80/TCP from anywhere (no peer selector).
	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Labels:           ruleLabel,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{},
				}},
			},
		},
	}

	rule1.Sanitize()
	_, _, err := repo.mustAdd(rule1)
	require.NoError(t, err)

	repo.mutex.RLock()
	defer repo.mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	require.NoError(t, err)
	policy := selPolicy.DistillPolicy(logger, DummyOwner{logger: logger}, testRedirects)
	policy.Ready()

	rule1MapStateEntry := newAllowEntryWithLabels(ruleLabel)
	allowEgressMapStateEntry := newAllowEntryWithLabels(ruleLabelAllowAnyEgress)

	expectedEndpointPolicy := EndpointPolicy{
		Redirects: testRedirects,
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						wildcard: td.wildcardCachedSelector,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							td.wildcardCachedSelector: nil,
						},
						RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {ruleLabel}}),
					},
				})},
				Egress: newL4DirectionPolicy(),
			},
			IngressPolicyEnabled: true,
			EgressPolicyEnabled:  false,
		},
		PolicyOwner: DummyOwner{logger: logger},
		policyMapState: emptyMapState(logger).withState(mapStateMap{
			EgressKey():                  allowEgressMapStateEntry,
			IngressKey().WithTCPPort(80): rule1MapStateEntry,
		}),
		policyMapChanges: MapChanges{logger: logger},
	}

	// Add new identity to test accumulation of MapChanges
	added1 := identity.IdentityMap{
		identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1"),
	}
	wg := &sync.WaitGroup{}
	td.sc.UpdateIdentities(added1, nil, wg)
	wg.Wait()
	require.Empty(t, policy.policyMapChanges.synced) // XXX why 0?

	// Have to remove circular reference before testing to avoid an infinite loop
	policy.selectorPolicy.detach(true, 0)

	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	policy.policyMapChanges.firstVersion = 0
	// policyMapState cannot be compared via DeepEqual
	require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
	policy.policyMapState = mapState{}
	expectedEndpointPolicy.policyMapState = mapState{}
	require.Equal(t, &expectedEndpointPolicy, policy)
}
// TestMapStateWithIngress verifies that an ingress policy combining an
// entity-based rule (world) with an endpoint-selector rule carrying an
// explicit authentication mode distills into the expected per-identity map
// state, and that incremental identity additions/deletions accumulate in
// MapChanges and are surfaced by ConsumeMapChanges.
func TestMapStateWithIngress(t *testing.T) {
	logger := hivetest.Logger(t)
	td := newTestData(logger)
	repo := td.repo
	td.bootstrapRepo(GenerateL3IngressRules, 1000, t)

	ruleLabel := labels.ParseLabelArray("rule-world-allow-port-80")
	ruleLabelAllowAnyEgress := labels.LabelArray{
		labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
	}

	// Build the subject identity selected by the rule below.
	idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
	idFooSelectLabels := labels.Labels{}
	for _, lbl := range idFooSelectLabelArray {
		idFooSelectLabels[lbl.Key] = lbl
	}
	fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
	td.addIdentity(fooIdentity)

	lblTest := labels.ParseLabel("id=resolve_test_1")

	selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
	rule1 := api.Rule{
		EndpointSelector: selFoo,
		Labels:           ruleLabel,
		Ingress: []api.IngressRule{
			// World entity may reach port 80/TCP.
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEntities: []api.Entity{api.EntityWorld},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{},
				}},
			},
			// Endpoints with id=resolve_test_1 may reach port 80/TCP,
			// with authentication explicitly disabled.
			{
				Authentication: &api.Authentication{
					Mode: api.AuthenticationModeDisabled,
				},
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{
						api.NewESFromLabels(lblTest),
					},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{},
				}},
			},
		},
	}

	rule1.Sanitize()
	_, _, err := repo.mustAdd(rule1)
	require.NoError(t, err)

	repo.mutex.RLock()
	defer repo.mutex.RUnlock()
	selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
	require.NoError(t, err)
	policy := selPolicy.DistillPolicy(logger, DummyOwner{logger: logger}, testRedirects)
	policy.Ready()

	// Add new identity to test accumulation of MapChanges
	added1 := identity.IdentityMap{
		identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1", "num=1"),
		identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
		identity.NumericIdentity(194): labels.ParseSelectLabelArray("id=resolve_test_1", "num=3"),
	}
	wg := &sync.WaitGroup{}
	td.sc.UpdateIdentities(added1, nil, wg)
	wg.Wait()
	require.Len(t, policy.policyMapChanges.synced, 3)

	// A deletion is also queued as a change (3 adds + 1 delete = 4).
	deleted1 := identity.IdentityMap{
		identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
	}
	wg = &sync.WaitGroup{}
	td.sc.UpdateIdentities(nil, deleted1, wg)
	wg.Wait()
	require.Len(t, policy.policyMapChanges.synced, 4)

	// Look up the cached selectors the distilled policy should reference.
	cachedSelectorWorld := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorld])
	require.NotNil(t, cachedSelectorWorld)

	cachedSelectorWorldV4 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv4])
	require.NotNil(t, cachedSelectorWorldV4)

	cachedSelectorWorldV6 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv6])
	require.NotNil(t, cachedSelectorWorldV6)

	cachedSelectorTest := td.sc.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
	require.NotNil(t, cachedSelectorTest)

	rule1MapStateEntry := newAllowEntryWithLabels(ruleLabel)
	allowEgressMapStateEntry := newAllowEntryWithLabels(ruleLabelAllowAnyEgress)

	expectedEndpointPolicy := EndpointPolicy{
		Redirects: testRedirects,
		selectorPolicy: &selectorPolicy{
			Revision:      repo.GetRevision(),
			SelectorCache: repo.GetSelectorCache(),
			L4Policy: L4Policy{
				Revision: repo.GetRevision(),
				Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
					"80/TCP": {
						Port:     80,
						Protocol: api.ProtoTCP,
						U8Proto:  0x6,
						Ingress:  true,
						PerSelectorPolicies: L7DataMap{
							cachedSelectorWorld:   nil,
							cachedSelectorWorldV4: nil,
							cachedSelectorWorldV6: nil,
							cachedSelectorTest: &PerSelectorPolicy{
								Authentication: &api.Authentication{
									Mode: api.AuthenticationModeDisabled,
								},
								CanShortCircuit: true,
							},
						},
						RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
							cachedSelectorWorld:   {ruleLabel},
							cachedSelectorWorldV4: {ruleLabel},
							cachedSelectorWorldV6: {ruleLabel},
							cachedSelectorTest:    {ruleLabel},
						}),
					},
				}),
					features: authRules,
				},
				Egress: newL4DirectionPolicy(),
			},
			IngressPolicyEnabled: true,
			EgressPolicyEnabled:  false,
		},
		PolicyOwner: DummyOwner{logger: logger},
		policyMapState: emptyMapState(logger).withState(mapStateMap{
			EgressKey(): allowEgressMapStateEntry,
			IngressKey().WithIdentity(identity.ReservedIdentityWorld).WithTCPPort(80):     rule1MapStateEntry,
			IngressKey().WithIdentity(identity.ReservedIdentityWorldIPv4).WithTCPPort(80): rule1MapStateEntry,
			IngressKey().WithIdentity(identity.ReservedIdentityWorldIPv6).WithTCPPort(80): rule1MapStateEntry,
			IngressKey().WithIdentity(192).WithTCPPort(80):                                rule1MapStateEntry.withExplicitAuth(AuthTypeDisabled),
			IngressKey().WithIdentity(194).WithTCPPort(80):                                rule1MapStateEntry.withExplicitAuth(AuthTypeDisabled),
		}),
		policyMapChanges: MapChanges{logger: logger},
	}

	// Have to remove circular reference before testing for Equality to avoid an infinite loop
	policy.selectorPolicy.detach(true, 0)
	// Verify that cached selector is not found after Detach().
	// Note that this depends on the other tests NOT using the same selector concurrently!
	cachedSelectorTest = td.sc.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
	require.Nil(t, cachedSelectorTest)

	closer, changes := policy.ConsumeMapChanges()
	closer()
	// maps on the policy got cleared
	require.Nil(t, policy.policyMapChanges.synced)

	// Identity 193 was added and then deleted, so only 192 and 194 remain.
	require.Equal(t, Keys{
		ingressKey(192, 6, 80, 0): {},
		ingressKey(194, 6, 80, 0): {},
	}, changes.Adds)
	require.Equal(t, Keys{}, changes.Deletes)

	// Assign an empty mutex so that checker.Equal does not complain about the
	// difference of the internal time.Time from the lock_debug.go.
	policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
	policy.policyMapChanges.mutex = lock.Mutex{}
	policy.policyMapChanges.firstVersion = 0
	// policyMapState cannot be compared via DeepEqual
	require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
	require.EqualExportedValues(t, &expectedEndpointPolicy, policy)
}
// allowsIdentity reports whether this policy allows ingress and egress
// traffic for the given numeric security identity. Passing identity 0
// checks the wildcard (all-traffic) map entries.
//
// A direction with policy enforcement disabled is always reported as allowed.
func (p *EndpointPolicy) allowsIdentity(identity identity.NumericIdentity) (ingress, egress bool) {
	ingress = !p.IngressPolicyEnabled
	if !ingress {
		key := IngressKey().WithIdentity(identity)
		if entry, ok := p.policyMapState.Get(key); ok && !entry.IsDeny() {
			ingress = true
		}
	}

	egress = !p.EgressPolicyEnabled
	if !egress {
		key := EgressKey().WithIdentity(identity)
		if entry, ok := p.policyMapState.Get(key); ok && !entry.IsDeny() {
			egress = true
		}
	}

	return ingress, egress
}
// TestEndpointPolicy_AllowsIdentity exercises allowsIdentity across all
// combinations of per-direction enforcement flags and allow/deny map entries,
// using the wildcard identity (0).
func TestEndpointPolicy_AllowsIdentity(t *testing.T) {
	logger := hivetest.Logger(t)
	type fields struct {
		selectorPolicy *selectorPolicy
		PolicyMapState mapState
	}
	type args struct {
		identity identity.NumericIdentity
	}
	tests := []struct {
		name        string
		fields      fields
		args        args
		wantIngress bool
		wantEgress  bool
	}{
		// Enforcement disabled in both directions: everything is allowed.
		{
			name: "policy disabled",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: false,
					EgressPolicyEnabled:  false,
				},
				PolicyMapState: emptyMapState(logger),
			},
			args: args{
				identity: 0,
			},
			wantIngress: true,
			wantEgress:  true,
		},
		// Enforcement enabled with an empty map: nothing is allowed.
		{
			name: "policy enabled",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: emptyMapState(logger),
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  false,
		},
		// A zero-value (allow) entry opens only its own direction.
		{
			name: "policy enabled for ingress",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: emptyMapState(logger).withState(mapStateMap{
					IngressKey(): {},
				}),
			},
			args: args{
				identity: 0,
			},
			wantIngress: true,
			wantEgress:  false,
		},
		{
			name: "policy enabled for egress",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: emptyMapState(logger).withState(mapStateMap{
					EgressKey(): {},
				}),
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  true,
		},
		// Deny entries never count as allowed, even when present.
		{
			name: "policy enabled for ingress with deny policy",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: emptyMapState(logger).withState(mapStateMap{
					IngressKey(): NewMapStateEntry(DenyEntry),
				}),
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  false,
		},
		// Disabled enforcement wins over a deny entry in that direction.
		{
			name: "policy disabled for ingress with deny policy",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: false,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: emptyMapState(logger).withState(mapStateMap{
					IngressKey(): NewMapStateEntry(DenyEntry),
				}),
			},
			args: args{
				identity: 0,
			},
			wantIngress: true,
			wantEgress:  false,
		},
		{
			name: "policy enabled for egress with deny policy",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  true,
				},
				PolicyMapState: emptyMapState(logger).withState(mapStateMap{
					EgressKey(): NewMapStateEntry(DenyEntry),
				}),
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  false,
		},
		{
			name: "policy disabled for egress with deny policy",
			fields: fields{
				selectorPolicy: &selectorPolicy{
					IngressPolicyEnabled: true,
					EgressPolicyEnabled:  false,
				},
				PolicyMapState: emptyMapState(logger).withState(mapStateMap{
					EgressKey(): NewMapStateEntry(DenyEntry),
				}),
			},
			args: args{
				identity: 0,
			},
			wantIngress: false,
			wantEgress:  true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &EndpointPolicy{
				selectorPolicy: tt.fields.selectorPolicy,
				policyMapState: tt.fields.PolicyMapState,
			}
			gotIngress, gotEgress := p.allowsIdentity(tt.args.identity)
			if gotIngress != tt.wantIngress {
				t.Errorf("allowsIdentity() gotIngress = %v, want %v", gotIngress, tt.wantIngress)
			}
			if gotEgress != tt.wantEgress {
				t.Errorf("allowsIdentity() gotEgress = %v, want %v", gotEgress, tt.wantEgress)
			}
		})
	}
}
// TestEndpointPolicy_GetRuleMeta verifies rule-metadata lookup behavior:
// a miss on an empty map state, a hit returning the originating labels and
// log lines, a miss for an absent key, and the nil-origin result for entries
// restored from a datapath dump (which carries no rule metadata).
func TestEndpointPolicy_GetRuleMeta(t *testing.T) {
	log := hivetest.Logger(t)
	key1 := ingressKey(192, 6, 80, 0)
	key2 := ingressKey(193, 6, 80, 0)
	lbls := labels.ParseLabelArray("k8s:k=v")
	lblss := labels.LabelArrayList{lbls}
	logstr := "log"
	logstrs := []string{logstr}

	// test empty map state
	p := &EndpointPolicy{
		policyMapState: emptyMapState(log),
	}
	_, err := p.GetRuleMeta(key1)
	require.Error(t, err)

	// test non-empty mapstate
	p.policyMapState = emptyMapState(log).withState(mapStateMap{
		key1: newMapStateEntry(makeSingleRuleOrigin(lbls, logstr), 0, 0, false, NoAuthRequirement),
	})
	rm, err := p.GetRuleMeta(key1)
	require.NoError(t, err)
	// testify convention: expected value first, actual second, so that
	// failure messages are not inverted.
	require.Equal(t, lblss, rm.LabelArray())
	require.Equal(t, logstrs, rm.Log())

	// A key not present in the map state still yields an error.
	_, err = p.GetRuleMeta(key2)
	require.Error(t, err)

	// test mapstate from dump
	msDump := MapStateMap{
		key1: types.NewMapStateEntry(false, 0, 0, NoAuthRequirement),
	}
	p = &EndpointPolicy{
		policyMapState: emptyMapState(log),
	}
	p.CopyMapStateFrom(msDump)
	rm, err = p.GetRuleMeta(key1)
	require.NoError(t, err)
	require.Equal(t, NilRuleOrigin.Value(), rm)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"log/slog"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
policytypes "github.com/cilium/cilium/pkg/policy/types"
)
// ruleKey is a synthetic unique identifier for a Rule
type ruleKey struct {
	// resource is the owning resource of this rule
	resource ipcachetypes.ResourceID
	// idx is an arbitrary unique index, as resources can own multiple rules
	idx uint
}
// rule wraps an api.Rule with the bookkeeping needed to resolve it against
// the SelectorCache.
type rule struct {
	api.Rule

	// key uniquely identifies this rule within its owning resource.
	key ruleKey

	// subjectSelector is the entry in the SelectorCache that selects subjects (endpoints or nodes).
	subjectSelector CachedSelector
}
// IdentitySelectionUpdated is called by the SelectorCache when a new identity is added;
// We can ignore it because the endpoint will be regenerated by the nature of
// identities being updated.
func (r *rule) IdentitySelectionUpdated(logger *slog.Logger, selector policytypes.CachedSelector, added, deleted []identity.NumericIdentity) {
}
// IdentitySelectionCommit is a no-op; see IdentitySelectionUpdated for why
// selection changes need no action here. Receiver renamed from 'd' to 'r'
// for consistency with the other *rule methods.
func (r *rule) IdentitySelectionCommit(*slog.Logger, *versioned.Tx) {
}
// IsPeerSelector returns false: a rule's subject selector selects the
// endpoints the rule applies to, not traffic peers.
func (r *rule) IsPeerSelector() bool {
	return false
}
// String returns the string representation of the rule's endpoint selector.
func (r *rule) String() string {
	return r.EndpointSelector.String()
}
// getSelector returns the rule's subject selector: the node selector when
// one is set, otherwise the endpoint selector.
func (r *rule) getSelector() *api.EndpointSelector {
	if r.NodeSelector.LabelSelector == nil {
		return &r.EndpointSelector
	}
	return &r.NodeSelector
}
// origin returns the rule's provenance (sorted labels and log value) for
// attribution of generated policy entries.
func (r *rule) origin() ruleOrigin {
	return makeSingleRuleOrigin(r.Labels.Sort(), r.Log.Value)
}
// appendL7WildcardRule appends a wildcard rule to the L7 rule set of the
// matching protocol (HTTP, Kafka, DNS, or generic L7), if any rules of that
// protocol already exist and an equal wildcard is not already present.
// Returns the (possibly mutated) L7 rule set.
func (epd *PerSelectorPolicy) appendL7WildcardRule(policyContext PolicyContext) api.L7Rules {
	// Wildcard rule only needs to be appended if some rules already exist
	switch {
	case len(epd.L7Rules.HTTP) > 0:
		// An empty PortRuleHTTP matches every request.
		rule := api.PortRuleHTTP{}
		if !rule.Exists(epd.L7Rules) {
			policyContext.PolicyTrace("   Merging HTTP wildcard rule: %+v\n", rule)
			epd.L7Rules.HTTP = append(epd.L7Rules.HTTP, rule)
		} else {
			policyContext.PolicyTrace("   Merging HTTP wildcard rule, equal rule already exists: %+v\n", rule)
		}
	case len(epd.L7Rules.Kafka) > 0:
		// An empty (sanitized) Kafka PortRule matches every request.
		rule := kafka.PortRule{}
		rule.Sanitize()
		if !rule.Exists(epd.L7Rules.Kafka) {
			policyContext.PolicyTrace("   Merging Kafka wildcard rule: %+v\n", rule)
			epd.L7Rules.Kafka = append(epd.L7Rules.Kafka, rule)
		} else {
			policyContext.PolicyTrace("   Merging Kafka wildcard rule, equal rule already exists: %+v\n", rule)
		}
	case len(epd.L7Rules.DNS) > 0:
		// Wildcarding at L7 for DNS is specified via allowing all via
		// MatchPattern!
		rule := api.PortRuleDNS{MatchPattern: "*"}
		rule.Sanitize()
		if !rule.Exists(epd.L7Rules) {
			policyContext.PolicyTrace("   Merging DNS wildcard rule: %+v\n", rule)
			epd.L7Rules.DNS = append(epd.L7Rules.DNS, rule)
		} else {
			policyContext.PolicyTrace("   Merging DNS wildcard rule, equal rule already exists: %+v\n", rule)
		}
	case epd.L7Rules.L7Proto != "" && len(epd.L7Rules.L7) > 0:
		// Generic L7 wildcard: an empty PortRuleL7 matches everything.
		rule := api.PortRuleL7{}
		if !rule.Exists(epd.L7Rules) {
			policyContext.PolicyTrace("   Merging L7 wildcard rule: %+v\n", rule)
			epd.L7Rules.L7 = append(epd.L7Rules.L7, rule)
		} else {
			policyContext.PolicyTrace("   Merging L7 wildcard rule, equal rule already exists: %+v\n", rule)
		}
	}
	return epd.L7Rules
}
// takesListenerPrecedenceOver returns true if the listener reference in 'l7Rules' takes precedence
// over the listener reference in 'other'.
func (l7Rules *PerSelectorPolicy) takesListenerPrecedenceOver(other *PerSelectorPolicy) bool {
	// Subtracting one relies on unsigned wraparound: the undefined priority
	// (0) becomes the maximum value of the type, i.e. the lowest possible
	// precedence, while all defined priorities keep their relative order.
	return l7Rules.Priority-1 < other.Priority-1
}
// mergeRedirect merges listener reference from 'newL7Rules' to 'l7Rules', giving
// precedence to listener with the lowest priority, if any.
// Returns an error if both sides reference different listeners at the same
// priority, which cannot be reconciled.
func (l7Rules *PerSelectorPolicy) mergeRedirect(newL7Rules *PerSelectorPolicy) error {
	// Merge L7ParserType, if possible
	l7Parser, err := l7Rules.L7Parser.Merge(newL7Rules.L7Parser)
	if err != nil {
		return err
	}
	if l7Parser != l7Rules.L7Parser {
		// Also copy over the listener priority
		l7Rules.L7Parser = l7Parser
		l7Rules.Priority = newL7Rules.Priority
	}

	// Nothing to do if 'newL7Rules' has no listener reference
	if newL7Rules.Listener == "" {
		return nil
	}

	// Nothing to do if the listeners are already the same and have the same priority
	if newL7Rules.Listener == l7Rules.Listener && l7Rules.Priority == newL7Rules.Priority {
		return nil
	}

	// Nothing to do if 'l7Rules' takes precedence
	if l7Rules.takesListenerPrecedenceOver(newL7Rules) {
		return nil
	}

	// override if 'l7Rules' has no listener or 'newL7Rules' takes precedence
	if l7Rules.Listener == "" || newL7Rules.takesListenerPrecedenceOver(l7Rules) {
		l7Rules.Listener = newL7Rules.Listener
		l7Rules.Priority = newL7Rules.Priority
		return nil
	}

	// otherwise error on conflict
	return fmt.Errorf("cannot merge conflicting CiliumEnvoyConfig Listeners (%v/%v) with the same priority (%d)", newL7Rules.Listener, l7Rules.Listener, l7Rules.Priority)
}
// mergePortProto merges the L7-related data from the filter to merge
// with the L7-related data already in the existing filter.
//
// Every selector is removed from 'filterToMerge' as it is processed: either
// its rules are merged into the matching entry of 'existingFilter', or the
// selector (with its rules) is moved over wholesale. On error the merge is
// aborted and the error is returned to the caller.
func mergePortProto(policyCtx PolicyContext, existingFilter, filterToMerge *L4Filter, selectorCache *SelectorCache) (err error) {
	for cs, newL7Rules := range filterToMerge.PerSelectorPolicies {
		// 'cs' will be merged or moved (see below), either way it needs
		// to be removed from the map it is in now.
		delete(filterToMerge.PerSelectorPolicies, cs)

		if l7Rules, ok := existingFilter.PerSelectorPolicies[cs]; ok {
			// existing filter already has 'cs', release and merge L7 rules
			selectorCache.RemoveSelector(cs, filterToMerge)

			// skip merging for reserved:none, as it is never
			// selected, and toFQDN rules currently translate to
			// reserved:none as an endpoint selector, causing a
			// merge conflict for different toFQDN destinations
			// with different TLS contexts.
			if cs.IsNone() {
				continue
			}

			if l7Rules.Equal(newL7Rules) {
				continue // identical rules need no merging
			}

			// Merge two non-identical sets of non-nil rules
			if l7Rules.GetDeny() {
				// If existing rule is deny then it's a no-op
				// Denies takes priority over any rule.
				continue
			} else if newL7Rules.GetDeny() {
				// Overwrite existing filter if the new rule is a deny case
				// Denies takes priority over any rule.
				existingFilter.PerSelectorPolicies[cs] = newL7Rules
				continue
			}

			// One of the rules may be a nil rule, expand it to an empty non-nil rule
			if l7Rules == nil {
				l7Rules = &PerSelectorPolicy{}
			}
			if newL7Rules == nil {
				newL7Rules = &PerSelectorPolicy{}
			}

			// Merge Redirect
			if err := l7Rules.mergeRedirect(newL7Rules); err != nil {
				policyCtx.PolicyTrace("   Merge conflict: %s\n", err.Error())
				return err
			}

			// Merge authentication: take whichever side has one; error if
			// both have one and they differ.
			if l7Rules.Authentication == nil || newL7Rules.Authentication == nil {
				if newL7Rules.Authentication != nil {
					l7Rules.Authentication = newL7Rules.Authentication
				}
			} else if !newL7Rules.Authentication.DeepEqual(l7Rules.Authentication) {
				policyCtx.PolicyTrace("   Merge conflict: mismatching auth types %s/%s\n", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
				return fmt.Errorf("cannot merge conflicting authentication types (%s/%s)", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
			}

			// Merge terminating TLS context with the same take-or-conflict rule.
			if l7Rules.TerminatingTLS == nil || newL7Rules.TerminatingTLS == nil {
				if newL7Rules.TerminatingTLS != nil {
					l7Rules.TerminatingTLS = newL7Rules.TerminatingTLS
				}
			} else if !newL7Rules.TerminatingTLS.Equal(l7Rules.TerminatingTLS) {
				policyCtx.PolicyTrace("   Merge conflict: mismatching terminating TLS contexts %s/%s\n", newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
				return fmt.Errorf("cannot merge conflicting terminating TLS contexts for cached selector %s: (%s/%s)", cs.String(), newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
			}

			// Merge originating TLS context with the same take-or-conflict rule.
			if l7Rules.OriginatingTLS == nil || newL7Rules.OriginatingTLS == nil {
				if newL7Rules.OriginatingTLS != nil {
					l7Rules.OriginatingTLS = newL7Rules.OriginatingTLS
				}
			} else if !newL7Rules.OriginatingTLS.Equal(l7Rules.OriginatingTLS) {
				policyCtx.PolicyTrace("   Merge conflict: mismatching originating TLS contexts %s/%s\n", newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
				return fmt.Errorf("cannot merge conflicting originating TLS contexts for cached selector %s: (%s/%s)", cs.String(), newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
			}

			// For now we simply merge the set of allowed SNIs from different rules
			// to/from the *same remote*, port, and protocol. This means that if any
			// rule requires SNI, then all traffic to that remote/port requires TLS,
			// even if other merged rules would be fine without TLS. Any SNI from all
			// applicable rules is allowed.
			//
			// Preferably we could allow different rules for each SNI, but for now the
			// combination of all L7 rules is allowed for all the SNIs. For example, if
			// SNI and TLS termination are used together so that L7 filtering is
			// possible, in this example:
			//
			// - existing: SNI: public.example.com
			// - new: SNI: private.example.com HTTP: path="/public"
			//
			// Separately, these rule allow access to all paths at SNI
			// public.example.com and path private.example.com/public, but currently we
			// allow all paths also at private.example.com. This may be clamped down if
			// there is sufficient demand for SNI and TLS termination together.
			//
			// Note however that SNI rules are typically used with `toFQDNs`, each of
			// which defines a separate destination, so that SNIs for different
			// `toFQDNs` will not be merged together.
			l7Rules.ServerNames = l7Rules.ServerNames.Merge(newL7Rules.ServerNames)

			// L7 rules can be applied with SNI filtering only if the TLS is also
			// terminated
			if len(l7Rules.ServerNames) > 0 && !l7Rules.L7Rules.IsEmpty() && l7Rules.TerminatingTLS == nil {
				policyCtx.PolicyTrace("   Merge conflict: cannot use SNI filtering with L7 rules without TLS termination: %v\n", l7Rules.ServerNames)
				return fmt.Errorf("cannot merge L7 rules for cached selector %s with SNI filtering without TLS termination: %v", cs.String(), l7Rules.ServerNames)
			}

			// empty L7 rules effectively wildcard L7. When merging with a non-empty
			// rule, the empty must be expanded to an actual wildcard rule for the
			// specific L7
			if !l7Rules.HasL7Rules() && newL7Rules.HasL7Rules() {
				l7Rules.L7Rules = newL7Rules.appendL7WildcardRule(policyCtx)
				existingFilter.PerSelectorPolicies[cs] = l7Rules
				continue
			}
			if l7Rules.HasL7Rules() && !newL7Rules.HasL7Rules() {
				l7Rules.appendL7WildcardRule(policyCtx)
				existingFilter.PerSelectorPolicies[cs] = l7Rules
				continue
			}

			// We already know from the L7Parser.Merge() above that there are no
			// conflicting parser types, and rule validation only allows one type of L7
			// rules in a rule, so we can just merge the rules here.
			for _, newRule := range newL7Rules.HTTP {
				if !newRule.Exists(l7Rules.L7Rules) {
					l7Rules.HTTP = append(l7Rules.HTTP, newRule)
				}
			}
			for _, newRule := range newL7Rules.Kafka {
				if !newRule.Exists(l7Rules.L7Rules.Kafka) {
					l7Rules.Kafka = append(l7Rules.Kafka, newRule)
				}
			}
			if l7Rules.L7Proto == "" && newL7Rules.L7Proto != "" {
				l7Rules.L7Proto = newL7Rules.L7Proto
			}
			for _, newRule := range newL7Rules.L7 {
				if !newRule.Exists(l7Rules.L7Rules) {
					l7Rules.L7 = append(l7Rules.L7, newRule)
				}
			}
			for _, newRule := range newL7Rules.DNS {
				if !newRule.Exists(l7Rules.L7Rules) {
					l7Rules.DNS = append(l7Rules.DNS, newRule)
				}
			}

			// Update the pointer in the map in case it was newly allocated
			existingFilter.PerSelectorPolicies[cs] = l7Rules
		} else { // 'cs' is not in the existing filter yet
			// Update selector owner to the existing filter
			selectorCache.ChangeUser(cs, filterToMerge, existingFilter)

			// Move L7 rules over.
			existingFilter.PerSelectorPolicies[cs] = newL7Rules

			if cs.IsWildcard() {
				existingFilter.wildcard = cs
			}
		}
	}
	return nil
}
// mergeIngressPortProto merges all rules which share the same port & protocol that
// select a given set of endpoints. It updates the L4Filter mapped to by the specified
// port and protocol with the contents of the provided PortRule. If the rule
// being merged has conflicting L7 rules with those already in the provided
// L4PolicyMap for the specified port-protocol tuple, it returns an error.
//
// If any rules contain L7 rules that select Host or Remote Node and we should
// accept all traffic from host, the L7 rules will be translated into L7
// wildcards via 'hostWildcardL7'. That is to say, traffic will be
// forwarded to the proxy for endpoints matching those labels, but the proxy
// will allow all such traffic.
//
// Returns 1 (the number of merged filters) on success.
func mergeIngressPortProto(policyCtx PolicyContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string,
	r api.Ports, p api.PortProtocol, proto api.L4Proto, resMap L4PolicyMap) (int, error) {
	// Create a new L4Filter for this port/protocol...
	filterToMerge, err := createL4IngressFilter(policyCtx, endpoints, auth, hostWildcardL7, r, p, proto)
	if err != nil {
		return 0, err
	}

	// ...and fold it into any existing filter under the same key.
	if err := addL4Filter(policyCtx, resMap, p, proto, filterToMerge); err != nil {
		return 0, err
	}

	// err is known nil here; return the literal for clarity.
	return 1, nil
}
// mergeIngress merges a single ingress rule's port and ICMP clauses into
// 'resMap', returning the number of merged filters. A rule with neither
// ports nor ICMP types but with explicit peers is merged as an L3-only
// (any-port, any-proto) filter. A nil 'fromEndpoints' means no peer is
// selected and the rule contributes nothing.
func mergeIngress(policyCtx PolicyContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, resMap L4PolicyMap) (int, error) {
	found := 0

	// short-circuit if no endpoint is selected
	if fromEndpoints == nil {
		return found, nil
	}

	// Daemon options may induce L3 allows for host/world. In this case, if
	// we find any L7 rules matching host/world then we need to turn any L7
	// restrictions on these endpoints into L7 allow-all so that the
	// traffic is always allowed, but is also always redirected through the
	// proxy
	hostWildcardL7 := make([]string, 0, 2)
	if option.Config.AlwaysAllowLocalhost() {
		hostWildcardL7 = append(hostWildcardL7, labels.IDNameHost)
	}

	var (
		cnt int
		err error
	)

	// L3-only rule (with requirements folded into fromEndpoints).
	if toPorts.Len() == 0 && icmp.Len() == 0 && len(fromEndpoints) > 0 {
		cnt, err = mergeIngressPortProto(policyCtx, fromEndpoints, auth, hostWildcardL7, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, resMap)
		if err != nil {
			return found, err
		}
	}
	// cnt is zero unless the L3-only branch above ran.
	found += cnt

	err = toPorts.Iterate(func(r api.Ports) error {
		// For L4 Policy, an empty slice of EndpointSelector indicates that the
		// rule allows all at L3 - explicitly specify this by creating a slice
		// with the WildcardEndpointSelector.
		if len(fromEndpoints) == 0 {
			fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
		}
		if !policyCtx.IsDeny() {
			policyCtx.PolicyTrace("  Allows port %v\n", r.GetPortProtocols())
		} else {
			policyCtx.PolicyTrace("  Denies port %v\n", r.GetPortProtocols())
		}

		// Trace any attached L7 rules for debugging.
		pr := r.GetPortRule()
		if pr != nil {
			if pr.Rules != nil && pr.Rules.L7Proto != "" {
				policyCtx.PolicyTrace("    l7proto: \"%s\"\n", pr.Rules.L7Proto)
			}
			if !pr.Rules.IsEmpty() {
				for _, l7 := range pr.Rules.HTTP {
					policyCtx.PolicyTrace("      %+v\n", l7)
				}
				for _, l7 := range pr.Rules.Kafka {
					policyCtx.PolicyTrace("      %+v\n", l7)
				}
				for _, l7 := range pr.Rules.L7 {
					policyCtx.PolicyTrace("      %+v\n", l7)
				}
			}
		}

		for _, p := range r.GetPortProtocols() {
			if p.Protocol.IsAny() {
				// "ANY" expands to one filter per supported L4 protocol.
				cnt, err := mergeIngressPortProto(policyCtx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoTCP, resMap)
				if err != nil {
					return err
				}
				found += cnt

				cnt, err = mergeIngressPortProto(policyCtx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoUDP, resMap)
				if err != nil {
					return err
				}
				found += cnt

				cnt, err = mergeIngressPortProto(policyCtx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoSCTP, resMap)
				if err != nil {
					return err
				}
				found += cnt
			} else {
				cnt, err := mergeIngressPortProto(policyCtx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, resMap)
				if err != nil {
					return err
				}
				found += cnt
			}
		}
		return nil
	})
	if err != nil {
		return found, err
	}

	err = icmp.Iterate(func(r api.Ports) error {
		// As above: empty peer slice means wildcard at L3.
		if len(fromEndpoints) == 0 {
			fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
		}
		if !policyCtx.IsDeny() {
			policyCtx.PolicyTrace("  Allows ICMP type %v\n", r.GetPortProtocols())
		} else {
			policyCtx.PolicyTrace("  Denies ICMP type %v\n", r.GetPortProtocols())
		}

		for _, p := range r.GetPortProtocols() {
			cnt, err := mergeIngressPortProto(policyCtx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, resMap)
			if err != nil {
				return err
			}
			found += cnt
		}
		return nil
	})

	return found, err
}
// resolveIngressPolicy analyzes the rule against the given SearchContext, and
// merges it with any prior-generated policy within the provided L4Policy.
// Requirements based off of all Ingress requirements (set in FromRequires) in
// other rules are stored in the specified slice of LabelSelectorRequirement.
// These requirements are dynamically inserted into a copy of the receiver rule,
// as requirements form conjunctions across all rules.
func (r *rule) resolveIngressPolicy(
	policyCtx PolicyContext,
	state *traceState,
	result L4PolicyMap,
	requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement,
) error {
	state.selectRule(policyCtx, r)
	found, foundDeny := 0, 0

	policyCtx.SetOrigin(r.origin())
	if len(r.Ingress) == 0 && len(r.IngressDeny) == 0 {
		policyCtx.PolicyTrace("    No ingress rules\n")
	}

	// Merge allow rules first.
	for _, ingressRule := range r.Ingress {
		fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirements)
		cnt, err := mergeIngress(policyCtx, fromEndpoints, ingressRule.Authentication, ingressRule.ToPorts, ingressRule.ICMPs, result)
		if err != nil {
			return err
		}
		if cnt > 0 {
			found += cnt
		}
	}

	// Merge deny rules with the context's deny flag set; restore it on exit.
	oldDeny := policyCtx.SetDeny(true)
	defer func() {
		policyCtx.SetDeny(oldDeny)
	}()
	for _, ingressRule := range r.IngressDeny {
		fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirementsDeny)
		cnt, err := mergeIngress(policyCtx, fromEndpoints, nil, ingressRule.ToPorts, ingressRule.ICMPs, result)
		if err != nil {
			return err
		}
		if cnt > 0 {
			foundDeny += cnt
		}
	}

	// Update trace-state counters for rules that contributed filters.
	if found != 0 {
		state.matchedRules++
	}
	if foundDeny != 0 {
		state.matchedDenyRules++
	}

	return nil
}
// matchesSubject reports whether this rule's subject selector (node selector
// or endpoint selector) applies to the given security identity.
func (r *rule) matchesSubject(securityIdentity *identity.Identity) bool {
	isHost := securityIdentity.ID == identity.ReservedIdentityHost
	selectsNode := r.NodeSelector.LabelSelector != nil

	// Short-circuit if the rule's selector type (node vs. endpoint) does not
	// match the identity's type.
	if selectsNode != isHost {
		return false
	}

	// Fall back to explicit label matching for the local node because the
	// local node has mutable labels, which are applied asynchronously to the
	// SelectorCache. The same fallback applies when no cached subject
	// selector is available.
	switch {
	case r.subjectSelector == nil, selectsNode:
		return r.getSelector().Matches(securityIdentity.LabelArray)
	default:
		return r.subjectSelector.Selects(versioned.Latest(), securityIdentity.ID)
	}
}
// getSubjects returns the numeric identities currently selected by this
// rule's subject selector. A node-selecting rule always resolves to the
// reserved host identity.
func (r *rule) getSubjects() []identity.NumericIdentity {
	if r.NodeSelector.LabelSelector == nil {
		return r.subjectSelector.GetSelections(versioned.Latest())
	}
	return []identity.NumericIdentity{identity.ReservedIdentityHost}
}
// ****************** EGRESS POLICY ******************
// mergeEgress merges the egress portion of a single rule into resMap and
// returns the number of L4 filters created or updated.
//
// Three cases are handled:
//   - an L3-only rule (no ToPorts, no ICMPs) is recorded as a wildcard
//     port/protocol ("0"/ANY) filter for the selected endpoints;
//   - each ToPorts entry is merged per port/protocol, with protocol ANY
//     expanded to TCP, UDP and SCTP;
//   - each ICMP entry is merged per type/protocol.
//
// A nil toEndpoints slice selects no endpoint and is a no-op; an empty
// (non-nil) slice means "allow all at L3" and is replaced by the wildcard
// endpoint selector before L4 entries are merged.
func mergeEgress(policyCtx PolicyContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) {
	found := 0
	// short-circuit if no endpoint is selected
	if toEndpoints == nil {
		return found, nil
	}
	var (
		cnt int
		err error
	)
	// L3-only rule (with requirements folded into toEndpoints).
	if toPorts.Len() == 0 && icmp.Len() == 0 && len(toEndpoints) > 0 {
		cnt, err = mergeEgressPortProto(policyCtx, toEndpoints, auth, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, resMap, fqdns)
		if err != nil {
			return found, err
		}
	}
	// cnt remains zero when the L3-only branch above was not taken.
	found += cnt
	err = toPorts.Iterate(func(r api.Ports) error {
		// For L4 Policy, an empty slice of EndpointSelector indicates that the
		// rule allows all at L3 - explicitly specify this by creating a slice
		// with the WildcardEndpointSelector.
		if len(toEndpoints) == 0 {
			toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
		}
		if !policyCtx.IsDeny() {
			policyCtx.PolicyTrace("  Allows port %v\n", r.GetPortProtocols())
		} else {
			policyCtx.PolicyTrace("  Denies port %v\n", r.GetPortProtocols())
		}
		// Trace any L7 rules attached to this port rule.
		pr := r.GetPortRule()
		if pr != nil {
			if !pr.Rules.IsEmpty() {
				for _, l7 := range pr.Rules.HTTP {
					policyCtx.PolicyTrace("    %+v\n", l7)
				}
				for _, l7 := range pr.Rules.Kafka {
					policyCtx.PolicyTrace("    %+v\n", l7)
				}
				for _, l7 := range pr.Rules.L7 {
					policyCtx.PolicyTrace("    %+v\n", l7)
				}
			}
		}
		for _, p := range r.GetPortProtocols() {
			if p.Protocol.IsAny() {
				// Protocol ANY expands to one filter per L4 protocol.
				cnt, err := mergeEgressPortProto(policyCtx, toEndpoints, auth, r, p, api.ProtoTCP, resMap, fqdns)
				if err != nil {
					return err
				}
				found += cnt
				cnt, err = mergeEgressPortProto(policyCtx, toEndpoints, auth, r, p, api.ProtoUDP, resMap, fqdns)
				if err != nil {
					return err
				}
				found += cnt
				cnt, err = mergeEgressPortProto(policyCtx, toEndpoints, auth, r, p, api.ProtoSCTP, resMap, fqdns)
				if err != nil {
					return err
				}
				found += cnt
			} else {
				cnt, err := mergeEgressPortProto(policyCtx, toEndpoints, auth, r, p, p.Protocol, resMap, fqdns)
				if err != nil {
					return err
				}
				found += cnt
			}
		}
		return nil
	},
	)
	if err != nil {
		return found, err
	}
	// Merge ICMP entries, one filter per type/protocol.
	err = icmp.Iterate(func(r api.Ports) error {
		if len(toEndpoints) == 0 {
			toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
		}
		if !policyCtx.IsDeny() {
			policyCtx.PolicyTrace("  Allows ICMP type %v\n", r.GetPortProtocols())
		} else {
			policyCtx.PolicyTrace("  Denies ICMP type %v\n", r.GetPortProtocols())
		}
		for _, p := range r.GetPortProtocols() {
			cnt, err := mergeEgressPortProto(policyCtx, toEndpoints, auth, r, p, p.Protocol, resMap, fqdns)
			if err != nil {
				return err
			}
			found += cnt
		}
		return nil
	})
	return found, err
}
// mergeEgressPortProto merges all rules which share the same port & protocol that
// select a given set of endpoints. It updates the L4Filter mapped to by the specified
// port and protocol with the contents of the provided PortRule. If the rule
// being merged has conflicting L7 rules with those already in the provided
// L4PolicyMap for the specified port-protocol tuple, it returns an error.
//
// Returns the number of filters merged: 1 on success, 0 on error.
func mergeEgressPortProto(policyCtx PolicyContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, r api.Ports, p api.PortProtocol,
	proto api.L4Proto, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) {
	// Create a new L4Filter from the rule contents for this port/proto.
	filterToMerge, err := createL4EgressFilter(policyCtx, endpoints, auth, r, p, proto, fqdns)
	if err != nil {
		return 0, err
	}
	// Merge the new filter into the result map; this is where conflicting
	// L7 rules for the same port-protocol tuple are detected.
	if err := addL4Filter(policyCtx, resMap, p, proto, filterToMerge); err != nil {
		return 0, err
	}
	// Idiom fix: the previous code returned the (necessarily nil) err
	// variable here; return nil explicitly on the success path.
	return 1, nil
}
// resolveEgressPolicy analyzes the rule against the given context and merges
// its egress allow and deny rules into result, updating the match counters in
// state. Requirements collected from all other rules' FromRequires/ToRequires
// are folded into the destination selectors before merging.
func (r *rule) resolveEgressPolicy(
	policyCtx PolicyContext,
	state *traceState,
	result L4PolicyMap,
	requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement,
) error {
	state.selectRule(policyCtx, r)
	policyCtx.SetOrigin(r.origin())

	if len(r.Egress) == 0 && len(r.EgressDeny) == 0 {
		policyCtx.PolicyTrace("  No egress rules\n")
	}

	// Merge all allow rules first, counting how many L4 filters they yield.
	var allowCount int
	for i := range r.Egress {
		er := &r.Egress[i]
		dst := er.GetDestinationEndpointSelectorsWithRequirements(requirements)
		n, err := mergeEgress(policyCtx, dst, er.Authentication, er.ToPorts, er.ICMPs, result, er.ToFQDNs)
		if err != nil {
			return err
		}
		if n > 0 {
			allowCount += n
		}
	}

	// Deny rules are merged with the context switched into deny mode; the
	// previous mode is restored when this function returns.
	prevDeny := policyCtx.SetDeny(true)
	defer policyCtx.SetDeny(prevDeny)

	var denyCount int
	for i := range r.EgressDeny {
		er := &r.EgressDeny[i]
		dst := er.GetDestinationEndpointSelectorsWithRequirements(requirementsDeny)
		n, err := mergeEgress(policyCtx, dst, nil, er.ToPorts, er.ICMPs, result, nil)
		if err != nil {
			return err
		}
		if n > 0 {
			denyCount += n
		}
	}

	if allowCount != 0 {
		state.matchedRules++
	}
	if denyCount != 0 {
		state.matchedDenyRules++
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"strings"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/u8proto"
)
// TestL4Policy verifies basic L4/L7 policy resolution:
//   - ingress HTTP rules on ports 80 and 8080 each produce a
//     wildcard-selector filter carrying the HTTP rule;
//   - an egress rule with protocol ANY on port 3000 expands into separate
//     TCP, UDP and SCTP filters;
//   - a second rule that additionally wildcards port 80 at L4 merges into the
//     L7 rule as a trailing empty PortRuleHTTP entry.
func TestL4Policy(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// rule1: ingress allows HTTP GET / on 80 and 8080; egress allows port
	// 3000 on any protocol.
	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
						{Port: "8080", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
		Egress: []api.EgressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "3000", Protocol: api.ProtoAny},
					},
				}},
			},
		},
	}

	// Expected per-selector L7 policy shared by both ingress ports.
	l7rules := api.L7Rules{
		HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
	}
	l7map := L7DataMap{
		td.wildcardCachedSelector: &PerSelectorPolicy{
			L7Parser: ParserTypeHTTP,
			Priority: ListenerPriorityHTTP,
			L7Rules:  l7rules,
		},
	}

	expected := NewL4Policy(0)
	expected.Ingress.PortRules.Upsert("80", 0, "TCP", &L4Filter{
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard:            td.wildcardCachedSelector,
		PerSelectorPolicies: l7map, Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})
	expected.Ingress.PortRules.Upsert("8080", 0, "TCP", &L4Filter{
		Port: 8080, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard:            td.wildcardCachedSelector,
		PerSelectorPolicies: l7map, Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})

	// Egress protocol ANY expands to TCP, UDP and SCTP filters on 3000,
	// none of them carrying an L7 policy.
	expected.Egress.PortRules.Upsert("3000", 0, "TCP", &L4Filter{
		Port: 3000, Protocol: api.ProtoTCP, U8Proto: 6, Ingress: false,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})
	expected.Egress.PortRules.Upsert("3000", 0, "UDP", &L4Filter{
		Port: 3000, Protocol: api.ProtoUDP, U8Proto: 17, Ingress: false,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})
	expected.Egress.PortRules.Upsert("3000", 0, "SCTP", &L4Filter{
		Port: 3000, Protocol: api.ProtoSCTP, U8Proto: 132, Ingress: false,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})

	td.policyMapEquals(t, expected.Ingress.PortRules, expected.Egress.PortRules, &rule1)

	// This rule actually overlaps with the existing ingress "http" rule,
	// so we'd expect it to merge.
	rule2 := api.Rule{
		Ingress: []api.IngressRule{
			{
				// Note that this allows all on 80, so the result should wildcard HTTP
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
		Egress: []api.EgressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "3000", Protocol: api.ProtoAny},
					},
				}},
			},
		},
	}

	expected = NewL4Policy(0)
	// The L4-only allow on 80 merges into the HTTP policy as the trailing
	// empty PortRuleHTTP ({}), wildcarding L7 for the wildcard selector.
	expected.Ingress.PortRules.Upsert("80", 0, "TCP", &L4Filter{
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
				},
			},
		},
		Ingress:    true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})
	expected.Egress.PortRules.Upsert("3000", 0, "TCP", &L4Filter{
		Port: 3000, Protocol: api.ProtoTCP, U8Proto: 6, Ingress: false,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})
	expected.Egress.PortRules.Upsert("3000", 0, "UDP", &L4Filter{
		Port: 3000, Protocol: api.ProtoUDP, U8Proto: 17, Ingress: false,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})
	expected.Egress.PortRules.Upsert("3000", 0, "SCTP", &L4Filter{
		Port: 3000, Protocol: api.ProtoSCTP, U8Proto: 132, Ingress: false,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	})

	td.policyMapEquals(t, expected.Ingress.PortRules, expected.Egress.PortRules, &rule2)
}
// TestMergeL4PolicyIngress checks that two L3-dependent ingress rules on the
// same port/protocol are merged into a single L4 filter carrying both source
// selectors.
func TestMergeL4PolicyIngress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// Both ingress rules allow the same port; only the source selector differs.
	mkPorts := func() []api.PortRule {
		return []api.PortRule{{
			Ports: []api.PortProtocol{{Port: "80", Protocol: api.ProtoTCP}},
		}}
	}
	rule := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{fooSelector},
				},
				ToPorts: mkPorts(),
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{bazSelector},
				},
				ToPorts: mkPorts(),
			},
		},
	}

	// Both selectors land in one 80/TCP filter, neither with an L7 policy.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		PerSelectorPolicies: L7DataMap{
			td.cachedFooSelector: nil,
			td.cachedBazSelector: nil,
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedFooSelector: {nil},
			td.cachedBazSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, expected, nil, &rule)
}
// TestMergeL4PolicyEgress checks that two L3-dependent egress rules on the
// same port/protocol are merged into a single L4 filter carrying both
// destination selectors.
func TestMergeL4PolicyEgress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// A can access B with TCP on port 80, and C with TCP on port 80.
	mkPorts := func() []api.PortRule {
		return []api.PortRule{{
			Ports: []api.PortProtocol{{Port: "80", Protocol: api.ProtoTCP}},
		}}
	}
	rule1 := api.Rule{
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: mkPorts(),
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: mkPorts(),
			},
		},
	}

	// Both selectors land in one 80/TCP filter, neither with an L7 policy.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: nil,
			td.cachedSelectorC: nil,
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})

	td.policyMapEquals(t, nil, expected, &rule1)
}
// TestMergeL7PolicyIngress exercises merging of L7 ingress rules on a shared
// port:
//   - an L4-only allow wildcards the wildcard selector's HTTP rules (trailing
//     empty entry), while an L3-dependent rule keeps its own L7 rules;
//   - identical Kafka rules from L4-only and L3-dependent rules are kept per
//     selector;
//   - mixing HTTP and Kafka rules on the same port is rejected;
//   - differing Kafka topics per selector are NOT merged together.
func TestMergeL7PolicyIngress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// rule1: L4-only allow on 80, plus HTTP GET / from both an L4-only rule
	// and an L3-dependent (selector B) rule.
	rule1 := api.Rule{
		Ingress: []api.IngressRule{
			{
				// Note that this allows all on 80, so the result should wildcard HTTP
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: api.EndpointSelectorSlice{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}

	// The wildcard selector's HTTP rules gain the empty {} entry from the
	// L4-only allow; selector B keeps only its explicit rule.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port:     80,
		Protocol: api.ProtoTCP,
		U8Proto:  6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
				},
			},
			td.cachedSelectorB: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, expected, nil, &rule1)

	// rule2: identical Kafka rules from an L4-only and an L3-dependent rule.
	rule2 := api.Rule{
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: api.EndpointSelectorSlice{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
		},
	}

	l7rules := api.L7Rules{
		Kafka: []kafka.PortRule{{Topic: "foo"}},
	}
	l7map := L7DataMap{
		td.wildcardCachedSelector: &PerSelectorPolicy{
			L7Parser: ParserTypeKafka,
			Priority: ListenerPriorityKafka,
			L7Rules:  l7rules,
		},
		td.cachedSelectorB: &PerSelectorPolicy{
			L7Parser: ParserTypeKafka,
			Priority: ListenerPriorityKafka,
			L7Rules:  l7rules,
		},
	}

	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard:            td.wildcardCachedSelector,
		PerSelectorPolicies: l7map, Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, expected, nil, &rule2)

	// HTTP (rule1) and Kafka (rule2) on the same port cannot coexist:
	// conflicting L7 parsers are rejected.
	td.policyInvalid(t, "cannot merge conflicting L7 parsers", &rule1, &rule2)

	// Similar to 'rule2', but with different topics for the l3-dependent
	// rule and the l4-only rule.
	rule3 := api.Rule{
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: api.EndpointSelectorSlice{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "bar"},
						},
					},
				}},
			},
		},
	}

	fooRules := api.L7Rules{
		Kafka: []kafka.PortRule{{Topic: "foo"}},
	}
	barRules := api.L7Rules{
		Kafka: []kafka.PortRule{{Topic: "bar"}},
	}

	// The L3-dependent L7 rules are not merged together.
	l7map = L7DataMap{
		td.cachedSelectorB: &PerSelectorPolicy{
			L7Parser: ParserTypeKafka,
			Priority: ListenerPriorityKafka,
			L7Rules:  fooRules,
		},
		td.wildcardCachedSelector: &PerSelectorPolicy{
			L7Parser: ParserTypeKafka,
			Priority: ListenerPriorityKafka,
			L7Rules:  barRules,
		},
	}
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard:            td.wildcardCachedSelector,
		PerSelectorPolicies: l7map, Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, expected, nil, &rule3)
}
// TestMergeL7PolicyEgress mirrors TestMergeL7PolicyIngress for the egress
// direction: an L4-only allow wildcards the wildcard selector's L7 rules on
// the same port, while L3-dependent L7 rules remain attached to their own
// selector and are never merged with the L4-only ones.
func TestMergeL7PolicyEgress(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// rule1: L4-only allow on 80, HTTP GET /public from an L4-only rule, and
	// HTTP GET /private restricted to selector B.
	rule1 := api.Rule{
		Egress: []api.EgressRule{
			{
				// Note that this allows all on 80, so the result should wildcard HTTP
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/public"},
						},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/private"},
						},
					},
				}},
			},
		},
	}

	// The wildcard selector gets /public plus the empty {} entry (from the
	// L4-only allow); selector B only gets /private.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/public", Method: "GET"}, {}},
				},
			},
			td.cachedSelectorB: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/private", Method: "GET"}},
				},
			},
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.wildcardCachedSelector: {nil},
			td.cachedSelectorB:        {nil},
		}),
	}})

	td.policyMapEquals(t, nil, expected, &rule1)

	// rule2: the analogous pattern on 9092 with Kafka instead of HTTP.
	rule2 := api.Rule{
		Egress: []api.EgressRule{
			{
				// Note that this allows all on 9092, so the result should wildcard Kafka
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "9092", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
		},
	}

	// rule1 and rule2 are resolved together below, so the expectation covers
	// both the HTTP port (80) and the Kafka port (9092).
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{
		"80/TCP": {
			Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{{Path: "/public", Method: "GET"}, {}},
					},
				},
				td.cachedSelectorB: &PerSelectorPolicy{
					L7Parser: ParserTypeHTTP,
					Priority: ListenerPriorityHTTP,
					L7Rules: api.L7Rules{
						HTTP: []api.PortRuleHTTP{{Path: "/private", Method: "GET"}},
					},
				},
			},
			Ingress: false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				td.wildcardCachedSelector: {nil},
				td.cachedSelectorB:        {nil},
			}),
		},
		"9092/TCP": {
			Port: 9092, Protocol: api.ProtoTCP, U8Proto: 6,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: &PerSelectorPolicy{
					L7Parser: ParserTypeKafka,
					Priority: ListenerPriorityKafka,
					L7Rules: api.L7Rules{
						Kafka: []kafka.PortRule{{Topic: "foo"}, {}},
					},
				},
				td.cachedSelectorB: &PerSelectorPolicy{
					L7Parser: ParserTypeKafka,
					Priority: ListenerPriorityKafka,
					L7Rules: api.L7Rules{
						Kafka: []kafka.PortRule{{Topic: "foo"}},
					},
				},
			},
			Ingress: false,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
				td.cachedSelectorB:        {nil},
				td.wildcardCachedSelector: {nil},
			}),
		},
	})

	td.policyMapEquals(t, nil, expected, &rule1, &rule2)

	// Similar to 'rule2', but with different topics for the l3-dependent
	// rule and the l4-only rule.
	rule3 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "foo"},
						},
					},
				}},
			},
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						Kafka: []kafka.PortRule{
							{Topic: "bar"},
						},
					},
				}},
			},
		},
	}

	fooRules := api.L7Rules{
		Kafka: []kafka.PortRule{{Topic: "foo"}},
	}
	barRules := api.L7Rules{
		Kafka: []kafka.PortRule{{Topic: "bar"}},
	}

	// The l3-dependent l7 rules are not merged together.
	l7map := L7DataMap{
		td.cachedSelectorB: &PerSelectorPolicy{
			L7Parser: ParserTypeKafka,
			Priority: ListenerPriorityKafka,
			L7Rules:  fooRules,
		},
		td.wildcardCachedSelector: &PerSelectorPolicy{
			L7Parser: ParserTypeKafka,
			Priority: ListenerPriorityKafka,
			L7Rules:  barRules,
		},
	}
	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard:            td.wildcardCachedSelector,
		PerSelectorPolicies: l7map, Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, nil, expected, &rule3)
}
// TestRuleWithNoEndpointSelector verifies that an otherwise well-formed rule
// fails sanitization when it specifies no subject selector at all.
func TestRuleWithNoEndpointSelector(t *testing.T) {
	missingSelectorRule := api.Rule{
		Ingress: []api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromCIDR: []api.CIDR{
					"10.0.1.0/24",
					"192.168.2.0",
					"10.0.3.1",
					"2001:db8::1/48",
					"2001:db9::",
				},
			},
		}},
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToCIDR: []api.CIDR{
						"10.1.0.0/16",
						"2001:dbf::/64",
					},
				},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToCIDRSet: []api.CIDRRule{{Cidr: api.CIDR("10.0.0.0/8"), ExceptCIDRs: []api.CIDR{"10.96.0.0/12"}}},
				},
			},
		},
	}
	require.Error(t, missingSelectorRule.Sanitize())
}
// TestL3Policy exercises CIDR validation in rule sanitization: a mixture of
// valid v4/v6 CIDRs and host IPs passes, while malformed CIDRs, empty or
// bare-IP CIDRRules, out-of-range exclusions, non-contiguous masks, and
// out-of-range prefix lengths are all rejected.
func TestL3Policy(t *testing.T) {
	// A rule mixing valid v4/v6 CIDRs and host IPs on both directions must
	// sanitize cleanly, both as an api.Rule and wrapped in the internal rule
	// type.
	validRule := api.Rule{
		EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
		Ingress: []api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromCIDR: []api.CIDR{
					"10.0.1.0/24",
					"192.168.2.0",
					"10.0.3.1",
					"2001:db8::1/48",
					"2001:db9::",
				},
			},
		}},
		Egress: []api.EgressRule{
			{
				EgressCommonRule: api.EgressCommonRule{
					ToCIDR: []api.CIDR{
						"10.1.0.0/16",
						"2001:dbf::/64",
					},
				},
			},
			{
				EgressCommonRule: api.EgressCommonRule{
					ToCIDRSet: []api.CIDRRule{{Cidr: api.CIDR("10.0.0.0/8"), ExceptCIDRs: []api.CIDR{"10.96.0.0/12"}}},
				},
			},
		},
	}
	require.NoError(t, validRule.Sanitize())
	wrapped := &rule{Rule: validRule}
	require.NoError(t, wrapped.Sanitize())

	// Helper building a rule whose ingress section carries the given
	// FromCIDR/FromCIDRSet content, so each case below stays one-liner-ish.
	ingressCIDRRule := func(ic api.IngressCommonRule) *api.Rule {
		return &api.Rule{
			EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
			Ingress:          []api.IngressRule{{IngressCommonRule: ic}},
		}
	}

	// Must be parsable; an unparsable CIDR is rejected.
	require.Error(t, ingressCIDRRule(api.IngressCommonRule{
		FromCIDR: []api.CIDR{"10.0.1..0/24"},
	}).Sanitize())

	// A CIDRRule without a CIDR (and no exceptions) fails: the CIDR itself
	// is required.
	require.Error(t, ingressCIDRRule(api.IngressCommonRule{
		FromCIDRSet: []api.CIDRRule{{Cidr: "", ExceptCIDRs: nil}},
	}).Sanitize())

	// A CIDRRule with only a CIDR is fine: ExceptCIDRs is optional.
	require.NoError(t, ingressCIDRRule(api.IngressCommonRule{
		FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.1.0/24", ExceptCIDRs: nil}},
	}).Sanitize())

	// A bare IP is not accepted in a CIDRRule; Cidr must be <IP>/<prefix>.
	require.Error(t, ingressCIDRRule(api.IngressCommonRule{
		FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.1.32", ExceptCIDRs: nil}},
	}).Sanitize())

	// Cannot exclude a range that is not part of the CIDR.
	require.Error(t, ingressCIDRRule(api.IngressCommonRule{
		FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.0.0/10", ExceptCIDRs: []api.CIDR{"10.64.0.0/11"}}},
	}).Sanitize())

	// Must have a contiguous mask.
	require.Error(t, ingressCIDRRule(api.IngressCommonRule{
		FromCIDR: []api.CIDR{"10.0.1.0/128.0.0.128"},
	}).Sanitize())

	// Prefix length must be in range for the address family.
	require.Error(t, ingressCIDRRule(api.IngressCommonRule{
		FromCIDR: []api.CIDR{"10.0.1.0/34"},
	}).Sanitize())
}
// TestICMPPolicy verifies resolution of ICMP-based rules:
//   - ingress/egress ICMP type fields become ICMP filters keyed by type;
//   - ICMP rules coexist with port-based rules in the same direction;
//   - Family "IPv6" yields an ICMPv6 filter.
func TestICMPPolicy(t *testing.T) {
	td := newTestData(hivetest.Logger(t))

	// A rule for ICMP
	i8 := intstr.FromInt(8)
	i9 := intstr.FromInt(9)

	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				ICMPs: api.ICMPRules{{
					Fields: []api.ICMPField{{
						Type: &i8,
					}},
				}},
			},
		},
		Egress: []api.EgressRule{
			{
				ICMPs: api.ICMPRules{{
					Fields: []api.ICMPField{{
						Type: &i9,
					}},
				}},
			},
		},
	}

	// The ICMP type is carried in the filter's Port field.
	expectedIn := NewL4PolicyMapWithValues(map[string]*L4Filter{"ICMP/8": {
		Port:     8,
		Protocol: api.ProtoICMP,
		U8Proto:  u8proto.ProtoIDs["icmp"],
		Ingress:  true,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	}})
	expectedOut := NewL4PolicyMapWithValues(map[string]*L4Filter{"ICMP/9": {
		Port:     9,
		Protocol: api.ProtoICMP,
		U8Proto:  u8proto.ProtoIDs["icmp"],
		Ingress:  false,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	}})

	td.policyMapEquals(t, expectedIn, expectedOut, &rule1)

	// A rule for Ports and ICMP
	rule2 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			}, {
				ICMPs: api.ICMPRules{{
					Fields: []api.ICMPField{{
						Type: &i8,
					}},
				}},
			},
		},
	}

	// Port and ICMP rules each resolve to their own filter.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"ICMP/8": {
			Port:     8,
			Protocol: api.ProtoICMP,
			U8Proto:  u8proto.ProtoIDs["icmp"],
			Ingress:  true,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: nil,
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
		"TCP/80": {
			Port:     80,
			Protocol: api.ProtoTCP,
			U8Proto:  u8proto.ProtoIDs["tcp"],
			Ingress:  true,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: nil,
			},
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
		},
	})

	td.policyMapEquals(t, expected, nil, &rule2)

	// A rule for ICMPv6
	icmpV6Type := intstr.FromInt(128)
	rule3 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				ICMPs: api.ICMPRules{{
					Fields: []api.ICMPField{{
						Family: "IPv6",
						Type:   &icmpV6Type,
					}},
				}},
			},
		},
	}

	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"ICMPV6/128": {
		Port:     128,
		Protocol: api.ProtoICMPv6,
		U8Proto:  u8proto.ProtoIDs["icmp"],
		Ingress:  true,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
	}})

	td.policyMapEquals(t, expected, nil, &rule3)
}
// TestIPProtocolsWithNoTransportPorts verifies that, with the
// EnableExtendedIPProtocols option set, rules may list IP protocols (VRRP,
// IGMP) without transport ports, each producing a wildcard-port ("0") filter.
func TestIPProtocolsWithNoTransportPorts(t *testing.T) {
	// Flip the option for the duration of the test and restore it afterwards.
	old := option.Config.EnableExtendedIPProtocols
	option.Config.EnableExtendedIPProtocols = true
	t.Cleanup(func() {
		option.Config.EnableExtendedIPProtocols = old
	})
	td := newTestData(hivetest.Logger(t))

	// Ingress allows VRRP and IGMP; egress allows VRRP. No ports are given.
	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{
					{
						Ports: []api.PortProtocol{
							{
								Protocol: api.ProtoVRRP,
							},
							{
								Protocol: api.ProtoIGMP,
							},
						},
					},
				},
			},
		},
		Egress: []api.EgressRule{
			{
				ToPorts: []api.PortRule{
					{
						Ports: []api.PortProtocol{
							{
								Protocol: api.ProtoVRRP,
							},
						},
					},
				},
			},
		},
	}

	expectedIn := NewL4PolicyMapWithValues(map[string]*L4Filter{
		"0/vrrp": {
			Port:     0,
			Protocol: api.ProtoVRRP,
			U8Proto:  u8proto.ProtoIDs["vrrp"],
			Ingress:  true,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: nil,
			},
		},
		"0/igmp": {
			Port:     0,
			Protocol: api.ProtoIGMP,
			U8Proto:  u8proto.ProtoIDs["igmp"],
			Ingress:  true,
			wildcard: td.wildcardCachedSelector,
			PerSelectorPolicies: L7DataMap{
				td.wildcardCachedSelector: nil,
			},
		},
	})

	// BUGFIX: the egress expectation was keyed "0/egress"; the key encodes
	// "port/protocol", so the VRRP egress filter must be keyed "0/vrrp",
	// consistent with the ingress keys above.
	expectedOut := NewL4PolicyMapWithValues(map[string]*L4Filter{"0/vrrp": {
		Port:     0,
		Protocol: api.ProtoVRRP,
		U8Proto:  u8proto.ProtoIDs["vrrp"],
		Ingress:  false,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
		},
	}})

	td.policyMapEquals(t, expectedIn, expectedOut, &rule1)
}
// Tests the restrictions of combining certain label-based L3 and L4 policies.
// This ensures that the user is informed of policy combinations that are not
// implemented in the datapath.
func TestEgressRuleRestrictions(t *testing.T) {
	// ToEndpoints and ToCIDR are mutually exclusive within a single egress
	// rule; sanitization must reject the combination.
	invalid := api.Rule{
		EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
		Egress: []api.EgressRule{{
			EgressCommonRule: api.EgressCommonRule{
				ToCIDR: []api.CIDR{
					"10.1.0.0/16",
					"2001:dbf::/64",
				},
				ToEndpoints: []api.EndpointSelector{
					api.NewESFromLabels(labels.ParseSelectLabel("foo")),
				},
			},
		}},
	}
	require.Error(t, invalid.Sanitize())
}
// TestPolicyEntityValidationEgress checks that egress ToEntities values are
// validated by Sanitize: the known entities "world" and "host" are accepted
// and preserved, while an unknown entity name is rejected.
func TestPolicyEntityValidationEgress(t *testing.T) {
	rule := api.Rule{
		EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
		Egress: []api.EgressRule{{
			EgressCommonRule: api.EgressCommonRule{
				ToEntities: []api.Entity{api.EntityWorld},
			},
		}},
	}

	// Known entity "world" passes validation and is kept as-is.
	require.NoError(t, rule.Sanitize())
	require.Len(t, rule.Egress[0].ToEntities, 1)

	// Known entity "host" passes validation as well.
	rule.Egress[0].ToEntities = []api.Entity{api.EntityHost}
	require.NoError(t, rule.Sanitize())
	require.Len(t, rule.Egress[0].ToEntities, 1)

	// An unknown entity name must be rejected.
	rule.Egress[0].ToEntities = []api.Entity{"trololo"}
	require.Error(t, rule.Sanitize())
}
// TestPolicyEntityValidationIngress checks that ingress FromEntities values
// are validated by Sanitize: the known entities "world" and "host" are
// accepted and preserved, while an unknown entity name is rejected.
func TestPolicyEntityValidationIngress(t *testing.T) {
	rule := api.Rule{
		EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
		Ingress: []api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEntities: []api.Entity{api.EntityWorld},
			},
		}},
	}

	// Known entity "world" passes validation and is kept as-is.
	require.NoError(t, rule.Sanitize())
	require.Len(t, rule.Ingress[0].FromEntities, 1)

	// Known entity "host" passes validation as well.
	rule.Ingress[0].FromEntities = []api.Entity{api.EntityHost}
	require.NoError(t, rule.Sanitize())
	require.Len(t, rule.Ingress[0].FromEntities, 1)

	// An unknown entity name must be rejected.
	rule.Ingress[0].FromEntities = []api.Entity{"trololo"}
	require.Error(t, rule.Sanitize())
}
// TestPolicyEntityValidationEntitySelectorsFill verifies that Sanitize keeps
// every listed entity when multiple valid entities appear in both the
// ingress (FromEntities) and egress (ToEntities) directions of one rule.
func TestPolicyEntityValidationEntitySelectorsFill(t *testing.T) {
	both := []api.Entity{api.EntityWorld, api.EntityHost}
	rule := api.Rule{
		EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
		Ingress: []api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEntities: both,
			},
		}},
		Egress: []api.EgressRule{{
			EgressCommonRule: api.EgressCommonRule{
				ToEntities: both,
			},
		}},
	}

	require.NoError(t, rule.Sanitize())
	// Both entities survive sanitization in each direction.
	require.Len(t, rule.Ingress[0].FromEntities, 2)
	require.Len(t, rule.Egress[0].ToEntities, 2)
}
// TestL3RuleLabels verifies that, for L3 (CIDR-based) rules, the labels of
// every matching rule are recorded in the resolved policy's RuleOrigin, and
// that the per-selector CIDR identities actually match the expected CIDR
// prefixes. Each test case applies a subset of the rules map and checks the
// labels attached to the wildcard "0/ANY" filter in each direction.
func TestL3RuleLabels(t *testing.T) {
	ruleLabels := map[string]labels.LabelArray{
		"rule0": labels.ParseLabelArray("name=apiRule0"),
		"rule1": labels.ParseLabelArray("name=apiRule1"),
		"rule2": labels.ParseLabelArray("name=apiRule2"),
	}
	// rule0 is an empty (no L3/L4) rule; rule1 and rule2 each allow one
	// ingress CIDR and one egress CIDR, with distinct labels per rule.
	rules := map[string]api.Rule{
		"rule0": {
			EndpointSelector: endpointSelectorA,
			Labels:           ruleLabels["rule0"],
			Ingress:          []api.IngressRule{{}},
			Egress:           []api.EgressRule{{}},
		},
		"rule1": {
			EndpointSelector: endpointSelectorA,
			Labels:           ruleLabels["rule1"],
			Ingress: []api.IngressRule{
				{
					IngressCommonRule: api.IngressCommonRule{
						FromCIDR: []api.CIDR{"10.0.1.0/32"},
					},
				},
			},
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToCIDR: []api.CIDR{"10.1.0.0/32"},
					},
				},
			},
		},
		"rule2": {
			EndpointSelector: endpointSelectorA,
			Labels:           ruleLabels["rule2"],
			Ingress: []api.IngressRule{
				{
					IngressCommonRule: api.IngressCommonRule{
						FromCIDR: []api.CIDR{"10.0.2.0/32"},
					},
				},
			},
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToCIDR: []api.CIDR{"10.2.0.0/32"},
					},
				},
			},
		},
	}
	testCases := []struct {
		description           string                           // the description to print in asserts
		rulesToApply          []string                         // the rules from the rules map to resolve, in order
		expectedIngressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, per CIDR prefix
		expectedEgressLabels  map[string]labels.LabelArrayList // the slice of LabelArray we should see, per CIDR prefix
	}{
		{
			description:           "Empty rule that matches. Should not apply labels",
			rulesToApply:          []string{"rule0"},
			expectedIngressLabels: nil,
			expectedEgressLabels:  nil,
		}, {
			description:           "A rule that matches. Should apply labels",
			rulesToApply:          []string{"rule1"},
			expectedIngressLabels: map[string]labels.LabelArrayList{"10.0.1.0/32": {ruleLabels["rule1"]}},
			expectedEgressLabels:  map[string]labels.LabelArrayList{"10.1.0.0/32": {ruleLabels["rule1"]}},
		}, {
			description:  "Multiple matching rules. Should apply labels from all that have rule entries",
			rulesToApply: []string{"rule0", "rule1", "rule2"},
			expectedIngressLabels: map[string]labels.LabelArrayList{
				"10.0.1.0/32": {ruleLabels["rule1"]},
				"10.0.2.0/32": {ruleLabels["rule2"]}},
			expectedEgressLabels: map[string]labels.LabelArrayList{
				"10.1.0.0/32": {ruleLabels["rule1"]},
				"10.2.0.0/32": {ruleLabels["rule2"]}},
		}}
	// endpoint selector for all tests
	for i, test := range testCases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
			for _, r := range test.rulesToApply {
				td.repo.mustAdd(rules[r])
			}
			finalPolicy, err := td.repo.resolvePolicyLocked(idA)
			require.NoError(t, err)
			type expectedResult map[string]labels.LabelArrayList
			// L3-only rules land on the wildcard port/proto filter ("0"/ANY)
			// in each direction; map each filter to its expected labels.
			mapDirectionalResultsToExpectedOutput := map[*L4Filter]expectedResult{
				finalPolicy.L4Policy.Ingress.PortRules.ExactLookup("0", 0, "ANY"): test.expectedIngressLabels,
				finalPolicy.L4Policy.Egress.PortRules.ExactLookup("0", 0, "ANY"):  test.expectedEgressLabels,
			}
			for filter, exp := range mapDirectionalResultsToExpectedOutput {
				if len(exp) > 0 {
					for cidr, rule := range exp {
						// The filter's RuleOrigin must contain the labels of
						// the rule that contributed this CIDR.
						matches := false
						for _, origin := range filter.RuleOrigin {
							lbls := origin.GetLabelArrayList()
							if lbls.Equals(rule) {
								matches = true
								break
							}
						}
						require.True(t, matches, "%s: expected filter %+v to be derived from rule %s", test.description, filter, rule)
						// One of the filter's selectors must actually match
						// the synthetic "cidr:<prefix>" label; this reaches
						// into internal selector types to do so.
						matches = false
						for sel := range filter.PerSelectorPolicies {
							cidrLabels := labels.ParseLabelArray("cidr:" + cidr)
							t.Logf("Testing %+v", cidrLabels)
							if matches = sel.(*identitySelector).source.(*labelIdentitySelector).xxxMatches(cidrLabels); matches {
								break
							}
						}
						require.True(t, matches, "%s: expected cidr %s to match filter %+v", test.description, cidr, filter)
					}
				}
			}
		})
	}
}
// TestL4RuleLabels verifies that, for L4 (port-based) rules, the labels of
// every matching rule are recorded on the resolved filter for the matching
// port/protocol, in both the ingress and egress directions. Each test case
// applies a subset of the rules map and then looks up each expected
// port/proto filter to compare its wildcard RuleOrigin labels.
func TestL4RuleLabels(t *testing.T) {
	ruleLabels := map[string]labels.LabelArray{
		"rule0": labels.ParseLabelArray("name=apiRule0"),
		"rule1": labels.ParseLabelArray("name=apiRule1"),
		"rule2": labels.ParseLabelArray("name=apiRule2"),
	}
	// rule0 is empty; rule1 allows TCP 1010 in / 1100 out; rule2 allows
	// TCP 1020 in / 1200 out. Distinct ports let each filter be traced back
	// to exactly one rule's labels.
	rules := map[string]api.Rule{
		"rule0": {
			EndpointSelector: endpointSelectorA,
			Labels:           ruleLabels["rule0"],
			Ingress:          []api.IngressRule{{}},
			Egress:           []api.EgressRule{{}},
		},
		"rule1": {
			EndpointSelector: endpointSelectorA,
			Labels:           ruleLabels["rule1"],
			Ingress: []api.IngressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{{Port: "1010", Protocol: api.ProtoTCP}},
					}},
				},
			},
			Egress: []api.EgressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{{Port: "1100", Protocol: api.ProtoTCP}},
					}},
				},
			},
		},
		"rule2": {
			EndpointSelector: endpointSelectorA,
			Labels:           ruleLabels["rule2"],
			Ingress: []api.IngressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{{Port: "1020", Protocol: api.ProtoTCP}},
					}},
				},
			},
			Egress: []api.EgressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{{Port: "1200", Protocol: api.ProtoTCP}},
					}},
				},
			},
		},
	}
	testCases := []struct {
		description           string                           // the description to print in asserts
		rulesToApply          []string                         // the rules from the rules map to resolve, in order
		expectedIngressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, in order
		expectedEgressLabels  map[string]labels.LabelArrayList // the slice of LabelArray we should see, in order
	}{
		{
			description:           "Empty rule that matches. Should not apply labels",
			rulesToApply:          []string{"rule0"},
			expectedIngressLabels: map[string]labels.LabelArrayList{},
			expectedEgressLabels:  map[string]labels.LabelArrayList{},
		},
		{
			description:           "A rule that matches. Should apply labels",
			rulesToApply:          []string{"rule1"},
			expectedIngressLabels: map[string]labels.LabelArrayList{"1010/TCP": {ruleLabels["rule1"]}},
			expectedEgressLabels:  map[string]labels.LabelArrayList{"1100/TCP": {ruleLabels["rule1"]}},
		}, {
			description:  "Multiple matching rules. Should apply labels from all that have rule entries",
			rulesToApply: []string{"rule0", "rule1", "rule2"},
			expectedIngressLabels: map[string]labels.LabelArrayList{
				"1010/TCP": {ruleLabels["rule1"]},
				"1020/TCP": {ruleLabels["rule2"]}},
			expectedEgressLabels: map[string]labels.LabelArrayList{
				"1100/TCP": {ruleLabels["rule1"]},
				"1200/TCP": {ruleLabels["rule2"]}},
		}}
	// endpoint selector for all tests
	for i, test := range testCases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
			for _, r := range test.rulesToApply {
				td.repo.mustAdd(rules[r])
			}
			finalPolicy, err := td.repo.resolvePolicyLocked(idA)
			require.NoError(t, err)
			// The number of resolved ingress filters must equal the number of
			// expected port/proto entries.
			require.Equal(t, len(test.expectedIngressLabels), finalPolicy.L4Policy.Ingress.PortRules.Len(), test.description)
			for portProto := range test.expectedIngressLabels {
				// Keys are "port/proto" strings; split to drive ExactLookup.
				portProtoSlice := strings.Split(portProto, "/")
				out := finalPolicy.L4Policy.Ingress.PortRules.ExactLookup(portProtoSlice[0], 0, portProtoSlice[1])
				require.NotNil(t, out, test.description)
				require.Len(t, out.RuleOrigin, 1, test.description)
				lbls := out.RuleOrigin[out.wildcard].GetLabelArrayList()
				require.Equal(t, test.expectedIngressLabels[portProto], lbls, test.description)
			}
			require.Equal(t, len(test.expectedEgressLabels), finalPolicy.L4Policy.Egress.PortRules.Len(), test.description)
			for portProto := range test.expectedEgressLabels {
				portProtoSlice := strings.Split(portProto, "/")
				out := finalPolicy.L4Policy.Egress.PortRules.ExactLookup(portProtoSlice[0], 0, portProtoSlice[1])
				require.NotNil(t, out, test.description)
				require.Len(t, out.RuleOrigin, 1, test.description)
				lbls := out.RuleOrigin[out.wildcard].GetLabelArrayList()
				require.Equal(t, test.expectedEgressLabels[portProto], lbls, test.description)
			}
		})
	}
}
// TestRuleLog verifies that the Log values of all rules contributing to an
// allowed flow are merged into the resolved policy's log output — both when
// rules merge at the L4Filter level (same selector) and when they merge in
// the MapState (overlapping selectors).
func TestRuleLog(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
	// test merging on a per-selector basis, as well as for overlapping selectors
	nsDefaultSelector := api.NewESFromLabels(labels.ParseSelectLabel("io.kubernetes.pod.namespace=default"))
	rules := api.Rules{
		// rule1, rule2 selects id=b -- should merge in L4Filter
		// rule3 selects namespace = default -- should merge in MapState
		{
			EndpointSelector: endpointSelectorA,
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEndpoints: []api.EndpointSelector{endpointSelectorB},
					},
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
			Log: api.LogConfig{Value: "rule1"},
		},
		{
			EndpointSelector: endpointSelectorA,
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEndpoints: []api.EndpointSelector{endpointSelectorB},
					},
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
			Log: api.LogConfig{Value: "rule2"},
		},
		{
			EndpointSelector: endpointSelectorA,
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEndpoints: []api.EndpointSelector{nsDefaultSelector},
					},
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
			Log: api.LogConfig{Value: "rule3"},
		},
	}
	// endpoint b should have all 3 rules
	td.repo.MustAddList(rules)
	verdict, egress, _, err := LookupFlow(td.repo.logger, td.repo, flowAToB, nil, nil)
	require.NoError(t, err)
	require.Equal(t, api.Allowed, verdict)
	require.Equal(t, []string{"rule1", "rule2", "rule3"}, egress.Log())
	// endpoint c should have just rule3
	verdict, egress, _, err = LookupFlow(td.repo.logger, td.repo, flowAToC, nil, nil)
	require.NoError(t, err)
	require.Equal(t, api.Allowed, verdict)
	require.Equal(t, []string{"rule3"}, egress.Log())
}
// Shared fixtures for the rule tests below: three identities (a, b, c) with
// matching endpoint selectors, canned flows between them (and to world), the
// identity map used to seed test data, a default-deny ingress rule, and a
// named-port table used by checkFlow's synthetic endpoints.
var (
	labelsA = labels.LabelArray{
		labels.NewLabel("id", "a", labels.LabelSourceK8s),
		labels.NewLabel("io.kubernetes.pod.namespace", "default", labels.LabelSourceK8s),
	}
	idA               = identity.NewIdentity(1001, labelsA.Labels())
	endpointSelectorA = api.NewESFromLabels(labels.ParseSelectLabel("id=a"))

	// Note: labelsB uses keys id1/id2 (not id), so selectors on "id" do not
	// match identity B.
	labelsB = labels.LabelArray{
		labels.NewLabel("id1", "b", labels.LabelSourceK8s),
		labels.NewLabel("id2", "t", labels.LabelSourceK8s),
		labels.NewLabel("io.kubernetes.pod.namespace", "default", labels.LabelSourceK8s),
	}
	idB               = identity.NewIdentity(1002, labelsB.Labels())
	endpointSelectorB = api.NewESFromLabels(labels.ParseSelectLabel("id1=b"))

	labelsC = labels.LabelArray{
		labels.NewLabel("id", "t", labels.LabelSourceK8s),
		labels.NewLabel("io.kubernetes.pod.namespace", "default", labels.LabelSourceK8s),
	}
	idC               = identity.NewIdentity(1003, labelsC.Labels())
	endpointSelectorC = api.NewESFromLabels(labels.ParseSelectLabel("id=t"))

	// Canned TCP flows used throughout the allow/deny tests.
	flowAToB       = Flow{From: idA, To: idB, Proto: u8proto.TCP, Dport: 80}
	flowAToC       = Flow{From: idA, To: idC, Proto: u8proto.TCP, Dport: 80}
	flowAToC90     = Flow{From: idA, To: idC, Proto: u8proto.TCP, Dport: 90}
	flowAToWorld80 = Flow{From: idA, To: identity.LookupReservedIdentity(identity.ReservedIdentityWorld), Proto: u8proto.TCP, Dport: 80}
	flowAToWorld90 = Flow{From: idA, To: identity.LookupReservedIdentity(identity.ReservedIdentityWorld), Proto: u8proto.TCP, Dport: 90}

	// Identity map used to seed newTestData(...).withIDs for these tests.
	ruleTestIDs = identity.IdentityMap{
		idA.ID: idA.LabelArray,
		idB.ID: idB.LabelArray,
		idC.ID: idC.LabelArray,
	}

	// Wildcard-subject rule with a single empty ingress rule: establishes
	// default-deny ingress for all endpoints.
	defaultDenyIngress = &api.Rule{
		EndpointSelector: api.WildcardEndpointSelector,
		Ingress:          []api.IngressRule{{}},
	}

	// Named-port mapping exposed by checkFlow's synthetic endpoints.
	namedPorts = map[string]uint16{
		"port-80": 80,
		"port-90": 90,
	}
)
// checkFlow resolves the verdict for flow against repo, using two synthetic
// endpoints that both expose the shared namedPorts mapping, and asserts the
// result equals the expected verdict.
func checkFlow(t *testing.T, repo *Repository, flow Flow, verdict api.Decision) {
	t.Helper()

	src := &EndpointInfo{ID: 1, TCPNamedPorts: namedPorts}
	dst := &EndpointInfo{ID: 2, TCPNamedPorts: namedPorts}

	actual, _, _, err := LookupFlow(hivetest.Logger(t), repo, flow, src, dst)
	require.NoError(t, err)
	require.Equal(t, verdict, actual)
}
// TestIngressAllowAll verifies that a wildcard FromEndpoints ingress rule on
// endpoint C allows traffic to C on any port, while default-deny still blocks
// traffic to other endpoints (B).
func TestIngressAllowAll(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
	repo := td.repo
	repo.MustAddList(api.Rules{
		defaultDenyIngress,
		&api.Rule{
			EndpointSelector: endpointSelectorC,
			Ingress: []api.IngressRule{
				{
					// Allow all L3&L4 ingress rule
					IngressCommonRule: api.IngressCommonRule{
						FromEndpoints: []api.EndpointSelector{
							api.WildcardEndpointSelector,
						},
					},
				},
			},
		},
	})

	// B is not selected by the allow rule; default deny applies.
	checkFlow(t, repo, flowAToB, api.Denied)
	// C is selected: all ports are allowed.
	checkFlow(t, repo, flowAToC, api.Allowed)
	checkFlow(t, repo, flowAToC90, api.Allowed)
}
// TestIngressAllowAllL4Overlap verifies that adding an L4-only rule (port 80)
// alongside a wildcard allow-all rule does not narrow the policy: traffic on
// other ports (90) must remain allowed.
func TestIngressAllowAllL4Overlap(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
	repo := td.repo
	repo.MustAddList(api.Rules{
		defaultDenyIngress,
		&api.Rule{
			EndpointSelector: endpointSelectorC,
			Ingress: []api.IngressRule{
				{
					// Allow all L3&L4 ingress rule
					IngressCommonRule: api.IngressCommonRule{
						FromEndpoints: []api.EndpointSelector{
							api.WildcardEndpointSelector,
						},
					},
				},
				{
					// This rule is a subset of the above
					// rule and should *NOT* restrict to
					// port 80 only
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
		},
	})

	checkFlow(t, repo, flowAToC, api.Allowed)
	checkFlow(t, repo, flowAToC90, api.Allowed)
}
// TestIngressAllowAllNamedPort verifies that a wildcard-L3 ingress rule
// restricted to the named port "port-80" only allows traffic on the port it
// resolves to (80) for the selected endpoint C; other ports and other
// endpoints stay denied.
func TestIngressAllowAllNamedPort(t *testing.T) {
	repo := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs).repo
	repo.MustAddList(api.Rules{
		defaultDenyIngress,
		&api.Rule{
			EndpointSelector: endpointSelectorC,
			Ingress: []api.IngressRule{
				{
					// Allow all L3&L4 ingress rule
					IngressCommonRule: api.IngressCommonRule{
						FromEndpoints: []api.EndpointSelector{
							api.WildcardEndpointSelector,
						},
					},
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "port-80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
		},
	})

	// "port-80" resolves to 80 via checkFlow's namedPorts mapping.
	checkFlow(t, repo, flowAToC, api.Allowed)
	checkFlow(t, repo, flowAToB, api.Denied)
	checkFlow(t, repo, flowAToC90, api.Denied)
}
// TestIngressAllowAllL4OverlapNamedPort is the named-port variant of
// TestIngressAllowAllL4Overlap: an L4-only rule on named port "port-80" next
// to a wildcard allow-all must not narrow the policy to that port.
func TestIngressAllowAllL4OverlapNamedPort(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
	repo := td.repo
	repo.MustAddList(api.Rules{
		defaultDenyIngress,
		&api.Rule{
			EndpointSelector: endpointSelectorC,
			Ingress: []api.IngressRule{
				{
					// Allow all L3&L4 ingress rule
					IngressCommonRule: api.IngressCommonRule{
						FromEndpoints: []api.EndpointSelector{
							api.WildcardEndpointSelector,
						},
					},
				},
				{
					// This rule is a subset of the above
					// rule and should *NOT* restrict to
					// port 80 only
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "port-80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
		},
	})

	checkFlow(t, repo, flowAToC, api.Allowed)
	checkFlow(t, repo, flowAToC90, api.Allowed)
}
// TestIngressL4AllowAll verifies that an L4-only ingress rule (no
// FromEndpoints) allows traffic on port 80 from any source, denies other
// ports, and produces a wildcard-selector L4Filter with no L7 policy.
func TestIngressL4AllowAll(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
	repo := td.repo
	repo.MustAddList(api.Rules{
		defaultDenyIngress,
		&api.Rule{
			EndpointSelector: endpointSelectorC,
			Ingress: []api.IngressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
		},
	})

	checkFlow(t, repo, flowAToC, api.Allowed)
	checkFlow(t, repo, flowAToC90, api.Denied)

	// Inspect the resolved filter for 80/TCP directly.
	pol, err := repo.resolvePolicyLocked(idC)
	require.NoError(t, err)
	defer pol.detach(true, 0)

	filter := pol.L4Policy.Ingress.PortRules.ExactLookup("80", 0, "TCP")
	require.NotNil(t, filter)

	require.Equal(t, uint16(80), filter.Port)
	require.True(t, filter.Ingress)

	// Exactly one selector (the wildcard), with a nil per-selector policy,
	// i.e. no L7 restrictions.
	require.Len(t, filter.PerSelectorPolicies, 1)
	require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
}
// TestIngressL4AllowAllNamedPort is the named-port variant of
// TestIngressL4AllowAll: the resolved filter keeps Port 0 and records the
// port name ("port-80"), which checkFlow's endpoints resolve to 80.
func TestIngressL4AllowAllNamedPort(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
	repo := td.repo
	repo.MustAddList(api.Rules{
		&api.Rule{
			EndpointSelector: endpointSelectorC,
			Ingress: []api.IngressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "port-80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
		},
	})

	checkFlow(t, repo, flowAToC, api.Allowed)
	checkFlow(t, repo, flowAToC90, api.Denied)

	pol, err := repo.resolvePolicyLocked(idC)
	require.NoError(t, err)
	defer pol.detach(true, 0)

	// Named-port filters are looked up by name; the numeric Port stays 0
	// until endpoint-specific resolution.
	filter := pol.L4Policy.Ingress.PortRules.ExactLookup("port-80", 0, "TCP")
	require.NotNil(t, filter)

	require.Equal(t, uint16(0), filter.Port)
	require.Equal(t, "port-80", filter.PortName)
	require.True(t, filter.Ingress)

	require.Len(t, filter.PerSelectorPolicies, 1)
	require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
}
// TestEgressAllowAll verifies that a wildcard ToEndpoints egress rule on
// endpoint A allows traffic from A to any destination on any port.
func TestEgressAllowAll(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)

	allowAllEgress := &api.Rule{
		EndpointSelector: endpointSelectorA,
		Egress: []api.EgressRule{{
			EgressCommonRule: api.EgressCommonRule{
				ToEndpoints: []api.EndpointSelector{
					api.WildcardEndpointSelector,
				},
			},
		}},
	}
	td.repo.MustAddList(api.Rules{allowAllEgress})

	// Every destination and port is allowed.
	for _, flow := range []Flow{flowAToB, flowAToC, flowAToC90} {
		checkFlow(t, td.repo, flow, api.Allowed)
	}
}
// TestEgressL4AllowAll verifies that an L4-only egress rule (no ToEndpoints)
// allows traffic to any destination on port 80, denies other ports, and
// produces a wildcard-selector egress L4Filter with no L7 policy.
func TestEgressL4AllowAll(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs)
	repo := td.repo
	repo.MustAddList(api.Rules{
		&api.Rule{
			EndpointSelector: endpointSelectorA,
			Egress: []api.EgressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
		},
	})

	checkFlow(t, repo, flowAToB, api.Allowed)
	checkFlow(t, repo, flowAToC, api.Allowed)
	checkFlow(t, repo, flowAToC90, api.Denied)

	pol, err := repo.resolvePolicyLocked(idA)
	require.NoError(t, err)
	defer pol.detach(true, 0)

	t.Log(pol.L4Policy.Egress.PortRules)

	filter := pol.L4Policy.Egress.PortRules.ExactLookup("80", 0, "TCP")
	require.NotNil(t, filter)

	require.Equal(t, uint16(80), filter.Port)
	require.False(t, filter.Ingress)

	// Wildcard selector only, with no L7 restrictions.
	require.Len(t, filter.PerSelectorPolicies, 1)
	require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
}
// TestEgressL4AllowWorld verifies that an egress rule to the "world" entity
// on port 80 allows only world-bound traffic on that port; pod-to-pod
// traffic stays denied on every port.
func TestEgressL4AllowWorld(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs, identity.ListReservedIdentities())
	repo := td.repo
	repo.MustAddList(api.Rules{
		&api.Rule{
			EndpointSelector: endpointSelectorA,
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEntities: []api.Entity{api.EntityWorld},
					},
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
		},
	})

	checkFlow(t, repo, flowAToWorld80, api.Allowed)
	checkFlow(t, repo, flowAToWorld90, api.Denied)

	// Pod to pod must be denied on port 80 and 90, only world was whitelisted
	checkFlow(t, repo, flowAToC, api.Denied)
	checkFlow(t, repo, flowAToC90, api.Denied)

	pol, err := repo.resolvePolicyLocked(idA)
	require.NoError(t, err)
	defer pol.detach(true, 0)

	filter := pol.L4Policy.Egress.PortRules.ExactLookup("80", 0, "TCP")
	require.NotNil(t, filter)

	require.Equal(t, uint16(80), filter.Port)
	require.False(t, filter.Ingress)

	// The world entity expands to multiple cached selectors — presumably
	// world plus the IPv4/IPv6 world variants; confirm against the entity
	// expansion logic.
	require.Len(t, filter.PerSelectorPolicies, 3)
}
// TestEgressL4AllowAllEntity verifies that an egress rule to the "all" entity
// on port 80 allows both world-bound and pod-to-pod traffic on that port,
// while other ports stay denied; the entity collapses to a single selector.
func TestEgressL4AllowAllEntity(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs, identity.ListReservedIdentities())
	repo := td.repo
	repo.MustAddList(api.Rules{
		&api.Rule{
			EndpointSelector: endpointSelectorA,
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEntities: []api.Entity{api.EntityAll},
					},
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
						},
					}},
				},
			},
		},
	})

	checkFlow(t, repo, flowAToWorld80, api.Allowed)
	checkFlow(t, repo, flowAToWorld90, api.Denied)

	// Pod to pod must be allowed on port 80, denied on port 90 (all identity)
	checkFlow(t, repo, flowAToC, api.Allowed)
	checkFlow(t, repo, flowAToC90, api.Denied)

	pol, err := repo.resolvePolicyLocked(idA)
	require.NoError(t, err)
	defer pol.detach(true, 0)

	filter := pol.L4Policy.Egress.PortRules.ExactLookup("80", 0, "TCP")
	require.NotNil(t, filter)

	require.Equal(t, uint16(80), filter.Port)
	require.False(t, filter.Ingress)

	// EntityAll resolves to a single (wildcard) selector.
	require.Len(t, filter.PerSelectorPolicies, 1)
}
// TestEgressL3AllowWorld verifies that an L3-only egress rule to the "world"
// entity allows world-bound traffic on any port while pod-to-pod traffic
// remains denied.
func TestEgressL3AllowWorld(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs, identity.ListReservedIdentities())
	repo := td.repo
	repo.MustAddList(api.Rules{
		&api.Rule{
			EndpointSelector: endpointSelectorA,
			Egress: []api.EgressRule{
				{
					EgressCommonRule: api.EgressCommonRule{
						ToEntities: []api.Entity{api.EntityWorld},
					},
				},
			},
		},
	})

	// L3-only: no port restriction for world-bound traffic.
	checkFlow(t, repo, flowAToWorld80, api.Allowed)
	checkFlow(t, repo, flowAToWorld90, api.Allowed)

	// Pod to pod must be denied on port 80 and 90, only world was whitelisted
	checkFlow(t, repo, flowAToC, api.Denied)
	checkFlow(t, repo, flowAToC90, api.Denied)
}
// TestEgressL3AllowAllEntity verifies that an L3-only egress rule to the
// "all" entity allows traffic from A to every destination — world and pods
// alike — on any port.
func TestEgressL3AllowAllEntity(t *testing.T) {
	td := newTestData(hivetest.Logger(t)).withIDs(ruleTestIDs, identity.ListReservedIdentities())

	allowAllEntity := &api.Rule{
		EndpointSelector: endpointSelectorA,
		Egress: []api.EgressRule{{
			EgressCommonRule: api.EgressCommonRule{
				ToEntities: []api.Entity{api.EntityAll},
			},
		}},
	}
	td.repo.MustAddList(api.Rules{allowAllEntity})

	// L3-only rule: no port restriction anywhere.
	for _, flow := range []Flow{flowAToWorld80, flowAToWorld90, flowAToC, flowAToC90} {
		checkFlow(t, td.repo, flow, api.Allowed)
	}
}
// TestL4WildcardMerge verifies how L4-only rules merge with L4+L7 rules on
// the same port/protocol: the L4-only rule wildcards L7 for its selector,
// while the narrower selector keeps its L7 policy. Both implicit (no
// FromEndpoints) and explicit (wildcard FromEndpoints) L4-only rules are
// tested, in both rule orders.
func TestL4WildcardMerge(t *testing.T) {
	// First, test implicit case.
	//
	// Test the case where if we have rules that select the same endpoint on the
	// same port-protocol tuple with one that is L4-only, and the other applying
	// at L4 and L7, that the L4-only rule shadows the L4-L7 rule. This is because
	// L4-only rule implicitly allows all traffic at L7, so the L7-related
	// parts of the L4-L7 rule are useless.
	td := newTestData(hivetest.Logger(t))
	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				// L4-only rule on the same port: wildcards L7 for all peers.
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "7000", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						L7Proto: "testparser",
						L7: []api.PortRuleL7{
							{"Key": "Value"},
						},
					},
				}},
			},
			{
				// Same selector, same port, no L7: merges into the
				// testparser policy as an empty (allow-all) L7 entry.
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "7000", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}

	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			// Wildcard selector carries no L7 policy (L4-only allow);
			// selector C keeps its HTTP restriction.
			td.wildcardCachedSelector: nil,
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorC:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	},
		"7000/TCP": {
			Port: 7000, Protocol: api.ProtoTCP, U8Proto: 6,
			PerSelectorPolicies: L7DataMap{
				td.cachedSelectorC: &PerSelectorPolicy{
					L7Parser: "testparser",
					Priority: ListenerPriorityProxylib,
					L7Rules: api.L7Rules{
						L7Proto: "testparser",
						// The trailing empty rule comes from merging the
						// L7-less rule on the same port and selector.
						L7: []api.PortRuleL7{{"Key": "Value"}, {}},
					},
				},
			},
			Ingress:    true,
			RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorC: {nil}}),
		},
	})

	td.policyMapEquals(t, expected, nil, &rule1)

	// Test the reverse order as well; ensure that we check both conditions
	// for if L4-only policy is in the L4Filter for the same port-protocol tuple,
	// and L7 metadata exists in the L4Filter we are adding; expect to resolve
	// to L4-only policy without any L7-metadata.
	rule2 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "7000", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "7000", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						L7Proto: "testparser",
						L7: []api.PortRuleL7{
							{"Key": "Value"},
						},
					},
				}},
			},
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}

	// Rule order must not matter: same expected policy as rule1.
	td.policyMapEquals(t, expected, nil, &rule2)

	// Second, test the explicit allow at L3.
	rule3 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				// Explicit wildcard L3 with L4-only port 80.
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}

	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorC:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, expected, nil, &rule3)

	// Test the reverse order as well; ensure that we check both conditions
	// for if L4-only policy is in the L4Filter for the same port-protocol tuple,
	// and L7 metadata exists in the L4Filter we are adding; expect to resolve
	// to L4-only policy without any L7-metadata.
	rule4 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}

	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.wildcardCachedSelector: nil,
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorC:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, expected, nil, &rule4)
}
// TestL3L4L7Merge verifies merging of a wildcard-L3 L7 rule with an L3-scoped
// L4-only rule on the same port, in both rule orders: the wildcard selector
// keeps the HTTP restriction while selector C gets an unrestricted (nil)
// per-selector policy.
func TestL3L4L7Merge(t *testing.T) {
	// First rule allows ingress from all endpoints to port 80 only on
	// GET to "/". However, second rule allows all traffic on port 80 only to a
	// specific endpoint. When these rules are merged, it equates to allowing
	// all traffic from port 80 from any endpoint.
	//
	// TODO: This comment can't be correct, the resulting policy
	// should allow all on port 80 only from endpoint C, traffic
	// from all other endpoints should still only allow only GET
	// on "/".
	td := newTestData(hivetest.Logger(t))
	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
		},
	}

	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			// Selector C is L4-only (nil); the wildcard carries the HTTP
			// restriction from the first rule.
			td.cachedSelectorC: nil,
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorC:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, expected, nil, &rule1)

	// Same rules in the opposite order must produce the same policy.
	rule2 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Ingress: []api.IngressRule{
			{
				IngressCommonRule: api.IngressCommonRule{
					FromEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			{
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET", Path: "/"},
						},
					},
				}},
			},
		},
	}

	expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		wildcard: td.wildcardCachedSelector,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorC: nil,
			td.wildcardCachedSelector: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
				},
			},
		},
		Ingress: true,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorC:        {nil},
			td.wildcardCachedSelector: {nil},
		}),
	}})

	td.policyMapEquals(t, expected, nil, &rule2)
}
// TestMatches verifies rule.matchesSubject for both endpoint-selector and
// node-selector rules, including the caching behavior for regular identities
// and the non-cached handling of the mutable host identity.
func TestMatches(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	repo := td.repo
	repo.MustAddList(api.Rules{
		&api.Rule{
			EndpointSelector: endpointSelectorA,
			Ingress: []api.IngressRule{
				{
					IngressCommonRule: api.IngressCommonRule{
						FromEndpoints: []api.EndpointSelector{endpointSelectorC},
					},
				},
			},
		},
		&api.Rule{
			// Node-scoped rule: selects the host, not pod endpoints.
			NodeSelector: endpointSelectorA,
			Ingress: []api.IngressRule{
				{
					IngressCommonRule: api.IngressCommonRule{
						FromEndpoints: []api.EndpointSelector{endpointSelectorC},
					},
				},
			},
		},
	})

	epRule := repo.rules[ruleKey{idx: 0}]
	hostRule := repo.rules[ruleKey{idx: 1}]

	// Identity with id=a: selected by endpointSelectorA.
	selectedEpLabels := labels.ParseSelectLabel("id=a")
	selectedIdentity := identity.NewIdentity(54321, labels.Labels{selectedEpLabels.Key: selectedEpLabels})
	td.addIdentity(selectedIdentity)

	// Identity with id=b: not selected by endpointSelectorA.
	notSelectedEpLabels := labels.ParseSelectLabel("id=b")
	notSelectedIdentity := identity.NewIdentity(9876, labels.Labels{notSelectedEpLabels.Key: notSelectedEpLabels})
	td.addIdentity(notSelectedIdentity)

	// Host identity carrying id=a plus the reserved host label.
	hostLabels := labels.Labels{selectedEpLabels.Key: selectedEpLabels}
	hostLabels.MergeLabels(labels.LabelHost)
	hostIdentity := identity.NewIdentity(identity.ReservedIdentityHost, hostLabels)
	td.addIdentity(hostIdentity)

	// notSelectedEndpoint is not selected by the rule, so it shouldn't be
	// added to EndpointsSelected.
	require.False(t, epRule.matchesSubject(notSelectedIdentity))

	// selectedEndpoint is selected by the rule, so it should be added to
	// EndpointsSelected.
	require.True(t, epRule.matchesSubject(selectedIdentity))

	// Test again to check for caching working correctly.
	require.True(t, epRule.matchesSubject(selectedIdentity))

	// Possible scenario where an endpoint is deleted, and soon after another
	// endpoint is added with the same ID, but with a different identity. Matching
	// needs to handle this case correctly.
	require.False(t, epRule.matchesSubject(notSelectedIdentity))

	// The host endpoint is not selected by the endpoint rule, so it shouldn't
	// be added to EndpointsSelected.
	require.False(t, epRule.matchesSubject(hostIdentity))

	// A regular endpoint is not selected by the node rule, so it shouldn't be
	// added to EndpointsSelected.
	require.False(t, hostRule.matchesSubject(selectedIdentity))

	// host endpoint is selected by rule, but host labels are mutable, so don't cache them
	require.True(t, hostRule.matchesSubject(hostIdentity))

	// Assert that mutable host identities are handled
	// First, add an additional label, ensure that match succeeds
	hostLabels.MergeLabels(labels.NewLabelsFromModel([]string{"foo=bar"}))
	hostIdentity = identity.NewIdentity(identity.ReservedIdentityHost, hostLabels)
	td.addIdentity(hostIdentity)
	require.True(t, hostRule.matchesSubject(hostIdentity))

	// Then, change host to id=c, which is not selected, and ensure match is correct
	hostIdentity = identity.NewIdentity(identity.ReservedIdentityHost, labels.NewLabelsFromModel([]string{"id=c"}))
	td.addIdentity(hostIdentity)
	require.False(t, hostRule.matchesSubject(hostIdentity))
}
// BenchmarkRuleString measures the cost of stringifying a representative rule
// containing both an ingress section with L7 (HTTP) rules and an L4-only
// egress section.
func BenchmarkRuleString(b *testing.B) {
	r := &rule{
		Rule: api.Rule{
			EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
			Ingress: []api.IngressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "80", Protocol: api.ProtoTCP},
							{Port: "8080", Protocol: api.ProtoTCP},
						},
						Rules: &api.L7Rules{
							HTTP: []api.PortRuleHTTP{
								{Method: "GET", Path: "/"},
							},
						},
					}},
				},
			},
			Egress: []api.EgressRule{
				{
					ToPorts: []api.PortRule{{
						Ports: []api.PortProtocol{
							{Port: "3000", Protocol: api.ProtoAny},
						},
					}},
				},
			},
		},
	}
	b.ReportAllocs()
	// b.Loop keeps setup out of the timed region and prevents dead-code
	// elimination of the benchmark body.
	for b.Loop() {
		_ = r.String()
	}
}
// Test merging of L7 rules when the same rules apply to multiple selectors.
// This was added to prevent regression of a bug where the merging of l7 rules for "foo"
// also affected the rules for "baz".
func TestMergeL7PolicyEgressWithMultipleSelectors(t *testing.T) {
	td := newTestData(hivetest.Logger(t))
	rule1 := api.Rule{
		EndpointSelector: endpointSelectorA,
		Egress: []api.EgressRule{
			// To B on 80/TCP with no L7 restriction.
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorB},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
				}},
			},
			// To C on 80/TCP restricted to HTTP GET.
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Method: "GET"},
						},
					},
				}},
			},
			// To C again on 80/TCP with a second HTTP rule; it must be merged
			// with the GET rule above for selector C only.
			{
				EgressCommonRule: api.EgressCommonRule{
					ToEndpoints: []api.EndpointSelector{endpointSelectorC},
				},
				ToPorts: []api.PortRule{{
					Ports: []api.PortProtocol{
						{Port: "80", Protocol: api.ProtoTCP},
					},
					Rules: &api.L7Rules{
						HTTP: []api.PortRuleHTTP{
							{Host: "foo"},
						},
					},
				}},
			},
		},
	}
	// B keeps a nil (no L7) policy, while C's two HTTP rules are merged.
	expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
		Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
		PerSelectorPolicies: L7DataMap{
			td.cachedSelectorB: nil,
			td.cachedSelectorC: &PerSelectorPolicy{
				L7Parser: ParserTypeHTTP,
				Priority: ListenerPriorityHTTP,
				L7Rules: api.L7Rules{
					HTTP: []api.PortRuleHTTP{{Method: "GET"}, {Host: "foo"}},
				},
			},
		},
		Ingress: false,
		RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
			td.cachedSelectorB: {nil},
			td.cachedSelectorC: {nil},
		}),
	}})
	td.policyMapEquals(t, nil, expected, &rule1)
}
// TestMergeListenerReference exercises PerSelectorPolicy.mergeRedirect across
// all combinations of listener references and priorities: absent/present
// listeners, equal/different listeners, and zero/non-zero priorities. The
// assertions are order-dependent, as each merge mutates the receiver.
func TestMergeListenerReference(t *testing.T) {
	// No listener remains a no listener
	ps := &PerSelectorPolicy{}
	err := ps.mergeRedirect(ps)
	require.NoError(t, err)
	require.Empty(t, ps.Listener)
	require.Equal(t, ListenerPriority(0), ps.Priority)
	// Listener reference remains when the other has none
	ps0 := &PerSelectorPolicy{Listener: "listener0"}
	err = ps0.mergeRedirect(ps)
	require.NoError(t, err)
	require.Equal(t, "listener0", ps0.Listener)
	require.Equal(t, ListenerPriority(0), ps0.Priority)
	// Listener reference is propagated when there is none to begin with
	err = ps.mergeRedirect(ps0)
	require.NoError(t, err)
	require.Equal(t, "listener0", ps.Listener)
	require.Equal(t, ListenerPriority(0), ps.Priority)
	// A listener is not changed when there is no change
	err = ps0.mergeRedirect(ps0)
	require.NoError(t, err)
	require.Equal(t, "listener0", ps0.Listener)
	require.Equal(t, ListenerPriority(0), ps0.Priority)
	// Cannot merge two different listeners with the default (zero) priority
	ps0a := &PerSelectorPolicy{Listener: "listener0a"}
	err = ps0.mergeRedirect(ps0a)
	require.Error(t, err)
	err = ps0a.mergeRedirect(ps0)
	require.Error(t, err)
	// Listener with a defined (non-zero) priority takes precedence over
	// a listener with an undefined (zero) priority
	ps1 := &PerSelectorPolicy{Listener: "listener1", Priority: 1}
	err = ps1.mergeRedirect(ps0)
	require.NoError(t, err)
	require.Equal(t, "listener1", ps1.Listener)
	require.Equal(t, ListenerPriority(1), ps1.Priority)
	err = ps0.mergeRedirect(ps1)
	require.NoError(t, err)
	require.Equal(t, "listener1", ps0.Listener)
	require.Equal(t, ListenerPriority(1), ps0.Priority)
	// Listener with the lower priority value takes precedence
	ps2 := &PerSelectorPolicy{Listener: "listener2", Priority: 2}
	err = ps1.mergeRedirect(ps2)
	require.NoError(t, err)
	require.Equal(t, "listener1", ps1.Listener)
	require.Equal(t, ListenerPriority(1), ps1.Priority)
	err = ps2.mergeRedirect(ps1)
	require.NoError(t, err)
	require.Equal(t, "listener1", ps2.Listener)
	require.Equal(t, ListenerPriority(1), ps2.Priority)
	// Cannot merge two different listeners with the same priority
	ps12 := &PerSelectorPolicy{Listener: "listener1", Priority: 2}
	ps2 = &PerSelectorPolicy{Listener: "listener2", Priority: 2}
	err = ps12.mergeRedirect(ps2)
	require.Error(t, err)
	err = ps2.mergeRedirect(ps12)
	require.Error(t, err)
	// Lower priority is propagated also when the listeners are the same
	ps23 := &PerSelectorPolicy{Listener: "listener2", Priority: 3}
	err = ps2.mergeRedirect(ps23)
	require.NoError(t, err)
	require.Equal(t, "listener2", ps2.Listener)
	require.Equal(t, ListenerPriority(2), ps2.Priority)
	err = ps23.mergeRedirect(ps2)
	require.NoError(t, err)
	require.Equal(t, "listener2", ps23.Listener)
	require.Equal(t, ListenerPriority(2), ps23.Priority)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
policyapi "github.com/cilium/cilium/pkg/policy/api"
)
// ruleSlice is a wrapper around a slice of *rule, which allows for functions
// to be written with []*rule as a receiver.
// Slice order is significant: traceState.ruleID is derived from the slice
// index during policy resolution.
type ruleSlice []*rule
// resolveL4IngressPolicy computes the aggregate L4 ingress policy map for all
// rules in the slice. An error from any single rule aborts the resolution.
func (rules ruleSlice) resolveL4IngressPolicy(policyCtx PolicyContext) (L4PolicyMap, error) {
	result := NewL4PolicyMap()
	policyCtx.PolicyTrace("Resolving ingress policy")

	// Collect the FromRequires of every rule (allow and deny) up front.
	// These requirements are appended to each EndpointSelector's
	// MatchExpressions in each FromEndpoints of every ingress rule, so that
	// FromRequires is taken into account when evaluating policy at L4.
	var reqs, reqsDeny []slim_metav1.LabelSelectorRequirement
	for _, r := range rules {
		for _, ing := range r.Ingress {
			for _, req := range ing.FromRequires {
				reqs = append(reqs, req.ConvertToLabelSelectorRequirementSlice()...)
			}
		}
		for _, ing := range r.IngressDeny {
			for _, req := range ing.FromRequires {
				reqsDeny = append(reqsDeny, req.ConvertToLabelSelectorRequirementSlice()...)
			}
		}
	}

	// Resolve each rule in order, tracking the rule index for tracing.
	state := traceState{}
	for _, r := range rules {
		if err := r.resolveIngressPolicy(policyCtx, &state, result, reqs, reqsDeny); err != nil {
			return nil, err
		}
		state.ruleID++
	}

	state.trace(len(rules), policyCtx)
	return result, nil
}
// resolveL4EgressPolicy computes the aggregate L4 egress policy map for all
// rules in the slice, mirroring resolveL4IngressPolicy for the egress
// direction. An error from any single rule aborts the resolution.
func (rules ruleSlice) resolveL4EgressPolicy(policyCtx PolicyContext) (L4PolicyMap, error) {
	result := NewL4PolicyMap()
	policyCtx.PolicyTrace("resolving egress policy")

	state := traceState{}
	var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement

	// Iterate over all ToRequires which select ctx.To. These requirements will
	// be appended to each EndpointSelector's MatchExpressions in each
	// ToEndpoints for all egress rules. This ensures that ToRequires is
	// taken into account when evaluating policy at L4.
	for _, r := range rules {
		for _, egressRule := range r.Egress {
			for _, requirement := range egressRule.ToRequires {
				requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...)
			}
		}
		for _, egressRule := range r.EgressDeny {
			for _, requirement := range egressRule.ToRequires {
				requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...)
			}
		}
	}

	// Resolve each rule in order. state.ruleID starts at zero and is
	// incremented once per rule, matching the ingress implementation; the
	// previous per-iteration assignment (state.ruleID = i) was redundant, as
	// the increment already yields the same sequence of IDs.
	for _, r := range rules {
		err := r.resolveEgressPolicy(policyCtx, &state, result, requirements, requirementsDeny)
		if err != nil {
			return nil, err
		}
		state.ruleID++
	}

	state.trace(len(rules), policyCtx)
	return result, nil
}
// AsPolicyRules return the internal policyapi.Rule objects as a policyapi.Rules object
func (rules ruleSlice) AsPolicyRules() policyapi.Rules {
	// Pre-size the result and assign by index; each entry points at the
	// embedded Rule of the corresponding internal rule.
	out := make(policyapi.Rules, len(rules))
	for i, r := range rules {
		out[i] = &r.Rule
	}
	return out
}
// traceState is an internal structure used to collect information
// while determining policy decision
// The zero value is ready to use; counters start at zero.
type traceState struct {
	// selectedRules is the number of rules with matching EndpointSelector
	selectedRules int
	// matchedRules is the number of rules that have allowed traffic
	matchedRules int
	// matchedDenyRules is the number of rules that have denied traffic
	matchedDenyRules int
	// constrainedRules counts how many "FromRequires" constraints are
	// unsatisfied
	constrainedRules int
	// ruleID is the rule ID currently being evaluated
	ruleID int
}
// trace emits a summary of the policy resolution outcome: how many of the
// given rules were selected, and whether allow/deny rules were found. An
// unsatisfied FromRequires constraint suppresses the allow/deny summary.
func (state *traceState) trace(rules int, policyCtx PolicyContext) {
	policyCtx.PolicyTrace("%d/%d rules selected\n", state.selectedRules, rules)

	// Guard clause: constraint failures short-circuit the verdict summary.
	if state.constrainedRules > 0 {
		policyCtx.PolicyTrace("Found unsatisfied FromRequires constraint\n")
		return
	}

	if state.matchedRules > 0 {
		policyCtx.PolicyTrace("Found allow rule\n")
	} else {
		policyCtx.PolicyTrace("Found no allow rule\n")
	}
	if state.matchedDenyRules > 0 {
		policyCtx.PolicyTrace("Found deny rule\n")
	} else {
		policyCtx.PolicyTrace("Found no deny rule\n")
	}
}
// selectRule records that rule r's subject selector matched, emitting a trace
// line and bumping the selected-rule counter.
func (state *traceState) selectRule(policyCtx PolicyContext, r *rule) {
	policyCtx.PolicyTrace("* Rule %s: selected\n", r)
	state.selectedRules++
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"log/slog"
"sync"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
)
// scIdentity is the information we need about an identity that rules can select
type scIdentity struct {
	NID identity.NumericIdentity
	lbls labels.LabelArray
	namespace string // value of the namespace label, or ""
}
// scIdentityCache is a cache of Identities keyed by the numeric identity
type scIdentityCache map[identity.NumericIdentity]scIdentity
// newIdentity builds an scIdentity for the given numeric identity and labels,
// extracting the k8s namespace from the labels for fast namespace matching.
func newIdentity(nid identity.NumericIdentity, lbls labels.LabelArray) scIdentity {
	return scIdentity{
		NID: nid,
		lbls: lbls,
		namespace: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
	}
}
// userNotification stores the information needed to call
// IdentitySelectionUpdated callbacks to notify users of selector's
// identity changes. These are queued to be able to call the callbacks
// in FIFO order while not holding any locks.
type userNotification struct {
	user CachedSelectionUser
	selector CachedSelector // nil for a sync notification
	txn *versioned.Tx // nil for non-sync notifications
	added []identity.NumericIdentity
	deleted []identity.NumericIdentity
	wg *sync.WaitGroup // Done() is called once the notification is delivered
}
// SelectorCache caches identities, identity selectors, and the
// subsets of identities each selector selects.
type SelectorCache struct {
	logger *slog.Logger
	// versioned coordinates versioned snapshots of selections; see
	// GetVersionHandle and oldVersionCleaner.
	versioned *versioned.Coordinator
	// mutex protects idCache, selectors and selectorUpdates below.
	mutex lock.RWMutex
	// selectorUpdates tracks changed selectors for efficient cleanup of old versions
	selectorUpdates versioned.VersionedSlice[*identitySelector]
	// idCache contains all known identities as informed by the
	// kv-store and the local identity facility via our
	// UpdateIdentities() function.
	idCache scIdentityCache
	// map key is the string representation of the selector being cached.
	selectors map[string]*identitySelector
	localIdentityNotifier identityNotifier
	// userCond is a condition variable for receiving signals
	// about addition of new elements in userNotes
	userCond *sync.Cond
	// userMutex protects userNotes and is linked to userCond
	userMutex lock.Mutex
	// userNotes holds a FIFO list of user notifications to be made
	userNotes []userNotification
	// notifiedUsers is a set of all notified users
	notifiedUsers map[CachedSelectionUser]struct{}
	// used to lazily start the handler for user notifications.
	startNotificationsHandlerOnce sync.Once
}
// GetVersionHandleFunc calls the given function with a versioned.VersionHandle for the
// current version of SelectorCache selections while selector cache is locked for writing, so that
// the caller may get ready for getting incremental updates that are possible right after the lock
// is released.
// This should only be used with trivial functions that can not lock or sleep.
// Use the plain 'GetVersionHandle' whenever possible, as it does not lock the selector cache.
// VersionHandle passed to 'f' must be closed with Close().
func (sc *SelectorCache) GetVersionHandleFunc(f func(*versioned.VersionHandle)) {
	// Lock synchronizes with UpdateIdentities() so that we do not use a stale version
	// that may already have received partial incremental updates.
	// Incremental updates are delivered asynchronously, so the caller may still receive
	// updates for older versions. These should be filtered out.
	sc.mutex.Lock()
	defer sc.mutex.Unlock()
	f(sc.GetVersionHandle())
}
// GetVersionHandle returns a VersionHandle for the current version.
// The returned VersionHandle must be closed with Close()
func (sc *SelectorCache) GetVersionHandle() *versioned.VersionHandle {
	return sc.versioned.GetVersionHandle()
}
// GetModel returns the API model of the SelectorCache.
func (sc *SelectorCache) GetModel() models.SelectorCache {
	sc.mutex.RLock()
	defer sc.mutex.RUnlock()
	selCacheMdl := make(models.SelectorCache, 0, len(sc.selectors))
	// Get handle to the current version. Any concurrent updates will not be visible in the
	// returned model.
	version := sc.GetVersionHandle()
	defer version.Close()
	// Map iteration order is random, so the resulting model order is
	// unspecified.
	for selector, idSel := range sc.selectors {
		selections := idSel.GetSelections(version)
		ids := make([]int64, 0, len(selections))
		for i := range selections {
			ids = append(ids, int64(selections[i]))
		}
		selMdl := &models.SelectorIdentityMapping{
			Selector: selector,
			Identities: ids,
			Users: int64(idSel.numUsers()),
			Labels: labelArrayToModel(idSel.GetMetadataLabels()),
		}
		selCacheMdl = append(selCacheMdl, selMdl)
	}
	return selCacheMdl
}
// Stats computes the maximum selection cardinality per metrics class over all
// cached selectors that may select peers (subject-only selectors are skipped,
// as they do not impact policymap cardinality).
func (sc *SelectorCache) Stats() selectorStats {
	result := newSelectorStats()
	sc.mutex.RLock()
	defer sc.mutex.RUnlock()
	version := sc.GetVersionHandle()
	defer version.Close()
	for _, idSel := range sc.selectors {
		if !idSel.MaySelectPeers() {
			// Peer selectors impact policymap cardinality, but
			// subject selectors do not. Do not count cardinality
			// if the selector is only used for policy subjects.
			continue
		}
		selections := idSel.GetSelections(version)
		class := idSel.source.metricsClass()
		// Track the maximum number of selections seen per class.
		if result.maxCardinalityByClass[class] < len(selections) {
			result.maxCardinalityByClass[class] = len(selections)
		}
	}
	return result
}
// labelArrayToModel converts a labels.LabelArray into its API model
// representation, preserving element order.
func labelArrayToModel(arr labels.LabelArray) models.LabelArray {
	out := make(models.LabelArray, len(arr))
	for i, lbl := range arr {
		out[i] = &models.Label{
			Key: lbl.Key,
			Value: lbl.Value,
			Source: lbl.Source,
		}
	}
	return out
}
// handleUserNotifications runs as a dedicated goroutine (started lazily by
// queueUserNotification) that drains the userNotes FIFO and invokes the user
// callbacks. Callbacks are made without holding userMutex so that queuing new
// notifications never blocks on a callback. This loop runs forever.
func (sc *SelectorCache) handleUserNotifications() {
	for {
		sc.userMutex.Lock()
		// Condition-variable wait loop: sleep until there is work.
		for len(sc.userNotes) == 0 {
			sc.userCond.Wait()
		}
		// get the current batch of notifications and release the lock so that SelectorCache
		// can't block on userMutex while we call IdentitySelectionUpdated callbacks below.
		notifications := sc.userNotes
		sc.userNotes = nil
		sc.userMutex.Unlock()
		for _, n := range notifications {
			// A nil selector marks a sync (commit) notification; see
			// userNotification.
			if n.selector == nil {
				n.user.IdentitySelectionCommit(sc.logger, n.txn)
			} else {
				n.user.IdentitySelectionUpdated(sc.logger, n.selector, n.added, n.deleted)
			}
			n.wg.Done()
		}
	}
}
// queueUserNotification appends an incremental-update notification for 'user'
// to the FIFO and signals the handler goroutine (starting it on first use).
// The caller's WaitGroup is incremented here and decremented by the handler
// once the callback has been delivered.
func (sc *SelectorCache) queueUserNotification(user CachedSelectionUser, selector CachedSelector, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
	sc.startNotificationsHandlerOnce.Do(func() {
		go sc.handleUserNotifications()
	})
	wg.Add(1)
	sc.userMutex.Lock()
	// Record the user so a later commit notification can be sent to it
	// (see queueNotifiedUsersCommit).
	if sc.notifiedUsers == nil {
		sc.notifiedUsers = make(map[CachedSelectionUser]struct{})
	}
	sc.notifiedUsers[user] = struct{}{}
	sc.userNotes = append(sc.userNotes, userNotification{
		user: user,
		selector: selector,
		added: added,
		deleted: deleted,
		wg: wg,
	})
	sc.userMutex.Unlock()
	sc.userCond.Signal()
}
// queueNotifiedUsersCommit queues a sync (commit) notification for every user
// that has received an incremental update since the last commit, then clears
// the notified-users set.
func (sc *SelectorCache) queueNotifiedUsersCommit(txn *versioned.Tx, wg *sync.WaitGroup) {
	sc.userMutex.Lock()
	for user := range sc.notifiedUsers {
		wg.Add(1)
		// sync notification has a nil selector
		sc.userNotes = append(sc.userNotes, userNotification{
			user: user,
			txn: txn,
			wg: wg,
		})
	}
	sc.notifiedUsers = nil
	sc.userMutex.Unlock()
	sc.userCond.Signal()
}
// NewSelectorCache creates a new SelectorCache with the given identities.
func NewSelectorCache(logger *slog.Logger, ids identity.IdentityMap) *SelectorCache {
	sc := &SelectorCache{
		logger: logger,
		idCache: make(map[identity.NumericIdentity]scIdentity, len(ids)),
		selectors: make(map[string]*identitySelector),
	}
	// userCond must share userMutex so queued notifications can be signaled.
	sc.userCond = sync.NewCond(&sc.userMutex)
	sc.versioned = &versioned.Coordinator{
		Cleaner: sc.oldVersionCleaner,
		Logger: logger,
	}
	// Pre-populate the identity cache from the supplied identity map.
	for nid, lbls := range ids {
		sc.idCache[nid] = newIdentity(nid, lbls)
	}
	return sc
}
// RegisterMetrics registers selector cache metrics with the metrics registry.
// Registration failure is logged but non-fatal: the cache operates normally,
// just without reporting metrics.
func (sc *SelectorCache) RegisterMetrics() {
	if err := metrics.Register(newSelectorCacheMetrics(sc)); err != nil {
		sc.logger.Warn("Selector cache metrics registration failed. No metrics will be reported.", logfields.Error, err)
	}
}
// oldVersionCleaner is called from a goroutine without holding any locks
// It drops selection versions older than 'keepVersion' from all selectors
// recorded in 'sc.selectorUpdates', then trims the processed prefix.
func (sc *SelectorCache) oldVersionCleaner(keepVersion versioned.KeepVersion) {
	// Log before taking the lock so that if we ever have a deadlock here this log line will be seen
	sc.logger.Debug(
		"Cleaning old selector and identity versions",
		logfields.Version, keepVersion,
	)
	// This is called when some versions are no longer needed, from wherever
	// VersionHandle's may be kept, so we must take the lock to safely access
	// 'sc.selectorUpdates'.
	sc.mutex.Lock()
	defer sc.mutex.Unlock()
	n := 0
	for idSel := range sc.selectorUpdates.Before(keepVersion) {
		idSel.selections.RemoveBefore(keepVersion)
		n++
	}
	// Drop the cleaned entries from the front of the update log.
	sc.selectorUpdates = sc.selectorUpdates[n:]
}
// SetLocalIdentityNotifier injects the provided identityNotifier into the
// SelectorCache. Currently, this is used to inject the FQDN subsystem into
// the SelectorCache so the SelectorCache can notify the FQDN subsystem when
// it should be aware of a given FQDNSelector for which CIDR identities need
// to be provided upon DNS lookups which correspond to said FQDNSelector.
func (sc *SelectorCache) SetLocalIdentityNotifier(pop identityNotifier) {
	sc.localIdentityNotifier = pop
}
// Pre-computed string forms of the special selectors, used for cheap key
// comparisons against cached selector keys.
var (
	// wildcardSelectorKey is used to compare if a key is for a wildcard
	wildcardSelectorKey = api.WildcardEndpointSelector.LabelSelector.String()
	// noneSelectorKey is used to compare if a key is for "reserved:none"
	noneSelectorKey = api.EndpointSelectorNone.LabelSelector.String()
)
// identityNotifier provides a means for other subsystems to be made aware of a
// given FQDNSelector (currently pkg/fqdn) so that said subsystems can notify
// the IPCache about IPs which correspond to said FQDNSelector.
// This is necessary as there is nothing intrinsic about an IP that says that
// it corresponds to a given FQDNSelector; this relationship is contained only
// via DNS responses, which are handled externally.
type identityNotifier interface {
	// RegisterFQDNSelector exposes this FQDNSelector so that the identity labels
	// of IPs contained in a DNS response that matches said selector can be
	// associated with that selector.
	RegisterFQDNSelector(selector api.FQDNSelector)
	// UnregisterFQDNSelector removes this FQDNSelector from the set of
	// IPs which are being tracked by the identityNotifier. The result
	// of this is that an IP may be evicted from IPCache if it is no longer
	// selected by any other FQDN selector.
	// This occurs when there are no more users of a given FQDNSelector for the
	// SelectorCache.
	UnregisterFQDNSelector(selector api.FQDNSelector)
}
// AddFQDNSelector adds the given api.FQDNSelector in to the selector cache. If
// an identical EndpointSelector has already been cached, the corresponding
// types.CachedSelector is returned, otherwise one is created and added to the cache.
// The second return value reports whether 'user' was newly added as a user of
// the selector.
func (sc *SelectorCache) AddFQDNSelector(user CachedSelectionUser, lbls stringLabels, fqdnSelec api.FQDNSelector) (cachedSelector types.CachedSelector, added bool) {
	key := fqdnSelec.String()
	sc.mutex.Lock()
	defer sc.mutex.Unlock()
	// If the selector already exists, use it.
	idSel, exists := sc.selectors[key]
	if exists {
		return idSel, idSel.addUser(user)
	}
	source := &fqdnSelector{
		selector: fqdnSelec,
	}
	// Make the FQDN subsystem aware of this selector
	sc.localIdentityNotifier.RegisterFQDNSelector(source.selector)
	return sc.addSelectorLocked(user, lbls, key, source)
}
// addSelectorLocked creates a new identitySelector for the given key/source,
// pre-populates its cached selections from the current identity cache, stores
// it in the selector map, and registers 'user' as its first user.
// must hold lock for writing
func (sc *SelectorCache) addSelectorLocked(user CachedSelectionUser, lbls stringLabels, key string, source selectorSource) (types.CachedSelector, bool) {
	idSel := &identitySelector{
		logger: sc.logger,
		key: key,
		users: make(map[CachedSelectionUser]struct{}),
		cachedSelections: make(map[identity.NumericIdentity]struct{}),
		source: source,
		metadataLbls: lbls,
	}
	sc.selectors[key] = idSel
	// Scan the cached set of IDs to determine any new matchers
	for nid, identity := range sc.idCache {
		if idSel.source.matches(identity) {
			idSel.cachedSelections[nid] = struct{}{}
		}
	}
	// Note: No notifications are sent for the existing
	// identities. Caller must use GetSelections() to get the
	// current selections after adding a selector. This way the
	// behavior is the same between the two cases here (selector
	// is already cached, or is a new one).
	// Create the immutable slice representation of the selected
	// numeric identities
	txn := sc.versioned.PrepareNextVersion()
	idSel.updateSelections(txn)
	txn.Commit()
	return idSel, idSel.addUser(user)
}
// FindCachedIdentitySelector finds the given api.EndpointSelector in the
// selector cache, returning nil if one can not be found.
func (sc *SelectorCache) FindCachedIdentitySelector(selector api.EndpointSelector) types.CachedSelector {
	key := selector.CachedString()
	sc.mutex.RLock()
	idSel, exists := sc.selectors[key]
	sc.mutex.RUnlock()
	// Return an untyped nil on a miss. Returning the zero-valued
	// *identitySelector directly would box a typed nil into the interface,
	// making the documented '== nil' check fail for callers.
	if !exists {
		return nil
	}
	return idSel
}
// AddIdentitySelector adds the given api.EndpointSelector in to the
// selector cache. If an identical EndpointSelector has already been
// cached, the corresponding types.CachedSelector is returned, otherwise one
// is created and added to the cache.
// The second return value reports whether 'user' was newly added as a user of
// the selector.
func (sc *SelectorCache) AddIdentitySelector(user types.CachedSelectionUser, lbls stringLabels, selector api.EndpointSelector) (cachedSelector types.CachedSelector, added bool) {
	// The key returned here may be different for equivalent
	// labelselectors, if the selector's requirements are stored
	// in different orders. When this happens we'll be tracking
	// essentially two copies of the same selector.
	key := selector.CachedString()
	sc.mutex.Lock()
	defer sc.mutex.Unlock()
	idSel, exists := sc.selectors[key]
	if exists {
		return idSel, idSel.addUser(user)
	}
	// Selectors are never modified once a rule is placed in the policy repository,
	// so no need to deep copy.
	source := &labelIdentitySelector{
		selector: selector,
	}
	// check if selector has a namespace match or requirement
	if namespaces, ok := selector.GetMatch(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel); ok {
		source.namespaces = namespaces
	}
	return sc.addSelectorLocked(user, lbls, key, source)
}
// removeSelectorLocked drops 'user' from the selector's user set; once the
// last user is gone the selector is detached from its source (e.g. FQDN
// unregistration) and evicted from the cache.
// lock must be held
func (sc *SelectorCache) removeSelectorLocked(selector types.CachedSelector, user CachedSelectionUser) {
	key := selector.String()
	sel, found := sc.selectors[key]
	if !found {
		return
	}
	if sel.removeUser(user) {
		sel.source.remove(sc.localIdentityNotifier)
		delete(sc.selectors, key)
	}
}
// RemoveSelector removes types.CachedSelector for the user.
func (sc *SelectorCache) RemoveSelector(selector types.CachedSelector, user CachedSelectionUser) {
	sc.mutex.Lock()
	sc.removeSelectorLocked(selector, user)
	sc.mutex.Unlock()
}
// RemoveSelectors removes types.CachedSelectorSlice for the user.
// All removals are performed under a single write-lock acquisition.
func (sc *SelectorCache) RemoveSelectors(selectors types.CachedSelectorSlice, user CachedSelectionUser) {
	sc.mutex.Lock()
	for _, selector := range selectors {
		sc.removeSelectorLocked(selector, user)
	}
	sc.mutex.Unlock()
}
// ChangeUser changes the CachedSelectionUser that gets updates on the
// cached selector. No-op if the selector is not cached.
func (sc *SelectorCache) ChangeUser(selector types.CachedSelector, from, to CachedSelectionUser) {
	key := selector.String()
	sc.mutex.Lock()
	idSel, exists := sc.selectors[key]
	if exists {
		// Add before remove so that the count does not dip to zero in between,
		// as this causes FQDN unregistration (if applicable).
		idSel.addUser(to)
		// ignoring the return value as we have just added a user above
		idSel.removeUser(from)
	}
	sc.mutex.Unlock()
}
// CanSkipUpdate returns true if a proposed update is already known to the SelectorCache
// and thus a no-op. Is used to de-dup an ID update stream, because identical updates
// may come from multiple sources.
func (sc *SelectorCache) CanSkipUpdate(added, deleted identity.IdentityMap) bool {
	sc.mutex.RLock()
	defer sc.mutex.RUnlock()

	// Deleting an identity we still know about changes state.
	for nid := range deleted {
		if _, known := sc.idCache[nid]; known {
			return false
		}
	}
	// An addition is a no-op only when the identity is already cached with
	// exactly the same labels.
	for nid, lbls := range added {
		cached, known := sc.idCache[nid]
		if !known || !cached.lbls.Equals(lbls) {
			return false
		}
	}
	return true
}
// UpdateIdentities propagates identity updates to selectors
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
//
// Caller should Wait() on the returned sync.WaitGroup before triggering any
// policy updates. Policy updates may need Endpoint locks, so this Wait() can
// deadlock if the caller is holding any endpoint locks.
//
// Incremental deletes of mutated identities are not sent to the users, as that could
// lead to deletion of policy map entries while other selectors may still select the mutated
// identity.
// In this case the return value is 'true' and the caller should trigger policy updates on all
// endpoints to remove the affected identity only from selectors that no longer select the mutated
// identity.
//
// Note: entries are removed in place from 'added'/'deleted' when they turn
// out to be no-ops (unknown deletions, identical re-additions).
func (sc *SelectorCache) UpdateIdentities(added, deleted identity.IdentityMap, wg *sync.WaitGroup) (mutated bool) {
	sc.mutex.Lock()
	defer sc.mutex.Unlock()
	txn := sc.versioned.PrepareNextVersion()
	// Update idCache so that newly added selectors get
	// prepopulated with all matching numeric identities.
	for numericID := range deleted {
		if old, exists := sc.idCache[numericID]; exists {
			sc.logger.Debug(
				"UpdateIdentities: Deleting identity",
				logfields.NewVersion, txn,
				logfields.Identity, numericID,
				logfields.Labels, old.lbls,
			)
			delete(sc.idCache, numericID)
		} else {
			sc.logger.Warn(
				"UpdateIdentities: Skipping Delete of a non-existing identity",
				logfields.NewVersion, txn,
				logfields.Identity, numericID,
			)
			delete(deleted, numericID)
		}
	}
	for numericID, lbls := range added {
		if old, exists := sc.idCache[numericID]; exists {
			// Skip if no change. Not skipping if label
			// order is different, but identity labels are
			// sorted for the kv-store, so there should
			// not be too many false negatives.
			if lbls.Equals(old.lbls) {
				sc.logger.Debug(
					"UpdateIdentities: Skipping add of an existing identical identity",
					logfields.NewVersion, txn,
					logfields.Identity, numericID,
				)
				delete(added, numericID)
				continue
			}
			msg := "UpdateIdentities: Updating an existing identity"
			// Warn if any other ID has their labels change, besides local
			// host. The local host can have its labels change at runtime if
			// the kube-apiserver is running on the local host, see
			// ipcache.TriggerLabelInjection().
			if numericID == identity.ReservedIdentityHost {
				sc.logger.Debug(msg,
					logfields.NewVersion, txn,
					logfields.Identity, numericID,
					logfields.Labels, old.lbls,
					logfields.LabelsNew, lbls,
				)
			} else {
				sc.logger.Warn(msg,
					logfields.NewVersion, txn,
					logfields.Identity, numericID,
					logfields.Labels, old.lbls,
					logfields.LabelsNew, lbls,
				)
			}
		} else {
			sc.logger.Debug(
				"UpdateIdentities: Adding a new identity",
				logfields.NewVersion, txn,
				logfields.Identity, numericID,
				logfields.Labels, lbls,
			)
		}
		sc.idCache[numericID] = newIdentity(numericID, lbls)
	}
	// Propagate the effective delta to every cached selector.
	updated := false
	if len(deleted)+len(added) > 0 {
		// Iterate through all locally used identity selectors and
		// update the cached numeric identities as required.
		for _, idSel := range sc.selectors {
			var adds, dels []identity.NumericIdentity
			for numericID := range deleted {
				if _, exists := idSel.cachedSelections[numericID]; exists {
					dels = append(dels, numericID)
					delete(idSel.cachedSelections, numericID)
				}
			}
			for numericID := range added {
				matches := idSel.source.matches(sc.idCache[numericID])
				_, exists := idSel.cachedSelections[numericID]
				if matches && !exists {
					adds = append(adds, numericID)
					idSel.cachedSelections[numericID] = struct{}{}
				} else if !matches && exists {
					// Identity was mutated and no longer matches, the identity
					// is deleted from the cached selections, but is not sent to
					// users as a deletion. Instead, we return 'mutated = true'
					// telling the caller to trigger forced policy updates on
					// all endpoints to recompute the policy as if the mutated
					// identity was never selected by the affected selector.
					mutated = true
					delete(idSel.cachedSelections, numericID)
				}
			}
			if len(dels)+len(adds) > 0 {
				updated = true
				sc.selectorUpdates = sc.selectorUpdates.Append(idSel, txn)
				idSel.updateSelections(txn)
				idSel.notifyUsers(sc, adds, dels, wg)
			}
		}
	}
	if updated {
		// Launch a waiter that holds the new version as long as needed for users to have grabbed it
		sc.queueNotifiedUsersCommit(txn, wg)
		go func(version *versioned.VersionHandle) {
			wg.Wait()
			sc.logger.Debug(
				"UpdateIdentities: Waited for incremental updates to have committed, closing handle on the new version.",
				logfields.NewVersion, txn,
			)
			version.Close()
		}(txn.GetVersionHandle())
		txn.Commit()
	}
	return mutated
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"log/slog"
"slices"
"sort"
"sync"
"github.com/hashicorp/go-hclog"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
)
// Package-local re-declarations of the shared policy types, so the rest of
// this package can refer to them without the 'types' qualifier.
type CachedSelector types.CachedSelector
type CachedSelectorSlice types.CachedSelectorSlice
type CachedSelectionUser types.CachedSelectionUser
// identitySelector is the internal type for all selectors in the
// selector cache.
//
// identitySelector represents the mapping of an EndpointSelector
// to a slice of identities. These mappings are updated via two
// different processes:
//
// 1. When policy rules are changed these are added and/or deleted
// depending on what selectors the rules contain. Cached selections of
// new identitySelectors are pre-populated from the set of currently
// known identities.
//
// 2. When reachable identities appear or disappear, either via local
// allocation (CIDRs), or via the KV-store (remote endpoints). In this
// case all existing identitySelectors are walked through and their
// cached selections are updated as necessary.
//
// In both of the above cases the set of existing identitySelectors is
// write locked.
//
// To minimize the upkeep the identity selectors are shared across
// all IdentityPolicies, so that only one copy exists for each
// identitySelector. Users of the SelectorCache take care of creating
// identitySelectors as needed by identity policies. The set of
// identitySelectors is read locked during an IdentityPolicy update so
// that the policy is always updated using a coherent set of
// cached selections.
//
// identitySelector is used as a map key, so it must not be implemented by a
// map, slice, or a func, or a runtime panic will be triggered. In all
// cases below identitySelector is being implemented by structs.
//
// identitySelector is used in the policy engine as a map key,
// so it must always be given to the user as a pointer to the actual type.
// (The public methods only expose the CachedSelector interface.)
type identitySelector struct {
	logger *slog.Logger
	// source implements the selector-kind specific matching logic
	// (label-based or FQDN-based), see selectorSource.
	source selectorSource
	// key is the unique string representation of this selector, also
	// returned by String().
	key string
	// selections is the versioned, immutable slice of currently selected
	// numeric identities, derived from cachedSelections (see
	// updateSelections).
	selections versioned.Value[identity.NumericIdentitySlice]
	// users is the set of users to be notified when the selections change
	// (see notifyUsers).
	users map[CachedSelectionUser]struct{}
	// cachedSelections is the mutable set of selected identities from
	// which 'selections' is rebuilt.
	cachedSelections map[identity.NumericIdentity]struct{}
	// metadataLbls carries the metadata labels exposed via
	// GetMetadataLabels.
	metadataLbls stringLabels
}
// MaySelectPeers reports whether at least one registered user of this
// selector is a peer selector.
func (i *identitySelector) MaySelectPeers() bool {
	for u := range i.users {
		if u.IsPeerSelector() {
			return true
		}
	}
	return false
}
// identitySelector implements CachedSelector
var _ types.CachedSelector = (*identitySelector)(nil)

// selectorSource abstracts the kind-specific part of a selector
// (label-based vs. FQDN-based).
type selectorSource interface {
	// matches reports whether the given cached identity is selected by
	// this source.
	matches(scIdentity) bool
	// remove releases any external state held for this source (used by
	// FQDN selectors; a no-op for label selectors).
	remove(identityNotifier)
	// metricsClass returns the metrics label value classifying this
	// selector.
	metricsClass() string
}
// fqdnSelector implements the selectorSource for a FQDNSelector. A fqdnSelector
// matches an identity if the identity has a `fqdn:` label matching the FQDN
// selector string.
// In addition, the remove implementation calls back into the DNS name manager
// to unregister the FQDN selector.
type fqdnSelector struct {
	selector api.FQDNSelector
}

// remove unregisters this FQDN selector from the DNS name manager.
func (f *fqdnSelector) remove(dnsProxy identityNotifier) {
	dnsProxy.UnregisterFQDNSelector(f.selector)
}

// matches returns true if the identity contains at least one label
// that matches the FQDNSelector's IdentityLabel string
func (f *fqdnSelector) matches(identity scIdentity) bool {
	return identity.lbls.Intersects(labels.LabelArray{f.selector.IdentityLabel()})
}

// metricsClass classifies all FQDN selectors under the FQDN metrics label.
func (f *fqdnSelector) metricsClass() string {
	return LabelValueSCFQDN
}
// labelIdentitySelector implements the selectorSource for a label-based
// EndpointSelector, optionally restricted to a set of namespaces.
type labelIdentitySelector struct {
	selector api.EndpointSelector
	namespaces []string // allowed namespaces, or ""
}

// xxxMatches returns true if the CachedSelector matches given labels.
// This is slow, but only used for policy tracing, so it's OK.
func (l *labelIdentitySelector) xxxMatches(labels labels.LabelArray) bool {
	return l.selector.Matches(labels)
}
// matchesNamespace reports whether namespace 'ns' is acceptable for this
// selector: if no namespaces are required anything matches, otherwise 'ns'
// must be non-empty and one of the allowed namespaces.
func (l *labelIdentitySelector) matchesNamespace(ns string) bool {
	if len(l.namespaces) == 0 {
		// no namespace required, match
		return true
	}
	// namespace required: must be set and listed in the allowed namespaces
	return ns != "" && slices.Contains(l.namespaces, ns)
}
// matches reports whether the cached identity is selected: its namespace
// must be acceptable (see matchesNamespace) and its labels must match the
// endpoint selector.
func (l *labelIdentitySelector) matches(identity scIdentity) bool {
	return l.matchesNamespace(identity.namespace) && l.selector.Matches(identity.lbls)
}

// remove is a no-op for label selectors; there is no external state to release.
func (l *labelIdentitySelector) remove(_ identityNotifier) {
	// only useful for fqdn selectors
}
// metricsClass classifies this selector for metrics: the cluster entity
// selector, one of the world entity selectors, or anything else.
func (l *labelIdentitySelector) metricsClass() string {
	if l.selector.DeepEqual(&api.EntitySelectorMapping[api.EntityCluster][0]) {
		return LabelValueSCCluster
	}
	world := api.EntitySelectorMapping[api.EntityWorld]
	for idx := range world {
		if l.selector.DeepEqual(&world[idx]) {
			return LabelValueSCWorld
		}
	}
	return LabelValueSCOther
}
// notifyUsers queues an incremental update notification for every
// registered user of this selector.
//
// lock must be held
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
func (i *identitySelector) notifyUsers(sc *SelectorCache, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
	for user := range i.users {
		// Notifications are queued on the SelectorCache and delivered
		// asynchronously; 'wg' tracks their completion (see
		// queueUserNotification).
		sc.queueUserNotification(user, i, added, deleted, wg)
	}
}

// Equal is used by checker.Equals, and only considers the identity of the selector,
// ignoring the internal state!
func (i *identitySelector) Equal(b *identitySelector) bool {
	return i.key == b.key
}
//
// CachedSelector implementation (== Public API)
//
// No locking needed.
//

// GetSelections returns the set of numeric identities currently
// selected. The cached selections can be concurrently updated. In
// that case GetSelections() will return either the old or new version
// of the selections. If the old version is returned, the user is
// guaranteed to receive a notification including the update.
func (i *identitySelector) GetSelections(version *versioned.VersionHandle) identity.NumericIdentitySlice {
	if !version.IsValid() {
		// An invalid handle is a caller bug: log with a stack trace and
		// return an empty (non-nil) slice rather than panicking.
		i.logger.Error(
			"GetSelections: Invalid VersionHandle finds nothing",
			logfields.Version, version,
			logfields.Stacktrace, hclog.Stacktrace(),
		)
		return identity.NumericIdentitySlice{}
	}
	return i.selections.At(version)
}
// GetMetadataLabels returns the metadata labels of this selector, parsed
// from their cached string form.
func (i *identitySelector) GetMetadataLabels() labels.LabelArray {
	return labels.LabelArrayFromString(string(i.metadataLbls.Value()))
}
// Selects return 'true' if the CachedSelector selects the given
// numeric identity.
func (i *identitySelector) Selects(version *versioned.VersionHandle, nid identity.NumericIdentity) bool {
	// The wildcard selector selects every identity.
	if i.IsWildcard() {
		return true
	}
	// Selections are kept sorted (see updateSelections), so a binary
	// search suffices. slices.BinarySearch replaces the manual
	// sort.Search + bounds-check pattern.
	nids := i.GetSelections(version)
	_, found := slices.BinarySearch(nids, nid)
	return found
}
// IsWildcard returns true if the endpoint selector selects all
// endpoints.
func (i *identitySelector) IsWildcard() bool {
	return i.key == wildcardSelectorKey
}

// IsNone returns true if the endpoint selector never selects anything.
func (i *identitySelector) IsNone() bool {
	return i.key == noneSelectorKey
}

// String returns the map key for this selector
func (i *identitySelector) String() string {
	return i.key
}
//
// identitySelector implementation (== internal API)
//

// addUser registers 'user' with this selector; reports whether the user
// was newly added (false if it was already registered).
//
// lock must be held
func (i *identitySelector) addUser(user CachedSelectionUser) (added bool) {
	_, found := i.users[user]
	if found {
		return false
	}
	i.users[user] = struct{}{}
	return true
}

// removeUser unregisters 'user'; reports whether this was the last
// remaining user of the selector.
//
// locks must be held for the dnsProxy and the SelectorCache (if the selector is a FQDN selector)
func (i *identitySelector) removeUser(user CachedSelectionUser) (last bool) {
	delete(i.users, user)
	return len(i.users) == 0
}

// numUsers returns the number of registered users.
//
// lock must be held
func (i *identitySelector) numUsers() int {
	return len(i.users)
}
// updateSelections updates the immutable slice representation of the
// cached selections after the cached selections have been changed.
//
// lock must be held
func (i *identitySelector) updateSelections(nextVersion *versioned.Tx) {
	selections := make(identity.NumericIdentitySlice, 0, len(i.cachedSelections))
	for nid := range i.cachedSelections {
		selections = append(selections, nid)
	}
	// Sort the numeric identities so that the map iteration order
	// does not matter. This makes testing easier, but may help
	// identifying changes easier also otherwise.
	slices.Sort(selections)
	i.setSelections(selections, nextVersion)
}
// setSelections stores the given selections at the next version, or removes
// the value entirely when the selection set is empty. Failures are logged
// with a stack trace.
func (i *identitySelector) setSelections(selections identity.NumericIdentitySlice, nextVersion *versioned.Tx) {
	var err error
	if len(selections) == 0 {
		// An empty set is represented by the absence of a value.
		err = i.selections.RemoveAt(nextVersion)
	} else {
		err = i.selections.SetAt(selections, nextVersion)
	}
	if err != nil {
		i.logger.Error(
			"setSelections failed",
			logfields.Error, err,
			logfields.Stacktrace, hclog.Stacktrace(),
		)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"log/slog"
"net/netip"
"slices"
"sync"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/policy/api"
policytypes "github.com/cilium/cilium/pkg/policy/types"
testidentity "github.com/cilium/cilium/pkg/testutils/identity"
)
// cachedSelectionUser is a test implementation of CachedSelectionUser that
// records the notifications it receives from the SelectorCache.
type cachedSelectionUser struct {
	t *testing.T
	sc *SelectorCache
	name string
	// updateMutex guards all the fields below; updateCond is signaled
	// when a selection update commit is received.
	updateMutex lock.Mutex
	updateCond *sync.Cond
	// selections holds the last seen selections per cached selector.
	selections map[CachedSelector][]identity.NumericIdentity
	// notifications counts updates received since the last Reset();
	// adds/deletes accumulate the number of added/deleted identities.
	notifications int
	adds int
	deletes int
}
// haveUserNotifications reports whether the SelectorCache has any queued,
// undelivered user notifications (test helper).
func (sc *SelectorCache) haveUserNotifications() bool {
	sc.userMutex.Lock()
	defer sc.userMutex.Unlock()
	return len(sc.userNotes) > 0
}
// newUser creates a cachedSelectionUser bound to the given SelectorCache,
// with its condition variable wired to its update mutex.
func newUser(t *testing.T, name string, sc *SelectorCache) *cachedSelectionUser {
	csu := &cachedSelectionUser{
		t: t,
		sc: sc,
		name: name,
		selections: make(map[CachedSelector][]identity.NumericIdentity),
	}
	csu.updateCond = sync.NewCond(&csu.updateMutex)
	return csu
}
// haveNid reports whether 'nid' occurs in 'selections'.
func haveNid(nid identity.NumericIdentity, selections []identity.NumericIdentity) bool {
	return slices.Index(selections, nid) >= 0
}
// AddIdentitySelector adds a label-based selector on behalf of this user and
// records its initial selections, asserting that adding a pre-existing
// selector is reported correctly and produces no notifications.
func (csu *cachedSelectionUser) AddIdentitySelector(sel api.EndpointSelector) CachedSelector {
	csu.updateMutex.Lock()
	defer csu.updateMutex.Unlock()
	cached, added := csu.sc.AddIdentitySelector(csu, EmptyStringLabels, sel)
	require.NotNil(csu.t, cached)
	_, exists := csu.selections[cached]
	// Not added if already exists for this user
	require.Equal(csu.t, !exists, added)
	csu.selections[cached] = cached.GetSelections(versioned.Latest())
	// Pre-existing selections are not notified as updates
	require.False(csu.t, csu.sc.haveUserNotifications())
	return cached
}
// AddFQDNSelector adds an FQDN selector on behalf of this user and records
// its initial selections; mirrors AddIdentitySelector for FQDN selectors.
func (csu *cachedSelectionUser) AddFQDNSelector(sel api.FQDNSelector) CachedSelector {
	csu.updateMutex.Lock()
	defer csu.updateMutex.Unlock()
	cached, added := csu.sc.AddFQDNSelector(csu, EmptyStringLabels, sel)
	require.NotNil(csu.t, cached)
	_, exists := csu.selections[cached]
	// Not added if already exists for this user
	require.Equal(csu.t, !exists, added)
	csu.selections[cached] = cached.GetSelections(versioned.Latest())
	// Pre-existing selections are not notified as updates
	require.False(csu.t, csu.sc.haveUserNotifications())
	return cached
}
// RemoveSelector removes this user from the given selector and forgets the
// locally recorded selections, asserting removal produces no notifications.
func (csu *cachedSelectionUser) RemoveSelector(sel CachedSelector) {
	csu.updateMutex.Lock()
	defer csu.updateMutex.Unlock()
	csu.sc.RemoveSelector(sel, csu)
	delete(csu.selections, sel)
	// No notifications for a removed selector
	require.False(csu.t, csu.sc.haveUserNotifications())
}

// Reset clears the notification counter so the next WaitForUpdate blocks
// until a fresh notification arrives.
func (csu *cachedSelectionUser) Reset() {
	csu.updateMutex.Lock()
	defer csu.updateMutex.Unlock()
	csu.notifications = 0
}
// WaitForUpdate blocks until at least one notification has been received
// since the last Reset, then returns the cumulative add/delete counts.
func (csu *cachedSelectionUser) WaitForUpdate() (adds, deletes int) {
	csu.updateMutex.Lock()
	defer csu.updateMutex.Unlock()
	// Standard condition-variable loop; updateCond is signaled from
	// IdentitySelectionCommit.
	for csu.notifications == 0 {
		csu.updateCond.Wait()
	}
	return csu.adds, csu.deletes
}
// IdentitySelectionUpdated implements CachedSelectionUser: it tallies the
// incremental update, validates added/deleted identities against the
// selector's current selections, and records the new selections.
func (csu *cachedSelectionUser) IdentitySelectionUpdated(logger *slog.Logger, selector policytypes.CachedSelector, added, deleted []identity.NumericIdentity) {
	csu.updateMutex.Lock()
	defer csu.updateMutex.Unlock()
	csu.notifications++
	csu.adds += len(added)
	csu.deletes += len(deleted)
	selections := selector.GetSelections(versioned.Latest())
	// Validate added & deleted against the selections
	for _, add := range added {
		require.True(csu.t, haveNid(add, selections))
	}
	for _, del := range deleted {
		require.False(csu.t, haveNid(del, selections))
	}
	// update selections
	csu.selections[selector] = selections
}
// IdentitySelectionCommit implements CachedSelectionUser: it wakes up any
// goroutine blocked in WaitForUpdate.
func (csu *cachedSelectionUser) IdentitySelectionCommit(*slog.Logger, *versioned.Tx) {
	csu.updateCond.Signal()
}

// IsPeerSelector marks this test user as a peer selector.
func (csu *cachedSelectionUser) IsPeerSelector() bool {
	return true
}
// Mock CachedSelector for unit testing.
//
// testCachedSelector is used in isolation so there is no point to implement versioning for it.
type testCachedSelector struct {
	name string
	wildcard bool
	selections []identity.NumericIdentity
}

// newTestCachedSelector builds a testCachedSelector pre-populated with the
// given numeric identity values.
func newTestCachedSelector(name string, wildcard bool, selections ...int) *testCachedSelector {
	cs := &testCachedSelector{
		name: name,
		wildcard: wildcard,
		selections: make([]identity.NumericIdentity, 0, len(selections)),
	}
	cs.addSelections(selections...)
	return cs
}
// addSelections records the given ids as selected (skipping duplicates) and
// returns them converted to []identity.NumericIdentity. A nil receiver
// still returns the converted ids but records nothing.
func (cs *testCachedSelector) addSelections(selections ...int) (adds []identity.NumericIdentity) {
	for _, id := range selections {
		nid := identity.NumericIdentity(id)
		adds = append(adds, nid)
		if cs != nil && !cs.Selects(versioned.Latest(), nid) {
			cs.selections = append(cs.selections, nid)
		}
	}
	return adds
}
// deleteSelections removes all occurrences of the given ids from this
// selector's selections and returns the ids converted to
// []identity.NumericIdentity. A nil receiver still returns the converted
// ids but removes nothing.
func (cs *testCachedSelector) deleteSelections(selections ...int) (deletes []identity.NumericIdentity) {
	for _, id := range selections {
		nid := identity.NumericIdentity(id)
		deletes = append(deletes, nid)
		if cs == nil {
			continue
		}
		// slices.DeleteFunc removes every matching element in one pass,
		// replacing the manual index-decrement loop.
		cs.selections = slices.DeleteFunc(cs.selections, func(sel identity.NumericIdentity) bool {
			return sel == nid
		})
	}
	return deletes
}
// CachedSelector interface

// GetSelections returns the current selections; the version handle is
// ignored by this mock.
func (cs *testCachedSelector) GetSelections(*versioned.VersionHandle) identity.NumericIdentitySlice {
	return cs.selections
}

// GetMetadataLabels returns no labels for this mock.
func (cs *testCachedSelector) GetMetadataLabels() labels.LabelArray {
	return nil
}

// Selects reports whether 'nid' is among the current selections; the
// version handle is ignored by this mock.
func (cs *testCachedSelector) Selects(_ *versioned.VersionHandle, nid identity.NumericIdentity) bool {
	return slices.Contains(cs.selections, nid)
}

// IsWildcard reports the configured wildcard flag.
func (cs *testCachedSelector) IsWildcard() bool {
	return cs.wildcard
}

// IsNone always returns false for this mock.
func (cs *testCachedSelector) IsNone() bool {
	return false
}

// String returns the mock selector's name.
func (cs *testCachedSelector) String() string {
	return cs.name
}
// TestAddRemoveSelector verifies selector reference counting: the same
// selector added by one or more users is cached once, removal is
// idempotent, and the cache entry disappears with its last user.
func TestAddRemoveSelector(t *testing.T) {
	sc := testNewSelectorCache(hivetest.Logger(t), identity.IdentityMap{})

	// Add some identities to the identity cache
	wg := &sync.WaitGroup{}
	sc.UpdateIdentities(identity.IdentityMap{
		1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s),
			k8sConst.PodNamespaceLabel: labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s)}.LabelArray(),
		2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
	}, nil, wg)
	wg.Wait()

	testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
		labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))

	user1 := newUser(t, "user1", sc)
	cached := user1.AddIdentitySelector(testSelector)

	// Current selections contain the numeric identities of existing identities that match
	selections := cached.GetSelections(versioned.Latest())
	require.Len(t, selections, 1)
	require.Equal(t, identity.NumericIdentity(1234), selections[0])

	// Try add the same selector from the same user the second time
	testSelector = api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
		labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))
	cached2 := user1.AddIdentitySelector(testSelector)
	require.Equal(t, cached, cached2)

	// Add the same selector from a different user
	testSelector = api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
		labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))
	user2 := newUser(t, "user2", sc)
	cached3 := user2.AddIdentitySelector(testSelector)

	// Same old CachedSelector is returned, nothing new is cached
	require.Equal(t, cached, cached3)

	// Removing the first user does not remove the cached selector
	user1.RemoveSelector(cached)
	// Remove is idempotent
	user1.RemoveSelector(cached)

	// Removing the last user removes the cached selector
	user2.RemoveSelector(cached3)
	// Remove is idempotent
	user2.RemoveSelector(cached3)

	// All identities removed
	require.Empty(t, sc.selectors)
}
// TestMultipleIdentitySelectors verifies that distinct selectors are cached
// independently and that CIDR selectors select all identities whose CIDR
// labels fall within the selected prefix.
func TestMultipleIdentitySelectors(t *testing.T) {
	sc := testNewSelectorCache(hivetest.Logger(t), identity.IdentityMap{})

	// Add some identities to the identity cache
	wg := &sync.WaitGroup{}
	li1 := identity.IdentityScopeLocal
	li2 := li1 + 1
	sc.UpdateIdentities(identity.IdentityMap{
		1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
		2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
		li1: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.1/32")).LabelArray(),
		li2: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/8")).LabelArray(),
	}, nil, wg)
	wg.Wait()

	testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceAny))
	test2Selector := api.NewESFromLabels(labels.NewLabel("app", "test2", labels.LabelSourceAny))

	// Test both exact and broader CIDR selectors
	cidr32Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.1/32", "", labels.LabelSourceCIDR))
	cidr24Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/24", "", labels.LabelSourceCIDR))
	cidr8Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/8", "", labels.LabelSourceCIDR))
	cidr7Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/7", "", labels.LabelSourceCIDR))

	user1 := newUser(t, "user1", sc)
	cached := user1.AddIdentitySelector(testSelector)

	// Current selections contain the numeric identities of existing identities that match
	selections := cached.GetSelections(versioned.Latest())
	require.Len(t, selections, 1)
	require.Equal(t, identity.NumericIdentity(1234), selections[0])

	// Add another selector from the same user
	cached2 := user1.AddIdentitySelector(test2Selector)
	require.NotEqual(t, cached, cached2)

	// Current selections contain the numeric identities of existing identities that match
	selections2 := cached2.GetSelections(versioned.Latest())
	require.Len(t, selections2, 1)
	require.Equal(t, identity.NumericIdentity(2345), selections2[0])

	// shouldSelect adds the selector, asserts its selections, and removes it.
	shouldSelect := func(sel api.EndpointSelector, wantIDs ...identity.NumericIdentity) {
		csel := user1.AddIdentitySelector(sel)
		selections := csel.GetSelections(versioned.Latest())
		require.Equal(t, identity.NumericIdentitySlice(wantIDs), selections)
		user1.RemoveSelector(csel)
	}

	shouldSelect(cidr32Selector, li1)
	shouldSelect(cidr24Selector, li1)
	shouldSelect(cidr8Selector, li1, li2)
	shouldSelect(cidr7Selector, li1, li2)

	user1.RemoveSelector(cached)
	user1.RemoveSelector(cached2)

	// All identities removed
	require.Empty(t, sc.selectors)
}
// TestIdentityUpdates verifies that identity additions and removals are
// notified incrementally to a selector's user and reflected in the cached
// selections.
func TestIdentityUpdates(t *testing.T) {
	sc := testNewSelectorCache(hivetest.Logger(t), identity.IdentityMap{})

	// Add some identities to the identity cache
	wg := &sync.WaitGroup{}
	sc.UpdateIdentities(identity.IdentityMap{
		1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
		2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
	}, nil, wg)
	wg.Wait()

	testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceAny))
	test2Selector := api.NewESFromLabels(labels.NewLabel("app", "test2", labels.LabelSourceAny))

	user1 := newUser(t, "user1", sc)
	cached := user1.AddIdentitySelector(testSelector)

	// Current selections contain the numeric identities of existing identities that match
	selections := cached.GetSelections(versioned.Latest())
	require.Len(t, selections, 1)
	require.Equal(t, identity.NumericIdentity(1234), selections[0])

	// Add another selector from the same user
	cached2 := user1.AddIdentitySelector(test2Selector)
	require.NotEqual(t, cached, cached2)

	// Current selections contain the numeric identities of existing identities that match
	selections2 := cached2.GetSelections(versioned.Latest())
	require.Len(t, selections2, 1)
	require.Equal(t, identity.NumericIdentity(2345), selections2[0])

	user1.Reset()
	// Add some identities to the identity cache
	wg = &sync.WaitGroup{}
	sc.UpdateIdentities(identity.IdentityMap{
		12345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
	}, nil, wg)
	wg.Wait()

	// One new matching identity was added
	adds, deletes := user1.WaitForUpdate()
	require.Equal(t, 1, adds)
	require.Equal(t, 0, deletes)

	// Current selections contain the numeric identities of existing identities that match
	selections = cached.GetSelections(versioned.Latest())
	require.Len(t, selections, 2)
	require.Equal(t, identity.NumericIdentity(1234), selections[0])
	require.Equal(t, identity.NumericIdentity(12345), selections[1])

	user1.Reset()
	// Remove some identities from the identity cache
	wg = &sync.WaitGroup{}
	sc.UpdateIdentities(nil, identity.IdentityMap{
		12345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
	}, wg)
	wg.Wait()

	// adds is still 1 as the counters accumulate across notifications
	adds, deletes = user1.WaitForUpdate()
	require.Equal(t, 1, adds)
	require.Equal(t, 1, deletes)

	// Current selections contain the numeric identities of existing identities that match
	selections = cached.GetSelections(versioned.Latest())
	require.Len(t, selections, 1)
	require.Equal(t, identity.NumericIdentity(1234), selections[0])

	user1.RemoveSelector(cached)
	user1.RemoveSelector(cached2)

	// All identities removed
	require.Empty(t, sc.selectors)
}
// TestIdentityUpdatesMultipleUsers verifies that all users sharing one
// cached selector receive the same incremental update notifications and
// observe identical selections.
func TestIdentityUpdatesMultipleUsers(t *testing.T) {
	sc := testNewSelectorCache(hivetest.Logger(t), identity.IdentityMap{})

	// Add some identities to the identity cache
	wg := &sync.WaitGroup{}
	sc.UpdateIdentities(identity.IdentityMap{
		1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
		2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
	}, nil, wg)
	wg.Wait()

	testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s))

	user1 := newUser(t, "user1", sc)
	cached := user1.AddIdentitySelector(testSelector)

	// Add same selector from a different user
	user2 := newUser(t, "user2", sc)
	cached2 := user2.AddIdentitySelector(testSelector)
	require.Equal(t, cached, cached2)

	user1.Reset()
	user2.Reset()
	// Add some identities to the identity cache
	wg = &sync.WaitGroup{}
	sc.UpdateIdentities(identity.IdentityMap{
		123: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
		234: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
		345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
	}, nil, wg)
	wg.Wait()

	// Both users see the two new matching identities (123 and 345)
	adds, deletes := user1.WaitForUpdate()
	require.Equal(t, 2, adds)
	require.Equal(t, 0, deletes)
	adds, deletes = user2.WaitForUpdate()
	require.Equal(t, 2, adds)
	require.Equal(t, 0, deletes)

	// Current selections contain the numeric identities of existing identities that match
	selections := cached.GetSelections(versioned.Latest())
	require.Len(t, selections, 3)
	require.Equal(t, identity.NumericIdentity(123), selections[0])
	require.Equal(t, identity.NumericIdentity(345), selections[1])
	require.Equal(t, identity.NumericIdentity(1234), selections[2])

	require.Equal(t, cached2.GetSelections(versioned.Latest()), cached.GetSelections(versioned.Latest()))

	user1.Reset()
	user2.Reset()
	// Remove some identities from the identity cache
	wg = &sync.WaitGroup{}
	sc.UpdateIdentities(nil, identity.IdentityMap{
		123: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
		234: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
	}, wg)
	wg.Wait()

	// adds remain at 2 (cumulative); one matching identity was deleted
	adds, deletes = user1.WaitForUpdate()
	require.Equal(t, 2, adds)
	require.Equal(t, 1, deletes)
	adds, deletes = user2.WaitForUpdate()
	require.Equal(t, 2, adds)
	require.Equal(t, 1, deletes)

	// Current selections contain the numeric identities of existing identities that match
	selections = cached.GetSelections(versioned.Latest())
	require.Len(t, selections, 2)
	require.Equal(t, identity.NumericIdentity(345), selections[0])
	require.Equal(t, identity.NumericIdentity(1234), selections[1])

	require.Equal(t, cached2.GetSelections(versioned.Latest()), cached.GetSelections(versioned.Latest()))

	user1.RemoveSelector(cached)
	user2.RemoveSelector(cached2)

	// All identities removed
	require.Empty(t, sc.selectors)
}
// TestTransactionalUpdate verifies the versioned behavior of selections:
// older version handles keep observing the selections as they were when the
// handle was taken, while new handles see the committed updates.
func TestTransactionalUpdate(t *testing.T) {
	sc := testNewSelectorCache(hivetest.Logger(t), identity.IdentityMap{})

	// Add some identities to the identity cache
	wg := &sync.WaitGroup{}
	li1 := identity.IdentityScopeLocal
	li2 := li1 + 1
	sc.UpdateIdentities(identity.IdentityMap{
		li1: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.1/32")).LabelArray(),
		li2: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/8")).LabelArray(),
	}, nil, wg)
	wg.Wait()

	// Test both exact and broader CIDR selectors
	cidr32Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.1/32", "", labels.LabelSourceCIDR))
	cidr24Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/24", "", labels.LabelSourceCIDR))
	cidr8Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/8", "", labels.LabelSourceCIDR))
	cidr7Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/7", "", labels.LabelSourceCIDR))

	user1 := newUser(t, "user1", sc)
	cs32 := user1.AddIdentitySelector(cidr32Selector)
	cs24 := user1.AddIdentitySelector(cidr24Selector)
	cs8 := user1.AddIdentitySelector(cidr8Selector)
	cs7 := user1.AddIdentitySelector(cidr7Selector)

	// Take a version handle before the next update
	version := sc.versioned.GetVersionHandle()
	defer version.Close()

	require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1}, cs24.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs8.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs7.GetSelections(version))

	// Add some identities to the identity cache
	li3 := li2 + 1
	li4 := li3 + 1
	wg = &sync.WaitGroup{}
	sc.UpdateIdentities(identity.IdentityMap{
		li3: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/31")).LabelArray(),
		li4: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/7")).LabelArray(),
	}, nil, wg)
	wg.Wait()

	// Old version handle still gets the same selections as before
	require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1}, cs24.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs8.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs7.GetSelections(version))

	// New version handle sees the new updates on all selectors
	version2 := sc.versioned.GetVersionHandle()
	defer version2.Close()
	require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version2))
	require.Equal(t, identity.NumericIdentitySlice{li1, li3}, cs24.GetSelections(version2))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2, li3}, cs8.GetSelections(version2))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2, li3, li4}, cs7.GetSelections(version2))

	// Remove some identities from the identity cache
	wg = &sync.WaitGroup{}
	sc.UpdateIdentities(nil, identity.IdentityMap{
		li1: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.1/32")).LabelArray(),
	}, wg)
	wg.Wait()

	// Oldest version handle still gets the same selections as before
	require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1}, cs24.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs8.GetSelections(version))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs7.GetSelections(version))

	// Middle version handle is also unchanged
	require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version2))
	require.Equal(t, identity.NumericIdentitySlice{li1, li3}, cs24.GetSelections(version2))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2, li3}, cs8.GetSelections(version2))
	require.Equal(t, identity.NumericIdentitySlice{li1, li2, li3, li4}, cs7.GetSelections(version2))

	// New version handle sees the removal
	version3 := sc.versioned.GetVersionHandle()
	defer version3.Close()
	require.Equal(t, identity.NumericIdentitySlice(nil), cs32.GetSelections(version3))
	require.Equal(t, identity.NumericIdentitySlice{li3}, cs24.GetSelections(version3))
	require.Equal(t, identity.NumericIdentitySlice{li2, li3}, cs8.GetSelections(version3))
	require.Equal(t, identity.NumericIdentitySlice{li2, li3, li4}, cs7.GetSelections(version3))

	user1.RemoveSelector(cs32)
	user1.RemoveSelector(cs24)
	user1.RemoveSelector(cs8)
	user1.RemoveSelector(cs7)

	// All identities removed
	require.Empty(t, sc.selectors)
}
// TestSelectorCacheCanSkipUpdate verifies that CanSkipUpdate correctly
// detects no-op updates: deleting unknown identities or re-adding identical
// ones can be skipped, while genuine additions or deletions cannot.
func TestSelectorCacheCanSkipUpdate(t *testing.T) {
	id1 := identity.NewIdentity(1001, labels.LabelArray{labels.NewLabel("id", "a", labels.LabelSourceK8s)}.Labels())
	id2 := identity.NewIdentity(1002, labels.LabelArray{labels.NewLabel("id", "b", labels.LabelSourceK8s)}.Labels())

	// toIdentityMap converts identities to the map form UpdateIdentities takes.
	toIdentityMap := func(ids ...*identity.Identity) identity.IdentityMap {
		idMap := identity.IdentityMap{}
		for _, id := range ids {
			idMap[id.ID] = id.LabelArray
		}
		return idMap
	}

	sc := testNewSelectorCache(hivetest.Logger(t), identity.IdentityMap{})
	wg := &sync.WaitGroup{}

	// Adding an unknown identity cannot be skipped
	require.False(t, sc.CanSkipUpdate(toIdentityMap(id1), nil))
	sc.UpdateIdentities(toIdentityMap(id1), nil, wg)
	wg.Wait()

	// Deleting an unknown identity can be skipped; re-adding a known one too
	require.True(t, sc.CanSkipUpdate(nil, toIdentityMap(id2)))
	require.True(t, sc.CanSkipUpdate(toIdentityMap(id1), toIdentityMap(id2)))
	require.False(t, sc.CanSkipUpdate(toIdentityMap(id2), nil))
	sc.UpdateIdentities(toIdentityMap(id2), nil, wg)
	wg.Wait()

	// Both identities are now known: re-add skippable, deletions are not
	require.True(t, sc.CanSkipUpdate(toIdentityMap(id2), nil))
	require.False(t, sc.CanSkipUpdate(nil, toIdentityMap(id2)))
	require.False(t, sc.CanSkipUpdate(nil, toIdentityMap(id1, id2)))
	sc.UpdateIdentities(nil, toIdentityMap(id1, id2), wg)
	wg.Wait()
}
// TestSelectorManagerCanGetBeforeSet verifies that GetSelections on an
// identitySelector whose selections were never set does not panic and
// returns an empty result.
func TestSelectorManagerCanGetBeforeSet(t *testing.T) {
	defer func() {
		// Fail the test if GetSelections panicked.
		r := recover()
		require.Nil(t, r)
	}()
	idSel := identitySelector{
		logger: hivetest.Logger(t),
		key: "test",
		users: make(map[CachedSelectionUser]struct{}),
	}
	selections := idSel.GetSelections(versioned.Latest())
	require.Empty(t, selections)
}
// testNewSelectorCache creates a SelectorCache pre-populated with 'ids' and
// wired to a dummy identity notifier, for use in tests.
func testNewSelectorCache(logger *slog.Logger, ids identity.IdentityMap) *SelectorCache {
	sc := NewSelectorCache(logger, ids)
	sc.SetLocalIdentityNotifier(testidentity.NewDummyIdentityNotifier())
	return sc
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package trafficdirection
// TrafficDirection specifies the directionality of policy (ingress or egress).
type TrafficDirection uint8

const (
	// Invalid represents an invalid traffic direction.
	Invalid TrafficDirection = 2

	// Egress represents egress traffic.
	Egress TrafficDirection = 1

	// Ingress represents ingress traffic.
	Ingress TrafficDirection = 0
)

// Uint8 normalizes the TrafficDirection for insertion into BPF maps.
func (td TrafficDirection) Uint8() uint8 {
	return uint8(td)
}

// String returns a human-readable name for the traffic direction, or
// "Unknown" for any value other than Ingress or Egress.
func (td TrafficDirection) String() string {
	switch td {
	case Egress:
		return "Egress"
	case Ingress:
		return "Ingress"
	default:
		return "Unknown"
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"log/slog"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// TriggerPolicyUpdates force full policy recomputation before
// regenerating all endpoints.
// This artificially bumps the policy revision, invalidating
// all cached policies. This is done when an additional resource
// used in policy calculation has changed.
func (u *Updater) TriggerPolicyUpdates(reason string) {
	// Bump the revision first so the subsequent regeneration recomputes
	// policy instead of reusing cached results.
	u.repo.BumpRevision()
	u.logger.Info("Triggering full policy recalculation and regeneration of all endpoints", logfields.Reason, reason)
	u.regen.TriggerRegenerateAllEndpoints()
}
// NewUpdater returns a new Updater instance to handle triggering policy
// updates ready for use.
func NewUpdater(logger *slog.Logger, r PolicyRepository, regen regenerator) *Updater {
	return &Updater{
		logger: logger,
		regen: regen,
		repo: r,
	}
}

// Updater is responsible for triggering policy updates, in order to perform
// policy recalculation.
type Updater struct {
	logger *slog.Logger
	repo PolicyRepository
	regen regenerator
}

// regenerator abstracts the endpoint-regeneration trigger used by Updater.
type regenerator interface {
	// TriggerRegenerateAllEndpoints should trigger a regeneration of all
	// endpoints.
	TriggerRegenerateAllEndpoints()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import "strconv"
// AuthType enumerates the supported authentication types in api.
// Numerically higher type takes precedence in case of conflicting auth types.
type AuthType uint8
// AuthTypes is a set of AuthTypes, usually nil if empty
type AuthTypes map[AuthType]struct{}
// The iota ordering below defines the precedence:
// AuthTypeAlwaysFail > AuthTypeSpire > AuthTypeDisabled.
const (
	// AuthTypeDisabled means no authentication required
	AuthTypeDisabled AuthType = iota
	// AuthTypeSpire is a mutual auth type that uses SPIFFE identities with a SPIRE server
	AuthTypeSpire
	// AuthTypeAlwaysFail is a simple auth type that always denies the request
	AuthTypeAlwaysFail
)
// AuthRequirement is a combination of an AuthType with an 'explicit' flag on the highest bit. This
// is defined in order to keep MapStateEntry smaller and to simplify code wiring this to the bpf
// datapath.
//
// NOTE: This type is part of the bpf policy API.
//
// This type reflects the layout of the 'auth_type' field in the bpf policy map and is used in
// pkg/maps/policymap. This layout must not be changed!
type AuthRequirement AuthType
const (
	// NoAuthRequirement is the zero value: no auth type and no explicit flag.
	NoAuthRequirement AuthRequirement = 0
	// AuthTypeIsExplicit is the highest bit, set when the auth type was
	// explicitly configured rather than derived.
	AuthTypeIsExplicit AuthRequirement = 1 << 7
)
// IsExplicit returns true if the 'explicit' flag is set, i.e. the auth type
// was explicitly configured rather than derived.
func (a AuthRequirement) IsExplicit() bool {
	return a&AuthTypeIsExplicit != 0
}
// AsDerived returns the auth requirement with the 'explicit' flag cleared.
func (a AuthRequirement) AsDerived() AuthRequirement {
	return a & ^AuthTypeIsExplicit
}
// AuthType returns the AuthType part of the requirement, i.e. the value
// without the 'explicit' flag.
func (a AuthRequirement) AuthType() AuthType {
	return AuthType(a.AsDerived())
}
// AsDerivedRequirement converts the AuthType to a requirement with the
// 'explicit' flag unset.
func (a AuthType) AsDerivedRequirement() AuthRequirement {
	return AuthRequirement(a)
}
// AsExplicitRequirement converts the AuthType to a requirement with the
// 'explicit' flag set.
func (a AuthType) AsExplicitRequirement() AuthRequirement {
	return AuthRequirement(a) | AuthTypeIsExplicit
}
// Uint8 returns AuthType as a uint8
func (a AuthType) Uint8() uint8 {
	return uint8(a)
}
// String returns AuthType as a string.
// This must return the strings accepted for api.AuthType
func (a AuthType) String() string {
	switch a {
	case AuthTypeDisabled:
		return "disabled"
	case AuthTypeSpire:
		return "spire"
	case AuthTypeAlwaysFail:
		return "test-always-fail"
	default:
		return "Unknown-auth-type-" + strconv.FormatUint(uint64(a.Uint8()), 10)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import "strconv"
// ListenerPriority is a proxy listener priority as configured in policy;
// 0 means "no explicit priority" (see WithListenerPriority).
type ListenerPriority uint8
// ProxyPortPriority is the datapath encoding of the listener priority:
// numerically higher values take precedence, 0 is reserved for entries
// without a proxy redirect (see WithListenerPriority).
type ProxyPortPriority uint8
const (
	// MaxProxyPortPriority is the highest value of the ProxyPortPriority encoding.
	MaxProxyPortPriority = 127
	// MaxListenerPriority is the largest accepted listener priority;
	// larger configured values are clamped to this.
	MaxListenerPriority = 126
)
// MapStateEntry is the configuration associated with a Key in a
// MapState. This is a minimized version of policymap.PolicyEntry.
type MapStateEntry struct {
	// isDeny is true when the policy should be denied.
	isDeny bool
	// ProxyPortPriority encodes the listener priority.
	ProxyPortPriority ProxyPortPriority
	// The proxy port, in host byte order.
	// If 0 (default), there is no proxy redirection for the corresponding
	// Key. Any other value signifies proxy redirection.
	ProxyPort uint16
	// Invalid is only set to mark the current entry for update when syncing entries to datapath
	Invalid bool
	// AuthRequirement is non-zero when authentication is required for the traffic to be
	// allowed, except for when it explicitly defines authentication is not required.
	AuthRequirement AuthRequirement
}
// MapStateMap maps policy Keys to their MapStateEntry values.
type MapStateMap map[Key]MapStateEntry
// String returns a string representation of the MapStateEntry
func (e MapStateEntry) String() string {
	s := "IsDeny=" + strconv.FormatBool(e.IsDeny()) +
		",ProxyPort=" + strconv.FormatUint(uint64(e.ProxyPort), 10) +
		",Priority=" + strconv.FormatUint(uint64(e.ProxyPortPriority), 10)
	if e.AuthRequirement != 0 {
		s += ",AuthType=" + e.AuthRequirement.AuthType().String()
		if !e.AuthRequirement.IsExplicit() {
			s += " (derived)"
		}
	}
	return s
}
// NewMapStateEntry creates a new MapStateEntry
// Listener 'priority' is encoded in ProxyPortPriority, inverted
func NewMapStateEntry(deny bool, proxyPort uint16, priority ListenerPriority, authReq AuthRequirement) MapStateEntry {
	// Deny entries never carry a proxy redirect or auth requirement.
	if deny {
		return MapStateEntry{isDeny: true}
	}
	entry := MapStateEntry{
		ProxyPort:       proxyPort,
		AuthRequirement: authReq,
	}
	return entry.WithListenerPriority(priority)
}
// IsDeny returns true when the entry denies traffic.
func (e MapStateEntry) IsDeny() bool {
	return e.isDeny
}
// IsRedirectEntry returns true if the entry redirects to a proxy port
func (e MapStateEntry) IsRedirectEntry() bool {
	return e.ProxyPort != 0
}
// AllowEntry returns a MapStateEntry for an allow policy without a proxy redirect
func AllowEntry() MapStateEntry {
	return MapStateEntry{}
}
// DenyEntry returns a MapStateEntry for a deny policy
func DenyEntry() MapStateEntry {
	return MapStateEntry{isDeny: true}
}
// WithDeny returns the entry 'e' with 'isDeny' set as indicated.
// The receiver is a value, so the original entry is left unmodified.
func (e MapStateEntry) WithDeny(isDeny bool) MapStateEntry {
	e.isDeny = isDeny
	return e
}
// WithListenerPriority returns a MapStateEntry with the given listener priority:
// 0 - default (low) priority for all proxy redirects
// 1 - highest listener priority
// ..
// 100 - lowest (non-default) listener priority
// 101 - priority for HTTP parser type
// 106 - priority for the Kafka parser type
// 111 - priority for the proxylib parsers
// 116 - priority for TLS interception parsers (can be promoted to HTTP/Kafka/proxylib)
// 121 - priority for DNS parser type
// 126 - default priority for CRD parser type
// 127 - reserved (listener priority passed as 0)
func (e MapStateEntry) WithListenerPriority(priority ListenerPriority) MapStateEntry {
	// Only entries with a proxy redirect carry a priority.
	if e.ProxyPort == 0 {
		return e
	}
	if priority == 0 {
		// '1' is reserved for a proxy port without an explicit priority.
		e.ProxyPortPriority = 1
		return e
	}
	if priority > MaxListenerPriority {
		priority = MaxListenerPriority
	}
	// Invert so that the numerically higher ProxyPortPriority takes
	// precedence: priority 1 maps to '127', 100 to '28', 126 to '2'.
	// '0' remains reserved for entries without a proxy redirect.
	e.ProxyPortPriority = MaxProxyPortPriority + 1 - ProxyPortPriority(priority)
	return e
}
// WithProxyPort return the MapStateEntry with proxy port set at the default precedence.
// A zero proxy port is ignored, leaving the entry unchanged.
func (e MapStateEntry) WithProxyPort(proxyPort uint16) MapStateEntry {
	if proxyPort > 0 {
		e.ProxyPort = proxyPort
		e.ProxyPortPriority = 1 // proxy port without explicit priority
	}
	return e
}
// Merge is only called if both entries are denies or allows.
// It folds 'entry' into the receiver, keeping the higher-precedence proxy
// redirect and auth requirement of the two.
func (e *MapStateEntry) Merge(entry MapStateEntry) {
	// Only allow entries have proxy redirection or auth requirement
	if !e.IsDeny() {
		// Proxy port takes precedence, but may be updated due to priority
		if entry.IsRedirectEntry() {
			// Higher number has higher priority, but non-redirects have 0 priority
			// value.
			// Proxy port value is the tie-breaker when priorities have the same value.
			// ('&&' binds tighter than '||': the second disjunct is the
			// equal-priority, numerically-lower-port tie-break.)
			if entry.ProxyPortPriority > e.ProxyPortPriority || entry.ProxyPortPriority == e.ProxyPortPriority && entry.ProxyPort < e.ProxyPort {
				e.ProxyPort = entry.ProxyPort
				e.ProxyPortPriority = entry.ProxyPortPriority
			}
		}
		// Numerically higher AuthType takes precedence when both are
		// either explicitly defined or derived
		if entry.AuthRequirement.IsExplicit() == e.AuthRequirement.IsExplicit() {
			if entry.AuthRequirement > e.AuthRequirement {
				e.AuthRequirement = entry.AuthRequirement
			}
		} else if entry.AuthRequirement.IsExplicit() {
			// Explicit auth takes precedence over defaulted one.
			e.AuthRequirement = entry.AuthRequirement
		}
	}
}
// Diff returns the string of differences between 'obtained' and 'expected' prefixed with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging from other packages.
func (obtained MapStateMap) Diff(expected MapStateMap) string {
	res := "Missing (-), Unexpected (+):\n"
	for k, want := range expected {
		got, found := obtained[k]
		switch {
		case !found:
			// Expected key is absent entirely.
			res += "- " + k.String() + ": " + want.String() + "\n"
		case got != want:
			// Key present but with a different entry.
			res += "- " + k.String() + ": " + want.String() + "\n"
			res += "+ " + k.String() + ": " + got.String() + "\n"
		}
	}
	for k, got := range obtained {
		if _, found := expected[k]; !found {
			res += "+ " + k.String() + ": " + got.String() + "\n"
		}
	}
	return res
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble
package types
import (
"bytes"
"encoding/json"
"log/slog"
"strings"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
)
// CachedSelector represents an identity selector owned by the selector cache
type CachedSelector interface {
	// GetSelections returns the cached set of numeric identities
	// selected by the CachedSelector. The returned slice must NOT
	// be modified, as it is shared among multiple users.
	GetSelections(*versioned.VersionHandle) identity.NumericIdentitySlice
	// GetMetadataLabels returns metadata labels for additional context
	// surrounding the selector. These are typically the labels associated with
	// Cilium rules.
	GetMetadataLabels() labels.LabelArray
	// Selects return 'true' if the CachedSelector selects the given
	// numeric identity.
	Selects(*versioned.VersionHandle, identity.NumericIdentity) bool
	// IsWildcard returns true if the endpoint selector selects
	// all endpoints.
	IsWildcard() bool
	// IsNone returns true if the selector never selects anything
	IsNone() bool
	// String returns the string representation of this selector.
	// Used as a map key.
	String() string
}
// CachedSelectorSlice is a slice of CachedSelectors that can be sorted
// (it implements sort.Interface, ordering by selector string).
type CachedSelectorSlice []CachedSelector
// MarshalJSON returns the CachedSelectors as a JSON array of their
// string representations.
func (s CachedSelectorSlice) MarshalJSON() ([]byte, error) {
	var out bytes.Buffer
	out.WriteString("[")
	for i, selector := range s {
		if i > 0 {
			out.WriteString(",")
		}
		encoded, err := json.Marshal(selector.String())
		if err != nil {
			return nil, err
		}
		out.Write(encoded)
	}
	out.WriteString("]")
	return out.Bytes(), nil
}
// Len implements sort.Interface.
func (s CachedSelectorSlice) Len() int { return len(s) }
// Swap implements sort.Interface.
func (s CachedSelectorSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Less implements sort.Interface; selectors are ordered lexicographically
// by their string representation.
func (s CachedSelectorSlice) Less(i, j int) bool {
	return strings.Compare(s[i].String(), s[j].String()) < 0
}
// SelectsAllEndpoints returns whether the CachedSelectorSlice selects all
// endpoints, which is true if the wildcard endpoint selector is present in the
// slice.
func (s CachedSelectorSlice) SelectsAllEndpoints() bool {
	for i := range s {
		if s[i].IsWildcard() {
			return true
		}
	}
	return false
}
// CachedSelectionUser inserts selectors into the cache and gets update
// callbacks whenever the set of selected numeric identities change for
// the CachedSelectors pushed by it.
// Callbacks are executed from a separate goroutine that does not take the
// selector cache lock, so the implementations generally may call back to
// the selector cache.
type CachedSelectionUser interface {
	// IdentitySelectionUpdated is called when the set of identities
	// selected by 'selector' changes.
	// The caller is responsible for making sure the same identity is not
	// present in both 'added' and 'deleted'.
	IdentitySelectionUpdated(logger *slog.Logger, selector CachedSelector, added, deleted []identity.NumericIdentity)
	// IdentitySelectionCommit tells the user that all IdentitySelectionUpdated calls relating
	// to a specific added or removed identity have been made.
	IdentitySelectionCommit(logger *slog.Logger, txn *versioned.Tx)
	// IsPeerSelector returns true if the selector is used by the policy
	// engine for selecting traffic for remote peers. False if used for
	// selecting policy subjects.
	IsPeerSelector() bool
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"math/bits"
"strconv"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/u8proto"
)
// MapStatePrefixLen is the length, in bits, of the Key when converted
// to binary minus the sizeof the identity field (which is not indexed).
// This matches the size of LPMKey: 8 ('bits') + 8 (Nexthdr) + 16 (DestPort).
const MapStatePrefixLen = uint(32)
// LPMKey is the indexed portion of a policy key: traffic direction,
// protocol, and (possibly wildcarded) destination port. It is
// intentionally duplicated from pkg/maps/policymap to avoid pulling in the
// BPF dependency to this package.
type LPMKey struct {
	// bits contains the TrafficDirection in the highest bit and the port prefix length in the 5 lowest bits.
	bits uint8
	// Nexthdr is the protocol which is allowed.
	Nexthdr u8proto.U8proto
	// DestPort is the port at L4 to / from which traffic is allowed, in
	// host-byte order.
	DestPort uint16
}
// Key is the userspace representation of a policy key in BPF: an LPMKey
// plus the numeric identity, which is not LPM-indexed.
type Key struct {
	LPMKey
	// Identity is the numeric identity to / from which traffic is allowed.
	Identity identity.NumericIdentity
}
// The traffic direction is stored in the most significant bit of 'bits'.
const (
	directionBitShift = 7
	directionBitMask  = uint8(1) << directionBitShift
)
//
// Key initialization utility functions
//
// EgressKey returns a zero Key with the direction bit set to egress.
func EgressKey() Key {
	return Key{
		LPMKey: LPMKey{
			bits: 1 << directionBitShift,
		},
	}
}
// IngressKey returns a zero Key with the direction bit cleared (ingress).
func IngressKey() Key {
	return Key{
		LPMKey: LPMKey{
			bits: 0 << directionBitShift,
		},
	}
}
// KeyForDirection returns a zero Key for the given direction.
// NOTE(review): only Ingress (0) and Egress (1) fit in the direction bit;
// trafficdirection.Invalid (2) shifts out and yields an ingress key —
// presumably callers never pass Invalid; confirm.
func KeyForDirection(direction trafficdirection.TrafficDirection) Key {
	return Key{
		LPMKey: LPMKey{
			bits: uint8(direction) << directionBitShift,
		},
	}
}
// WithProto returns the key with the L4 protocol set to 'proto'.
// The receiver is a value, so the original key is left unmodified.
func (k Key) WithProto(proto u8proto.U8proto) Key {
	k.Nexthdr = proto
	return k
}
// WithPort returns the key with the destination port set to 'port'.
// A non-zero port gets a full 16-bit prefix; port 0 wildcards the port
// (prefix length 0).
func (k Key) WithPort(port uint16) Key {
	k.DestPort = port
	// Reset the prefix-length bits, keeping only the direction bit.
	var prefix uint8
	if port != 0 {
		// non-wildcarded port
		prefix = 16
	}
	k.bits = k.bits&directionBitMask | prefix
	return k
}
// WithPortPrefix returns the key with a wildcarded destination port:
// only the top 'prefixLen' bits of 'port' are significant.
// An out-of-range prefix (>16), or a zero prefix with a non-zero port,
// is normalized to a full 16-bit match.
func (k Key) WithPortPrefix(port uint16, prefixLen uint8) Key {
	if prefixLen > 16 || port != 0 && prefixLen == 0 {
		prefixLen = 16
	}
	// set up the port wildcard: mask DestPort down to its prefix so that
	// bits outside the prefix are always zero (comparisons rely on this).
	k.DestPort = port & (0xffff << (16 - prefixLen))
	k.bits = k.bits&directionBitMask | prefixLen
	return k
}
// WithPortProto returns the key with both protocol and destination port set.
func (k Key) WithPortProto(proto u8proto.U8proto, port uint16) Key {
	return k.WithProto(proto).WithPort(port)
}
// WithPortProtoPrefix returns the key with the protocol and a wildcarded
// destination port set.
func (k Key) WithPortProtoPrefix(proto u8proto.U8proto, port uint16, prefixLen uint8) Key {
	return k.WithProto(proto).WithPortPrefix(port, prefixLen)
}
// WithTCPPort returns the key for TCP traffic to/from 'port'.
func (k Key) WithTCPPort(port uint16) Key {
	return k.WithPortProto(u8proto.TCP, port)
}
// WithTCPPortPrefix returns the key for TCP traffic to/from a port range.
func (k Key) WithTCPPortPrefix(port uint16, prefixLen uint8) Key {
	return k.WithPortProtoPrefix(u8proto.TCP, port, prefixLen)
}
// WithUDPPort returns the key for UDP traffic to/from 'port'.
func (k Key) WithUDPPort(port uint16) Key {
	return k.WithPortProto(u8proto.UDP, port)
}
// WithUDPPortPrefix returns the key for UDP traffic to/from a port range.
func (k Key) WithUDPPortPrefix(port uint16, prefixLen uint8) Key {
	return k.WithPortProtoPrefix(u8proto.UDP, port, prefixLen)
}
// WithSCTPPort returns the key for SCTP traffic to/from 'port'.
func (k Key) WithSCTPPort(port uint16) Key {
	return k.WithPortProto(u8proto.SCTP, port)
}
// WithSCTPPortPrefix returns the key for SCTP traffic to/from a port range.
func (k Key) WithSCTPPortPrefix(port uint16, prefixLen uint8) Key {
	return k.WithPortProtoPrefix(u8proto.SCTP, port, prefixLen)
}
// WithIdentity returns the key with the numeric identity set to 'nid'.
func (k Key) WithIdentity(nid identity.NumericIdentity) Key {
	k.Identity = nid
	return k
}
// TrafficDirection() returns the direction of the Key, 0 == ingress, 1 == egress
func (k LPMKey) TrafficDirection() trafficdirection.TrafficDirection {
	// Note that 0 and 1 are the only possible return values, the shift below reduces the byte
	// to a single bit.
	return trafficdirection.TrafficDirection(k.bits >> directionBitShift)
}
// PortPrefixLen returns the length of the bitwise mask that should be applied to the DestPort.
func (k LPMKey) PortPrefixLen() uint8 {
	return k.bits & ^directionBitMask
}
// HasPortWildcard returns true when the key does not match the full 16-bit
// destination port, i.e. its port prefix length is less than 16.
func (k LPMKey) HasPortWildcard() bool {
	return k.bits & ^directionBitMask < 16
}
// String returns a string representation of the Key.
// A wildcarded port is rendered as a "start-end" range using EndPort().
func (k Key) String() string {
	dPort := strconv.FormatUint(uint64(k.DestPort), 10)
	if k.DestPort != 0 && k.PortPrefixLen() < 16 {
		dPort += "-" + strconv.FormatUint(uint64(k.EndPort()), 10)
	}
	return "Identity=" + strconv.FormatUint(uint64(k.Identity), 10) +
		",DestPort=" + dPort +
		",Nexthdr=" + strconv.FormatUint(uint64(k.Nexthdr), 10) +
		",TrafficDirection=" + strconv.FormatUint(uint64(k.TrafficDirection()), 10)
}
// IsIngress returns true if the key refers to an ingress policy key
func (k LPMKey) IsIngress() bool {
	return k.TrafficDirection() == trafficdirection.Ingress
}
// IsEgress returns true if the key refers to an egress policy key
func (k LPMKey) IsEgress() bool {
	return k.TrafficDirection() == trafficdirection.Egress
}
// EndPort returns the end-port of the Key based on the Mask.
// A full 16-bit prefix yields DestPort itself; shorter prefixes add the
// number of wildcarded port values.
func (k LPMKey) EndPort() uint16 {
	return k.DestPort + uint16(0xffff)>>k.PortPrefixLen()
}
// PortProtoIsBroader returns true if the receiver Key has broader
// port-protocol than the argument Key. That is a port-protocol
// that covers the argument Key's port-protocol and is larger.
// An equal port-protocol will return false.
func (k Key) PortProtoIsBroader(c Key) bool {
	// Port is wildcarded when protocol is wildcarded
	// ('&&' binds tighter than '||': either k wildcards the protocol that c
	// specifies, or the protocols are equal and k's port range is broader.)
	return k.Nexthdr == 0 && c.Nexthdr != 0 || k.Nexthdr == c.Nexthdr && k.PortIsBroader(c)
}
// PortProtoIsEqual returns true if the port-protocols of the
// two keys are exactly equal.
func (k LPMKey) PortProtoIsEqual(c Key) bool {
	return k.Nexthdr == c.Nexthdr && k.PortIsEqual(c)
}
// PortIsBroader returns true if the receiver Key's
// port range covers the argument Key's port range,
// but returns false if they are equal.
func (k LPMKey) PortIsBroader(c Key) bool {
	// Broader port must have shorter prefix and a common part needs to be the same
	kPrefixLen := k.PortPrefixLen()
	cPrefixLen := c.PortPrefixLen()
	// NOTE(review): '&' binds tighter than '^' in Go, so the expression below
	// is k.DestPort ^ (c.DestPort & mask). This is equivalent to comparing
	// the common prefix only because DestPort is pre-masked to its prefix
	// length by WithPortPrefix — confirm that invariant holds for all keys.
	return kPrefixLen < cPrefixLen &&
		k.DestPort^c.DestPort&(uint16(0xffff)<<(16-kPrefixLen)) == 0
}
// PortIsEqual returns true if the port ranges
// between the two keys are exactly equal.
func (k LPMKey) PortIsEqual(c Key) bool {
	return k.DestPort == c.DestPort && k.bits<<1 == c.bits<<1 // ignore traffic direction
}
// PrefixLength returns the prefix length of the key
// for indexing it for the userspace cache (not the
// BPF map or datapath).
func (k LPMKey) PrefixLength() uint {
	if k.Nexthdr == 0 {
		return 1 // direction always specified
	}
	// 1 bit from direction bit + 8 bits for the protocol when `k.Nexthdr' != 0
	return 9 + uint(k.PortPrefixLen())
}
// CommonPrefix implements the CommonPrefix method for the
// bitlpm.Key interface. Identity is not indexed and is instead,
// saved as a simple map per TrafficDirection-Protocol-Port index
// key.
func (k LPMKey) CommonPrefix(b LPMKey) uint {
	// if direction bits are different then there is nothing in common
	if (k.bits^b.bits)>>directionBitShift != 0 {
		return 0
	}
	// 1 for the shared direction bit plus the leading protocol bits in common.
	v := 1 + bits.LeadingZeros8(uint8(k.Nexthdr^b.Nexthdr))
	// if protocols are different then there is no need to look at the ports
	if v < 9 {
		return uint(v)
	}
	return uint(v + bits.LeadingZeros16(k.DestPort^b.DestPort))
}
// BitValueAt implements the BitValueAt method for the
// bitlpm.Key interface.
// Bit 0 is the direction, bits 1-8 the protocol, bits 9-24 the port.
func (k LPMKey) BitValueAt(i uint) uint8 {
	switch {
	case i == 0:
		return k.bits >> directionBitShift
	case i < 9:
		return uint8((k.Nexthdr >> (8 - i)) & 1)
	default:
		// i in [9,24]: 24-i selects the port bit, MSB first.
		return uint8(k.DestPort>>(24-i)) & 1
	}
}
// Keys is a set of policy map keys.
type Keys map[Key]struct{}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import "github.com/cilium/cilium/pkg/labels"
// JoinPath returns a joined path from a and b.
// The components are joined with labels.PathDelimiter.
func JoinPath(a, b string) string {
	return a + labels.PathDelimiter + b
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package promise
import (
"context"
"sync"
"github.com/cilium/cilium/pkg/lock"
)
// A promise for a future value.
type Promise[T any] interface {
	// Await blocks until the value is resolved or rejected.
	Await(context.Context) (T, error)
}
// Resolver can resolve or reject a promise.
// These methods are separate from 'Promise' to make it clear where the promise is resolved
// from.
type Resolver[T any] interface {
	// Resolve a promise. Unblocks all Await()s. Future calls of Await()
	// return the resolved value immediately.
	//
	// Only the first call to resolve (or reject) has an effect and
	// further calls are ignored.
	Resolve(T)
	// Reject a promise with an error.
	Reject(error)
}
// New creates a new promise for value T.
// Returns a resolver and the promise.
// Both returned values are the same underlying object; the promise itself
// serves as the condition variable's Locker via its embedded mutex.
func New[T any]() (Resolver[T], Promise[T]) {
	promise := &promise[T]{}
	promise.cond = sync.NewCond(promise)
	return promise, promise
}
// Promise states; a promise starts unresolved and transitions at most once
// to resolved or rejected.
const (
	promiseUnresolved = iota
	promiseResolved
	promiseRejected
)
// promise implements both Promise and Resolver.
type promise[T any] struct {
	lock.Mutex
	// cond signals state transitions to goroutines blocked in Await.
	cond *sync.Cond
	// state is one of the promise* constants above; guarded by the mutex.
	state int
	// value is meaningful only when state == promiseResolved.
	value T
	// err is meaningful only when state == promiseRejected.
	err error
}
// Resolve informs all other codepaths who are Await()ing on the received
// promise that T is now successfully initialized and available for usage.
//
// Initialization logic for T should either call Resolve() or Reject(), and
// must not call these functions more than once.
func (p *promise[T]) Resolve(value T) {
	p.Lock()
	defer p.Unlock()
	// Only the first Resolve/Reject takes effect; later calls are no-ops.
	if p.state == promiseUnresolved {
		p.state = promiseResolved
		p.value = value
		p.cond.Broadcast()
	}
}
// Reject informs all other codepaths who are Await()ing on the received
// promise that T could not be initialized and cannot be used to due the
// specified error reason.
//
// Initialization logic for T should either call Resolve() or Reject(), and
// must not call these functions more than once.
func (p *promise[T]) Reject(err error) {
	p.Lock()
	defer p.Unlock()
	// Only the first Resolve/Reject takes effect; later calls are no-ops.
	if p.state == promiseUnresolved {
		p.state = promiseRejected
		p.err = err
		p.cond.Broadcast()
	}
}
// Await blocks until the promise has been resolved, rejected or context cancelled.
// On cancellation the context's error is returned; otherwise the resolved
// value or the rejection error.
func (p *promise[T]) Await(ctx context.Context) (value T, err error) {
	// Wake up the for-loop below if the context is cancelled.
	// See https://pkg.go.dev/context#AfterFunc for a more detailed
	// explanation of this pattern
	cleanupCancellation := context.AfterFunc(ctx, func() {
		// Taking the lock before broadcasting guarantees the waiter is
		// either not yet waiting or parked in cond.Wait, so the wakeup
		// cannot be lost.
		p.Lock()
		defer p.Unlock()
		p.cond.Broadcast()
	})
	defer cleanupCancellation()
	p.Lock()
	defer p.Unlock()
	// Wait until the promise is resolved or context cancelled.
	// NOTE(review): the 'ctx == nil' guard appears unreachable —
	// context.AfterFunc above already panics on a nil context; confirm
	// before relying on nil-context support.
	for p.state == promiseUnresolved && (ctx == nil || ctx.Err() == nil) {
		p.cond.Wait()
	}
	if ctx.Err() != nil {
		err = ctx.Err()
	} else if p.state == promiseResolved {
		value = p.value
	} else {
		err = p.err
	}
	return
}
// wrappedPromise adapts a plain function to the Promise interface.
type wrappedPromise[T any] func(context.Context) (T, error)
// Await invokes the wrapped function.
func (await wrappedPromise[T]) Await(ctx context.Context) (T, error) {
	return await(ctx)
}
// Map transforms the value of a promise with the provided function.
// Errors from the underlying promise are passed through untransformed.
func Map[A, B any](p Promise[A], transform func(A) B) Promise[B] {
	return wrappedPromise[B](func(ctx context.Context) (B, error) {
		var zero B
		a, err := p.Await(ctx)
		if err != nil {
			return zero, err
		}
		return transform(a), nil
	})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package accesslog
import (
"github.com/cilium/hive/cell"
"github.com/spf13/pflag"
)
// Cell provides the Proxy Access logging infrastructure that allows for sending
// L7 proxy access flow logs.
var Cell = cell.Module(
	"proxy-logger",
	"Proxy Logger provides support for L7 proxy access flow logging",
	// ProxyAccessLogger is the public API of this module.
	cell.Provide(NewProxyAccessLogger),
	// The notifier is private: it forwards records to the monitor agent.
	cell.ProvidePrivate(newMonitorAgentLogRecordNotifier),
	cell.Config(ProxyAccessLoggerConfig{}),
)
// ProxyAccessLoggerConfig is the configuration of the proxy access logger.
type ProxyAccessLoggerConfig struct {
	// AgentLabels are attached as metadata to every emitted log record;
	// bound to the "agent-labels" flag registered in Flags below.
	AgentLabels []string
}
// Flags registers the command line flags of this config.
func (r ProxyAccessLoggerConfig) Flags(flags *pflag.FlagSet) {
	flags.StringSlice("agent-labels", []string{}, "Additional labels to identify this agent in monitor events")
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package accesslog
import (
"fmt"
monitoragent "github.com/cilium/cilium/pkg/monitor/agent"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
)
// monitorAgentLogRecordNotifier forwards proxy log records to the monitor
// agent as access log events.
type monitorAgentLogRecordNotifier struct {
	monitorAgent monitoragent.Agent
}
// newMonitorAgentLogRecordNotifier wraps the monitor agent in a LogRecordNotifier.
func newMonitorAgentLogRecordNotifier(monitorAgent monitoragent.Agent) LogRecordNotifier {
	return &monitorAgentLogRecordNotifier{monitorAgent: monitorAgent}
}
// NewProxyLogRecord sends the record to the monitor agent as a
// MessageTypeAccessLog event.
func (m *monitorAgentLogRecordNotifier) NewProxyLogRecord(l *LogRecord) error {
	// Note: important to pass the event as value
	if err := m.monitorAgent.SendEvent(monitorAPI.MessageTypeAccessLog, *l); err != nil {
		return fmt.Errorf("failed to send log record to monitor agent: %w", err)
	}
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package accesslog
import (
"log/slog"
"github.com/cilium/cilium/pkg/flowdebug"
"github.com/cilium/cilium/pkg/node"
"github.com/cilium/cilium/pkg/time"
)
// ProxyAccessLogger creates and emits L7 proxy access log records.
type ProxyAccessLogger interface {
	// NewLogRecord creates a new log record and applies optional tags
	//
	// Example:
	// NewLogRecord(flowType, observationPoint, logger.LogTags.Timestamp(time.Now()))
	NewLogRecord(t FlowType, ingress bool, tags ...LogTag) *LogRecord
	// Log logs the given log record to the flow log (if flow debug logging is enabled)
	// and sends it off to the monitor agent via notifier.
	Log(lr *LogRecord)
}
// proxyAccessLogger is the default ProxyAccessLogger implementation.
type proxyAccessLogger struct {
	logger *slog.Logger
	// notifier receives every record passed to Log.
	notifier LogRecordNotifier
	// endpointInfoRegistry resolves endpoint metadata for log tags.
	endpointInfoRegistry EndpointInfoRegistry
	// metadata (the configured agent labels) is attached to each record.
	metadata []string
}
// LogRecordNotifier is the interface to implement LogRecord notifications.
// Each type that wants to implement this interface must support concurrent calls
// to the interface methods.
// Besides, the number of concurrent calls may be very high, so long critical sections
// should be avoided (i.e.: avoid using a single lock for slow logging operations).
type LogRecordNotifier interface {
	// NewProxyLogRecord is called for each new log record
	NewProxyLogRecord(l *LogRecord) error
}
// NewProxyAccessLogger constructs a ProxyAccessLogger that decorates records
// with the configured agent labels and forwards them to 'notifier'.
func NewProxyAccessLogger(logger *slog.Logger, config ProxyAccessLoggerConfig, notifier LogRecordNotifier, endpointInfoRegistry EndpointInfoRegistry) ProxyAccessLogger {
	return &proxyAccessLogger{
		logger:               logger,
		notifier:             notifier,
		endpointInfoRegistry: endpointInfoRegistry,
		metadata:             config.AgentLabels,
	}
}
// NewLogRecord creates a new log record, fills in the node addresses and
// current timestamp, and applies the given optional tags.
func (r *proxyAccessLogger) NewLogRecord(t FlowType, ingress bool, tags ...LogTag) *LogRecord {
	observationPoint := Egress
	if ingress {
		observationPoint = Ingress
	}
	lr := LogRecord{
		Type:              t,
		ObservationPoint:  observationPoint,
		IPVersion:         VersionIPv4,
		TransportProtocol: 6, // 6 is the IANA protocol number for TCP
		Timestamp:         time.Now().UTC().Format(time.RFC3339Nano),
		NodeAddressInfo:   NodeAddressInfo{},
	}
	if ip := node.GetIPv4(r.logger); ip != nil {
		lr.NodeAddressInfo.IPv4 = ip.String()
	}
	if ip := node.GetIPv6(r.logger); ip != nil {
		lr.NodeAddressInfo.IPv6 = ip.String()
	}
	// Tags may overwrite the defaults set above (e.g. IPVersion, Timestamp).
	for _, apply := range tags {
		apply(&lr, r.endpointInfoRegistry)
	}
	return &lr
}
// Log logs the given log record to the flow debug log (when enabled),
// attaches the configured agent labels and forwards the record to the
// notifier.
func (r *proxyAccessLogger) Log(lr *LogRecord) {
	if flowdebug.Enabled() {
		r.logger.Debug("Logging flow record", r.getLogFields(lr)...)
	}
	lr.Metadata = r.metadata
	// Surface notifier failures instead of silently dropping the record;
	// delivery can fail, e.g. when the monitor agent is unavailable.
	if err := r.notifier.NewProxyLogRecord(lr); err != nil {
		r.logger.Error("Failed to deliver access log record", "error", err)
	}
}
// getLogFields flattens the log record into alternating slog key/value
// pairs, including protocol-specific fields when present.
func (r *proxyAccessLogger) getLogFields(lr *LogRecord) []any {
	fields := []any{
		FieldType, lr.Type,
		FieldVerdict, lr.Verdict,
		FieldMessage, lr.Info,
	}
	if http := lr.HTTP; http != nil {
		fields = append(fields,
			FieldCode, http.Code,
			FieldMethod, http.Method,
			FieldURL, http.URL,
			FieldProtocol, http.Protocol,
			FieldHeader, http.Headers,
		)
	}
	if kafka := lr.Kafka; kafka != nil {
		fields = append(fields,
			FieldCode, kafka.ErrorCode,
			FieldKafkaAPIVersion, kafka.APIVersion,
			FieldKafkaAPIKey, kafka.APIKey,
			FieldKafkaCorrelationID, kafka.CorrelationID,
		)
	}
	return fields
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package accesslog
import (
"context"
"net/netip"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// fields used for structured logging
const (
	FieldType     = "type"
	FieldVerdict  = "verdict"
	FieldCode     = "code"
	FieldMethod   = "method"
	FieldURL      = "url"
	FieldProtocol = "protocol"
	FieldHeader   = "header"
	// FieldFilePath reuses the shared logfields key for file paths.
	FieldFilePath = logfields.Path
	FieldMessage  = "message"
)
// fields used for structured logging of Kafka messages
const (
	FieldKafkaAPIKey        = "kafkaApiKey"
	FieldKafkaAPIVersion    = "kafkaApiVersion"
	FieldKafkaCorrelationID = "kafkaCorrelationID"
)
// LogTag attaches a tag to a log record
type LogTag func(lr *LogRecord, endpointInfoRegistry EndpointInfoRegistry)
// LogTags are optional structured tags that can be attached to log records.
// See NewLogRecord() and ApplyTags() for example usage.
var LogTags logTags
// logTags is a stateless namespace for the tag constructor methods below.
type logTags struct{}
// Verdict attach verdict information to the log record
func (logTags) Verdict(v FlowVerdict, info string) LogTag {
	return func(lr *LogRecord, _ EndpointInfoRegistry) {
		lr.Verdict = v
		lr.Info = info
	}
}
// Timestamp overwrites the starting timestamp of the log record
// with 'ts' rendered as RFC3339 with nanoseconds, in UTC.
func (logTags) Timestamp(ts time.Time) LogTag {
	return func(lr *LogRecord, _ EndpointInfoRegistry) {
		lr.Timestamp = ts.UTC().Format(time.RFC3339Nano)
	}
}
// AddressingInfo is the information passed in via the Addressing() tag
type AddressingInfo struct {
	// SrcIPPort and DstIPPort are "ip:port" strings parseable by
	// netip.ParseAddrPort.
	SrcIPPort string
	DstIPPort string
	// SrcIdentity is used only when SrcSecIdentity is nil.
	SrcIdentity identity.NumericIdentity
	SrcSecIdentity *identity.Identity
	SrcEPID uint64
	// DstIdentity is used only when DstSecIdentity is nil.
	DstIdentity identity.NumericIdentity
	DstSecIdentity *identity.Identity
	DstEPID uint64
}
// Addressing attaches addressing information about the source and destination
// to the logrecord
func (logTags) Addressing(ctx context.Context, i AddressingInfo) LogTag {
	return func(lr *LogRecord, endpointInfoRegistry EndpointInfoRegistry) {
		lr.SourceEndpoint.ID = i.SrcEPID
		// Prefer the full security identity; fall back to the numeric one.
		if i.SrcSecIdentity != nil {
			lr.SourceEndpoint.Identity = uint64(i.SrcSecIdentity.ID)
			lr.SourceEndpoint.Labels = i.SrcSecIdentity.LabelArray
		} else {
			lr.SourceEndpoint.Identity = uint64(i.SrcIdentity)
		}
		// An unparseable address leaves the port/endpoint info unset.
		addrPort, err := netip.ParseAddrPort(i.SrcIPPort)
		if err == nil {
			// The record's IP version is derived from the source address
			// only; the destination below does not change it.
			if addrPort.Addr().Is6() {
				lr.IPVersion = VersionIPV6
			}
			lr.SourceEndpoint.Port = addrPort.Port()
			endpointInfoRegistry.FillEndpointInfo(ctx, &lr.SourceEndpoint, addrPort.Addr())
		}
		lr.DestinationEndpoint.ID = i.DstEPID
		if i.DstSecIdentity != nil {
			lr.DestinationEndpoint.Identity = uint64(i.DstSecIdentity.ID)
			lr.DestinationEndpoint.Labels = i.DstSecIdentity.LabelArray
		} else {
			lr.DestinationEndpoint.Identity = uint64(i.DstIdentity)
		}
		addrPort, err = netip.ParseAddrPort(i.DstIPPort)
		if err == nil {
			lr.DestinationEndpoint.Port = addrPort.Port()
			endpointInfoRegistry.FillEndpointInfo(ctx, &lr.DestinationEndpoint, addrPort.Addr())
		}
	}
}
// HTTP attaches HTTP information to the log record.
// NOTE(review): the pointer is stored without copying, so the caller should
// not mutate it after tagging.
func (logTags) HTTP(h *LogRecordHTTP) LogTag {
	return func(lr *LogRecord, _ EndpointInfoRegistry) {
		lr.HTTP = h
	}
}
// Kafka attaches Kafka information to the log record
func (logTags) Kafka(k *LogRecordKafka) LogTag {
	return func(lr *LogRecord, _ EndpointInfoRegistry) {
		lr.Kafka = k
	}
}
// DNS attaches DNS information to the log record
func (logTags) DNS(d *LogRecordDNS) LogTag {
	return func(lr *LogRecord, _ EndpointInfoRegistry) {
		lr.DNS = d
	}
}
// L7 attaches generic L7 information to the log record
func (logTags) L7(h *LogRecordL7) LogTag {
	return func(lr *LogRecord, _ EndpointInfoRegistry) {
		lr.L7 = h
	}
}
// EndpointInfoRegistry provides endpoint information lookup by endpoint IP address.
type EndpointInfoRegistry interface {
	// FillEndpointInfo resolves the labels of the specified identity if known locally.
	// ID and Labels should be provided in 'info' if known.
	// If 'id' is passed as zero, will locate the EP by 'addr', and also fill info.ID, if found.
	// Fills in the following info member fields:
	// - info.IPv4 (if 'ip' is IPv4)
	// - info.IPv6 (if 'ip' is not IPv4)
	// - info.Identity (defaults to WORLD if not known)
	// - info.Labels (only if identity is found)
	FillEndpointInfo(ctx context.Context, info *EndpointInfo, addr netip.Addr)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package rate
import (
"context"
"errors"
"fmt"
"log/slog"
"math"
"strconv"
"strings"
"github.com/google/uuid"
"golang.org/x/sync/semaphore"
"golang.org/x/time/rate"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
var (
	// ErrWaitCancelled is returned when a request is cancelled while
	// waiting for its rate limiting slot.
	ErrWaitCancelled = errors.New("request cancelled while waiting for rate limiting slot")
)
const (
	// Default values for the limiter's adaptive adjustment calculations.
	defaultMeanOver = 10
	defaultDelayedAdjustmentFactor = 0.50
	defaultMaxAdjustmentFactor = 100.0
	// waitSemaphoreResolution is the maximum resolution of the wait semaphore,
	// the higher this value, the more accurate the ParallelRequests
	// requirement is implemented
	waitSemaphoreResolution = 10000000
	// logUUID is the UUID of the request.
	logUUID = "uuid"
	// logAPICallName is the name of the underlying API call, such as
	// "endpoint-create".
	logAPICallName = "name"
	// logProcessingDuration is the time taken to perform the actual underlying
	// API call such as creating an endpoint or deleting an endpoint. This is
	// the time between when the request has finished waiting (or being
	// delayed), to when the underlying action has finished.
	logProcessingDuration = "processingDuration"
	// logParallelRequests is the number of allowed parallel requests. See
	// APILimiter.parallelRequests.
	logParallelRequests = "parallelRequests"
	// logMinWaitDuration represents APILimiterParameters.MinWaitDuration.
	logMinWaitDuration = "minWaitDuration"
	// logMaxWaitDuration represents APILimiterParameters.MaxWaitDuration.
	logMaxWaitDuration = "maxWaitDuration"
	// logMaxWaitDurationLimiter is the actual / calculated maximum threshold
	// for a request to wait. Any request exceeding this threshold will not be
	// processed.
	logMaxWaitDurationLimiter = "maxWaitDurationLimiter"
	// logWaitDurationLimit is the actual / calculated amount of time
	// determined by the underlying rate-limiting library that this request
	// must wait before the rate limiter releases it, so that it can take the
	// underlying action. See golang.org/x/time/rate.(*Reservation).Delay().
	logWaitDurationLimit = "waitDurationLimiter"
	// logWaitDurationTotal is the actual total amount of time that this
	// request spent waiting to be released by the rate limiter.
	logWaitDurationTotal = "waitDurationTotal"
	// logLimit is the rate limit. See APILimiterParameters.RateLimit.
	logLimit = "limit"
	// logBurst is the burst rate. See APILimiterParameters.RateBurst.
	logBurst = "burst"
	// logTotalDuration is the total time between when the request was first
	// scheduled (entered the rate limiter) to when it completed processing of
	// the underlying action. This is the absolute total time of the request
	// from beginning to end.
	logTotalDuration = "totalDuration"
	// logSkipped represents whether the rate limiter will skip rate-limiting
	// this request. See APILimiterParameters.SkipInitial.
	logSkipped = "rateLimiterSkipped"
)
// outcome categorizes why a request was rejected by the limiter.
type outcome string

const (
	// outcomeParallelMaxWait: waiting for a parallel-requests slot exceeded
	// the maximum wait duration.
	outcomeParallelMaxWait outcome = "fail-parallel-wait"
	// outcomeLimitMaxWait: the rate limiter's required delay exceeded the
	// maximum wait duration.
	outcomeLimitMaxWait outcome = "fail-limit-wait"
	// outcomeReqCancelled: the request's context was cancelled while waiting.
	outcomeReqCancelled outcome = "request-cancelled"
	// outcomeErrorCode is the HTTP-style return code reported for rejected
	// requests.
	outcomeErrorCode int = 429
	// outcomeSuccessCode is the HTTP-style return code reported for
	// successfully processed requests.
	outcomeSuccessCode int = 200
)
// APILimiter is an extension to x/time/rate.Limiter specifically for Cilium
// API calls. It allows to automatically adjust the rate, burst and maximum
// parallel API calls to stay as close as possible to an estimated processing
// time.
type APILimiter struct {
	logger *slog.Logger
	// name is the name of the API call. This field is immutable after
	// NewAPILimiter()
	name string

	// params is the parameters of the limiter. This field is immutable
	// after NewAPILimiter()
	params APILimiterParameters

	// metrics points to the metrics implementation provided by the caller
	// of the APILimiter. This field is immutable after NewAPILimiter()
	metrics MetricsObserver

	// mutex protects all fields below this line
	mutex lock.RWMutex

	// meanProcessingDuration is the latest mean processing duration,
	// calculated based on processingDurations
	meanProcessingDuration float64

	// processingDurations is the last params.MeanOver processing durations
	processingDurations []time.Duration

	// meanWaitDuration is the latest mean wait duration, calculated based
	// on waitDurations
	meanWaitDuration float64

	// waitDurations is the last params.MeanOver wait durations
	waitDurations []time.Duration

	// parallelRequests is the currently allowed maximum parallel
	// requests. This defaults to params.MaxParallel requests and is then
	// adjusted automatically if params.AutoAdjust is enabled.
	parallelRequests int

	// adjustmentFactor is the latest adjustment factor. It is the ratio
	// between params.EstimatedProcessingDuration and
	// meanProcessingDuration.
	adjustmentFactor float64

	// limiter is the rate limiter based on params.RateLimit and
	// params.RateBurst. It is nil when params.RateLimit is 0, i.e. when no
	// rate limiting is configured.
	limiter *rate.Limiter

	// currentRequestsInFlight is the number of parallel API requests
	// currently in flight
	currentRequestsInFlight int

	// requestsProcessed is the total number of processed requests
	requestsProcessed int64

	// requestsScheduled is the total number of scheduled requests
	requestsScheduled int64

	// parallelWaitSemaphore is the semaphore used to implement
	// params.MaxParallel. It is initialized with a capacity of
	// waitSemaphoreResolution and each API request will acquire
	// waitSemaphoreResolution/params.MaxParallel tokens.
	parallelWaitSemaphore *semaphore.Weighted
}
// APILimiterParameters is the configuration of an APILimiter. The structure
// may not be mutated after it has been passed into NewAPILimiter().
type APILimiterParameters struct {
	// EstimatedProcessingDuration is the estimated duration an API call
	// will take. This value is used if AutoAdjust is enabled to
	// automatically adjust rate limits to stay as close as possible to the
	// estimated processing duration.
	EstimatedProcessingDuration time.Duration

	// AutoAdjust enables automatic adjustment of the values
	// ParallelRequests, RateLimit, and RateBurst in order to keep the
	// mean processing duration close to EstimatedProcessingDuration
	AutoAdjust bool

	// MeanOver is the number of entries to keep in order to calculate the
	// mean processing and wait duration
	MeanOver int

	// ParallelRequests is the parallel requests allowed. If AutoAdjust is
	// enabled, the value will adjust automatically.
	ParallelRequests int

	// MaxParallelRequests is the maximum parallel requests allowed. If
	// AutoAdjust is enabled, then the ParallelRequests will never grow
	// above MaxParallelRequests.
	MaxParallelRequests int

	// MinParallelRequests is the minimum parallel requests allowed. If
	// AutoAdjust is enabled, then the ParallelRequests will never fall
	// below MinParallelRequests.
	MinParallelRequests int

	// RateLimit is the initial number of API requests allowed per second.
	// If AutoAdjust is enabled, the value will adjust automatically.
	RateLimit rate.Limit

	// RateBurst is the initial allowed burst of API requests allowed. If
	// AutoAdjust is enabled, the value will adjust automatically.
	RateBurst int

	// MinWaitDuration is the minimum time an API request always has to
	// wait before the Wait() function returns an error.
	MinWaitDuration time.Duration

	// MaxWaitDuration is the maximum time an API request is allowed to
	// wait before the Wait() function returns an error.
	MaxWaitDuration time.Duration

	// Log enables info logging of processed API requests. This should only
	// be used for low frequency API calls.
	Log bool

	// DelayedAdjustmentFactor is percentage of the AdjustmentFactor to be
	// applied to RateBurst and MaxWaitDuration defined as a value between
	// 0.0..1.0. This is used to steer a slower reaction of the RateBurst
	// and ParallelRequests compared to RateLimit.
	DelayedAdjustmentFactor float64

	// SkipInitial is the number of initial API calls for which to not
	// apply any rate limiting. This is useful to define a learning phase
	// in the beginning to allow for auto adjustment before imposing wait
	// durations and rate limiting on API calls.
	SkipInitial int

	// MaxAdjustmentFactor is the maximum adjustment factor when AutoAdjust
	// is enabled. Base values will not adjust more than by this factor.
	MaxAdjustmentFactor float64
}
// MergeUserConfig merges the provided user configuration into a copy of the
// existing parameters and returns the merged result. The receiver itself is
// left untouched; on parse failure a zero-valued parameter set is returned
// alongside the error.
func (p APILimiterParameters) MergeUserConfig(config string) (APILimiterParameters, error) {
	// p is a value receiver, so mergeUserConfig mutates a private copy.
	if err := p.mergeUserConfig(config); err != nil {
		return APILimiterParameters{}, err
	}
	return p, nil
}
// NewAPILimiter returns a new APILimiter based on the parameters and metrics
// implementation. Tuning parameters left at their zero value are replaced by
// sensible defaults before the limiter is constructed.
func NewAPILimiter(logger *slog.Logger, name string, p APILimiterParameters, metrics MetricsObserver) *APILimiter {
	// Fill in defaults for any knobs left unset.
	if p.MeanOver == 0 {
		p.MeanOver = defaultMeanOver
	}
	if p.MinParallelRequests == 0 {
		p.MinParallelRequests = 1
	}
	if p.RateBurst == 0 {
		p.RateBurst = 1
	}
	if p.DelayedAdjustmentFactor == 0.0 {
		p.DelayedAdjustmentFactor = defaultDelayedAdjustmentFactor
	}
	if p.MaxAdjustmentFactor == 0.0 {
		p.MaxAdjustmentFactor = defaultMaxAdjustmentFactor
	}

	limiter := &APILimiter{
		logger:                logger,
		name:                  name,
		params:                p,
		parallelRequests:      p.ParallelRequests,
		parallelWaitSemaphore: semaphore.NewWeighted(waitSemaphoreResolution),
		metrics:               metrics,
	}

	// A token-bucket limiter is only created when rate limiting was
	// actually requested; otherwise limiter.limiter stays nil.
	if p.RateLimit != 0 {
		limiter.limiter = rate.NewLimiter(p.RateLimit, p.RateBurst)
	}

	return limiter
}
// NewAPILimiterFromConfig returns a new APILimiter based on user configuration
func NewAPILimiterFromConfig(logger *slog.Logger, name, config string, metrics MetricsObserver) (*APILimiter, error) {
	// Start from an all-zero parameter set and overlay the user config;
	// NewAPILimiter fills in the remaining defaults.
	var params APILimiterParameters
	if err := params.mergeUserConfig(config); err != nil {
		return nil, err
	}
	return NewAPILimiter(logger, name, params, metrics), nil
}
// mergeUserConfigKeyValue applies a single user-provided "option:value" pair
// to the parameters. Option names are matched case-insensitively; unknown
// options are rejected with an error.
func (p *APILimiterParameters) mergeUserConfigKeyValue(key, value string) error {
	switch strings.ToLower(key) {
	case "rate-limit":
		limit, err := parseRate(value)
		if err != nil {
			return fmt.Errorf("unable to parse rate %q: %w", value, err)
		}
		p.RateLimit = limit
	case "rate-burst":
		burst, err := parsePositiveInt(value)
		if err != nil {
			return err
		}
		p.RateBurst = burst
	case "min-wait-duration":
		minWaitDuration, err := time.ParseDuration(value)
		if err != nil {
			return fmt.Errorf("unable to parse duration %q: %w", value, err)
		}
		p.MinWaitDuration = minWaitDuration
	case "max-wait-duration":
		maxWaitDuration, err := time.ParseDuration(value)
		if err != nil {
			return fmt.Errorf("unable to parse duration %q: %w", value, err)
		}
		p.MaxWaitDuration = maxWaitDuration
	case "estimated-processing-duration":
		estProcessingDuration, err := time.ParseDuration(value)
		if err != nil {
			return fmt.Errorf("unable to parse duration %q: %w", value, err)
		}
		p.EstimatedProcessingDuration = estProcessingDuration
	case "auto-adjust":
		v, err := strconv.ParseBool(value)
		if err != nil {
			return fmt.Errorf("unable to parse bool %q: %w", value, err)
		}
		p.AutoAdjust = v
	case "parallel-requests":
		parallel, err := parsePositiveInt(value)
		if err != nil {
			return err
		}
		p.ParallelRequests = parallel
	case "min-parallel-requests":
		minParallel, err := parsePositiveInt(value)
		if err != nil {
			return err
		}
		p.MinParallelRequests = minParallel
	case "max-parallel-requests":
		maxParallel, err := parsePositiveInt(value)
		if err != nil {
			return err
		}
		// parsePositiveInt already returns int; the previous int()
		// conversion here was redundant.
		p.MaxParallelRequests = maxParallel
	case "mean-over":
		meanOver, err := parsePositiveInt(value)
		if err != nil {
			return err
		}
		p.MeanOver = meanOver
	case "log":
		v, err := strconv.ParseBool(value)
		if err != nil {
			return fmt.Errorf("unable to parse bool %q: %w", value, err)
		}
		p.Log = v
	case "delayed-adjustment-factor":
		delayedAdjustmentFactor, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return fmt.Errorf("unable to parse float %q: %w", value, err)
		}
		p.DelayedAdjustmentFactor = delayedAdjustmentFactor
	case "max-adjustment-factor":
		maxAdjustmentFactor, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return fmt.Errorf("unable to parse float %q: %w", value, err)
		}
		p.MaxAdjustmentFactor = maxAdjustmentFactor
	case "skip-initial":
		skipInitial, err := parsePositiveInt(value)
		if err != nil {
			return err
		}
		p.SkipInitial = skipInitial
	default:
		return fmt.Errorf("unknown rate limiting option %q", key)
	}

	return nil
}
// mergeUserConfig parses config as a comma-separated list of "option:value"
// pairs and applies each one to the parameters in place. Empty tokens are
// skipped; the first malformed or unknown option aborts the merge.
func (p *APILimiterParameters) mergeUserConfig(config string) error {
	for token := range strings.SplitSeq(config, ",") {
		if token == "" {
			continue
		}

		t := strings.SplitN(token, ":", 2)
		if len(t) != 2 {
			// Error message fix: "must in the form" -> "must be in the form".
			return fmt.Errorf("unable to parse rate limit option %q, must be in the form name=option:value[,option:value]", token)
		}

		if err := p.mergeUserConfigKeyValue(t[0], t[1]); err != nil {
			return fmt.Errorf("unable to parse rate limit option %q with value %q: %w", t[0], t[1], err)
		}
	}

	return nil
}
// Parameters returns the parameters of the API limiter. No locking is
// required as params is immutable after NewAPILimiter().
func (l *APILimiter) Parameters() APILimiterParameters {
	return l.params
}
// SetRateLimit sets the rate limit of the limiter. If limiter is unset, a new
// Limiter is created using the rate burst set in the parameters.
func (l *APILimiter) SetRateLimit(limit rate.Limit) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	if l.limiter == nil {
		// No limiter configured yet; create one with the configured burst.
		l.limiter = rate.NewLimiter(limit, l.params.RateBurst)
		return
	}
	l.limiter.SetLimit(limit)
}
// SetRateBurst sets the rate burst of the limiter. If limiter is unset, a new
// Limiter is created using the rate limit set in the parameters.
func (l *APILimiter) SetRateBurst(burst int) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	if l.limiter == nil {
		// No limiter configured yet; create one with the configured limit.
		l.limiter = rate.NewLimiter(l.params.RateLimit, burst)
		return
	}
	l.limiter.SetBurst(burst)
}
// delayedAdjustment scales current by the adjustment factor, dampened by
// DelayedAdjustmentFactor, and clamps the result to [lowerLimit, upperLimit].
// A limit of 0.0 leaves that side unbounded.
func (l *APILimiter) delayedAdjustment(current, lowerLimit, upperLimit float64) float64 {
	target := current * l.adjustmentFactor
	// Move only a fraction of the way towards the target so that this value
	// reacts more slowly than the plain adjustment factor would.
	adjusted := current + (target-current)*l.params.DelayedAdjustmentFactor
	if lowerLimit > 0.0 && adjusted < lowerLimit {
		adjusted = lowerLimit
	}
	if upperLimit > 0.0 && adjusted > upperLimit {
		adjusted = upperLimit
	}
	return adjusted
}
// calculateAdjustmentFactor returns the ratio between the estimated and the
// observed mean processing duration, clamped to the range
// [1/MaxAdjustmentFactor, MaxAdjustmentFactor].
func (l *APILimiter) calculateAdjustmentFactor() float64 {
	factor := l.params.EstimatedProcessingDuration.Seconds() / l.meanProcessingDuration
	return max(min(factor, l.params.MaxAdjustmentFactor), 1.0/l.params.MaxAdjustmentFactor)
}
// adjustmentLimit clamps newValue so that it deviates from initialValue by no
// more than a factor of MaxAdjustmentFactor in either direction. Uses the
// builtin min/max (Go 1.21+) for consistency with calculateAdjustmentFactor.
func (l *APILimiter) adjustmentLimit(newValue, initialValue float64) float64 {
	return max(initialValue/l.params.MaxAdjustmentFactor, min(initialValue*l.params.MaxAdjustmentFactor, newValue))
}
// adjustedBurst computes the auto-adjusted rate burst: the dampened
// adjustment of the configured RateBurst, never below MinParallelRequests,
// bounded relative to the configured RateBurst, rounded to the nearest int.
func (l *APILimiter) adjustedBurst() int {
	burst := l.delayedAdjustment(float64(l.params.RateBurst), float64(l.params.MinParallelRequests), 0.0)
	burst = l.adjustmentLimit(burst, float64(l.params.RateBurst))
	return int(math.Round(burst))
}
// adjustedLimit computes the auto-adjusted rate limit, bounded relative to
// the configured RateLimit by the maximum adjustment factor.
func (l *APILimiter) adjustedLimit() rate.Limit {
	adjusted := float64(l.params.RateLimit) * l.adjustmentFactor
	return rate.Limit(l.adjustmentLimit(adjusted, float64(l.params.RateLimit)))
}
// adjustedParallelRequests computes the auto-adjusted number of parallel
// requests, clamped to [MinParallelRequests, MaxParallelRequests] and
// bounded relative to the configured ParallelRequests.
func (l *APILimiter) adjustedParallelRequests() int {
	adjusted := l.delayedAdjustment(
		float64(l.params.ParallelRequests),
		float64(l.params.MinParallelRequests),
		float64(l.params.MaxParallelRequests),
	)
	return int(l.adjustmentLimit(adjusted, float64(l.params.ParallelRequests)))
}
// requestFinished records the completion of a request: it logs the result,
// releases the parallel-requests semaphore weight, updates the rolling mean
// processing/wait durations, re-adjusts the limiter when auto-adjust is
// enabled, and finally reports a metrics snapshot. It is idempotent — only
// the first call per request has any effect.
func (l *APILimiter) requestFinished(r *limitedRequest, err error, code int) {
	if r.finished {
		return
	}

	r.finished = true

	var processingDuration time.Duration
	// startTime is only set once the request was actually released by the
	// limiter; it stays zero for requests that failed while waiting.
	if !r.startTime.IsZero() {
		processingDuration = time.Since(r.startTime)
	}

	totalDuration := time.Since(r.scheduleTime)

	scopedLog := l.logger.With(
		logAPICallName, l.name,
		logUUID, r.uuid,
		logProcessingDuration, processingDuration,
		logTotalDuration, totalDuration,
		logWaitDurationTotal, r.waitDuration,
	)

	if err != nil {
		scopedLog = scopedLog.With(logfields.Error, err)
	}

	if l.params.Log {
		scopedLog.Info("API call has been processed")
	} else {
		scopedLog.Debug("API call has been processed")
	}

	// Return the semaphore weight acquired in wait() before taking the
	// mutex so waiters are unblocked as early as possible.
	if r.waitSemaphoreWeight != 0 {
		l.parallelWaitSemaphore.Release(r.waitSemaphoreWeight)
	}

	l.mutex.Lock()

	if !r.startTime.IsZero() {
		l.requestsProcessed++
		l.currentRequestsInFlight--
	}

	// Only auto-adjust ratelimiter using metrics from successful API requests
	if err == nil {
		l.processingDurations = append(l.processingDurations, processingDuration)
		// Keep only the most recent params.MeanOver samples.
		if exceed := len(l.processingDurations) - l.params.MeanOver; exceed > 0 {
			l.processingDurations = l.processingDurations[exceed:]
		}
		l.meanProcessingDuration = calcMeanDuration(l.processingDurations)

		l.waitDurations = append(l.waitDurations, r.waitDuration)
		if exceed := len(l.waitDurations) - l.params.MeanOver; exceed > 0 {
			l.waitDurations = l.waitDurations[exceed:]
		}
		l.meanWaitDuration = calcMeanDuration(l.waitDurations)

		if l.params.AutoAdjust && l.params.EstimatedProcessingDuration != 0 {
			l.adjustmentFactor = l.calculateAdjustmentFactor()
			l.parallelRequests = l.adjustedParallelRequests()

			if l.limiter != nil {
				l.limiter.SetLimit(l.adjustedLimit())

				newBurst := l.adjustedBurst()
				l.limiter.SetBurst(newBurst)
			}
		}
	}

	// Snapshot the values while holding the lock; the metrics callback is
	// invoked after the lock has been released.
	values := MetricsValues{
		EstimatedProcessingDuration: l.params.EstimatedProcessingDuration.Seconds(),
		WaitDuration:                r.waitDuration,
		MaxWaitDuration:             l.params.MaxWaitDuration,
		MinWaitDuration:             l.params.MinWaitDuration,
		MeanProcessingDuration:      l.meanProcessingDuration,
		MeanWaitDuration:            l.meanWaitDuration,
		ParallelRequests:            l.parallelRequests,
		CurrentRequestsInFlight:     l.currentRequestsInFlight,
		AdjustmentFactor:            l.adjustmentFactor,
		Error:                       err,
		Outcome:                     string(r.outcome),
		ReturnCode:                  code,
	}

	if l.limiter != nil {
		values.Limit = l.limiter.Limit()
		values.Burst = l.limiter.Burst()
	}

	l.mutex.Unlock()

	if l.metrics != nil {
		l.metrics.ProcessedRequest(l.name, values)
	}
}
// calcMeanDuration returns the mean duration in seconds
func calcMeanDuration(durations []time.Duration) float64 {
total := 0.0
for _, t := range durations {
total += t.Seconds()
}
return total / float64(len(durations))
}
// LimitedRequest represents a request that is being limited. It is returned
// by Wait() and the caller of Wait() is responsible to call Done() or Error()
// when the API call has been processed or resulted in an error. It is safe to
// call Error() and then Done(). It is not safe to call Done(), Error(), or
// WaitDuration() concurrently.
type LimitedRequest interface {
	// Done marks the request as successfully processed.
	Done()
	// Error marks the request as failed with the given error and return code.
	Error(err error, code int)
	// WaitDuration returns how long the request waited in the limiter.
	WaitDuration() time.Duration
}
// limitedRequest is the APILimiter-backed implementation of LimitedRequest.
type limitedRequest struct {
	limiter *APILimiter
	// startTime is set when the request is released by the limiter; it
	// stays zero for requests that never started processing.
	startTime time.Time
	// scheduleTime is when the request entered the limiter.
	scheduleTime time.Time
	// waitDuration is the total time spent waiting to be released.
	waitDuration time.Duration
	// waitSemaphoreWeight is the weight acquired from the parallel-requests
	// semaphore; released again in requestFinished().
	waitSemaphoreWeight int64
	// uuid identifies the request in log messages.
	uuid string
	// finished guards against double accounting in requestFinished().
	finished bool
	// outcome records why the request was rejected, if it was.
	outcome outcome
}
// WaitDuration returns the duration the request had to wait.
// Per the LimitedRequest contract it must not be called concurrently with
// Done() or Error().
func (l *limitedRequest) WaitDuration() time.Duration {
	return l.waitDuration
}
// Done must be called when the API request has been successfully processed.
// It reports the request as finished with the success return code.
func (l *limitedRequest) Done() {
	l.limiter.requestFinished(l, nil, outcomeSuccessCode)
}
// Error must be called when the API request resulted in an error.
// code is the return code to report to the metrics observer.
func (l *limitedRequest) Error(err error, code int) {
	l.limiter.requestFinished(l, err, code)
}
// Wait blocks until the next API call is allowed to be processed. If the
// configured MaxWaitDuration is exceeded, an error is returned. On success, a
// LimitedRequest is returned on which Done() must be called when the API call
// has completed or Error() if an error occurred.
func (l *APILimiter) Wait(ctx context.Context) (LimitedRequest, error) {
	req, err := l.wait(ctx)
	if err == nil {
		return req, nil
	}
	// Account for the rejected request before bubbling up the error.
	l.requestFinished(req, err, outcomeErrorCode)
	return nil, err
}
// wait implements the API rate limiting delaying functionality. Every error
// message and corresponding log message are documented in
// Documentation/configuration/api-rate-limiting.rst. If any changes related to
// errors or log messages are made to this function, please update the
// aforementioned page as well.
func (l *APILimiter) wait(ctx context.Context) (req *limitedRequest, err error) {
	var (
		limitWaitDuration time.Duration
		r                 *rate.Reservation
	)

	req = &limitedRequest{
		limiter:      l,
		scheduleTime: time.Now(),
		uuid:         uuid.New().String(),
	}

	l.mutex.Lock()

	l.requestsScheduled++

	scopedLog := l.logger.With(
		logAPICallName, l.name,
		logUUID, req.uuid,
		logParallelRequests, l.parallelRequests,
	)

	if l.params.MaxWaitDuration > 0 {
		scopedLog = scopedLog.With(logMaxWaitDuration, l.params.MaxWaitDuration)
	}

	if l.params.MinWaitDuration > 0 {
		scopedLog = scopedLog.With(logMinWaitDuration, l.params.MinWaitDuration)
	}

	// Bail out early if the caller has already given up.
	select {
	case <-ctx.Done():
		if l.params.Log {
			scopedLog.Warn("Not processing API request due to cancelled context")
		}

		l.mutex.Unlock()

		req.outcome = outcomeReqCancelled
		err = fmt.Errorf("%w: %w", ErrWaitCancelled, ctx.Err())
		return
	default:
	}

	// Skip rate limiting entirely for the first SkipInitial requests
	// (learning phase before auto-adjustment kicks in).
	skip := l.params.SkipInitial > 0 && l.requestsScheduled <= int64(l.params.SkipInitial)
	if skip {
		scopedLog = scopedLog.With(logSkipped, skip)
	}

	// Snapshot the mutable state needed below before releasing the lock.
	parallelRequests := l.parallelRequests
	meanProcessingDuration := l.meanProcessingDuration
	l.mutex.Unlock()

	if l.params.Log {
		scopedLog.Info("Processing API request with rate limiter")
	} else {
		scopedLog.Debug("Processing API request with rate limiter")
	}

	if skip {
		goto skipRateLimiter
	}

	if parallelRequests > 0 {
		waitCtx := ctx
		if l.params.MaxWaitDuration > 0 {
			ctx2, cancel := context.WithTimeout(ctx, l.params.MaxWaitDuration)
			defer cancel()
			waitCtx = ctx2
		}
		// Each request takes an equal share of the semaphore capacity so
		// that at most parallelRequests acquisitions fit at once.
		w := int64(waitSemaphoreResolution / parallelRequests)
		err2 := l.parallelWaitSemaphore.Acquire(waitCtx, w)
		if err2 != nil {
			if l.params.Log {
				scopedLog.Warn("Not processing API request. Wait duration for maximum parallel requests exceeds maximum", logfields.Error, err2)
			}
			req.outcome = outcomeParallelMaxWait
			err = fmt.Errorf("timed out while waiting to be served with %d parallel requests: %w", parallelRequests, err2)
			return
		}
		req.waitSemaphoreWeight = w
	}
	req.waitDuration = time.Since(req.scheduleTime)

	l.mutex.Lock()
	if l.limiter != nil {
		r = l.limiter.Reserve()
		limitWaitDuration = r.Delay()

		scopedLog = scopedLog.With(
			logLimit, fmt.Sprintf("%.2f/s", l.limiter.Limit()),
			logBurst, l.limiter.Burst(),
			logWaitDurationLimit, limitWaitDuration,
			logMaxWaitDurationLimiter, l.params.MaxWaitDuration-req.waitDuration,
		)
	}
	l.mutex.Unlock()

	// Enforce the configured minimum delay even if the limiter would allow
	// the request sooner.
	if l.params.MinWaitDuration > 0 && limitWaitDuration < l.params.MinWaitDuration {
		limitWaitDuration = l.params.MinWaitDuration
	}

	if (l.params.MaxWaitDuration > 0 && (limitWaitDuration+req.waitDuration) > l.params.MaxWaitDuration) || limitWaitDuration == rate.InfDuration {
		if l.params.Log {
			scopedLog.Warn("Not processing API request. Wait duration exceeds maximum")
		}

		// The rate limiter should only consider a reservation valid if
		// the request is actually processed. Cancellation of the
		// reservation should happen before we sleep below.
		if r != nil {
			r.Cancel()
		}

		// Instead of returning immediately, pace the caller by
		// sleeping for the mean processing duration. This helps
		// against callers who disrespect 429 error codes and retry
		// immediately.
		if meanProcessingDuration > 0.0 {
			time.Sleep(time.Duration(meanProcessingDuration * float64(time.Second)))
		}

		req.outcome = outcomeLimitMaxWait
		err = fmt.Errorf("request would have to wait %v to be served (maximum wait duration: %v)",
			limitWaitDuration, l.params.MaxWaitDuration-req.waitDuration)
		return
	}

	if limitWaitDuration != 0 {
		select {
		case <-time.After(limitWaitDuration):
		case <-ctx.Done():
			if l.params.Log {
				scopedLog.Warn("Not processing API request due to cancelled context while waiting")
			}

			// The rate limiter should only consider a reservation
			// valid if the request is actually processed.
			if r != nil {
				r.Cancel()
			}

			req.outcome = outcomeReqCancelled
			err = fmt.Errorf("%w: %w", ErrWaitCancelled, ctx.Err())
			return
		}
	}

	req.waitDuration = time.Since(req.scheduleTime)

skipRateLimiter:

	l.mutex.Lock()
	l.currentRequestsInFlight++
	l.mutex.Unlock()

	scopedLog = scopedLog.With(logWaitDurationTotal, req.waitDuration)

	if l.params.Log {
		scopedLog.Info("API request released by rate limiter")
	} else {
		scopedLog.Debug("API request released by rate limiter")
	}

	req.startTime = time.Now()
	return req, nil
}
// parseRate parses a rate specification of the form "<count>/<interval>"
// (e.g. "10/s", "100/2m") and returns it as events per second. The interval
// must carry an explicit duration suffix.
func parseRate(r string) (rate.Limit, error) {
	count, interval, ok := strings.Cut(r, "/")
	if !ok {
		return 0, fmt.Errorf("not in the form number/interval")
	}

	f, err := strconv.ParseFloat(count, 64)
	if err != nil {
		return 0, fmt.Errorf("unable to parse float %q: %w", count, err)
	}

	// Guard against an empty interval such as "10/": the previous code
	// indexed interval[0] unconditionally and panicked on this input.
	if interval == "" {
		return 0, fmt.Errorf("interval %q must contain duration suffix", interval)
	}

	// Reject rates such as 1/1 or 10/10 as it will default to nanoseconds
	// which is likely unexpected to the user. Require an explicit suffix.
	if _, err := strconv.ParseInt(interval, 10, 64); err == nil {
		return 0, fmt.Errorf("interval %q must contain duration suffix", interval)
	}

	// If duration is provided as "m" or "s", convert it into "1m" or "1s"
	if _, err := strconv.ParseInt(string(interval[0]), 10, 64); err != nil {
		interval = "1" + interval
	}

	d, err := time.ParseDuration(interval)
	if err != nil {
		return 0, fmt.Errorf("unable to parse duration %q: %w", interval, err)
	}

	return rate.Limit(f / d.Seconds()), nil
}
// APILimiterSet is a set of APILimiter indexed by name
type APILimiterSet struct {
	// limiters maps the API call name to its limiter.
	limiters map[string]*APILimiter
	// metrics is the observer shared by all limiters in the set.
	metrics MetricsObserver
}
// MetricsValues is the snapshot of relevant values to feed into the
// MetricsObserver
type MetricsValues struct {
	// WaitDuration is the time this request spent waiting in the limiter.
	WaitDuration    time.Duration
	MinWaitDuration time.Duration
	MaxWaitDuration time.Duration
	// Outcome is empty for successfully processed requests; otherwise it is
	// one of the outcome* constants describing why the request was rejected.
	Outcome                     string
	MeanProcessingDuration      float64
	MeanWaitDuration            float64
	EstimatedProcessingDuration float64
	ParallelRequests            int
	// Limit and Burst remain zero when no rate limiter is configured.
	Limit                   rate.Limit
	Burst                   int
	CurrentRequestsInFlight int
	AdjustmentFactor        float64
	// Error is the error the request finished with, if any.
	Error error
	// ReturnCode is the HTTP-style status code reported for the request.
	ReturnCode int
}
// MetricsObserver is the interface that must be implemented to extract metrics
type MetricsObserver interface {
	// ProcessedRequest is invoked after invocation of an API call with a
	// snapshot of the limiter's state at completion time.
	ProcessedRequest(name string, values MetricsValues)
}
// NewAPILimiterSet creates a new APILimiterSet based on a set of rate limiting
// configurations and the default configuration. Any rate limiter that is
// configured in the config OR the defaults will be configured and made
// available via the Limiter(name) and Wait() function.
func NewAPILimiterSet(logger *slog.Logger, config map[string]string, defaults map[string]APILimiterParameters, metrics MetricsObserver) (*APILimiterSet, error) {
	limiters := make(map[string]*APILimiter)

	// Start from the defaults, overlaying any user-provided configuration.
	for name, params := range defaults {
		if userConfig, ok := config[name]; ok {
			merged, err := params.MergeUserConfig(userConfig)
			if err != nil {
				return nil, err
			}
			params = merged
		}
		limiters[name] = NewAPILimiter(logger, name, params, metrics)
	}

	// Add limiters configured by the user that have no default entry.
	for name, c := range config {
		if _, ok := defaults[name]; ok {
			continue
		}
		l, err := NewAPILimiterFromConfig(logger, name, c, metrics)
		if err != nil {
			return nil, fmt.Errorf("unable to parse rate limiting configuration %s=%s: %w", name, c, err)
		}
		limiters[name] = l
	}

	return &APILimiterSet{
		limiters: limiters,
		metrics:  metrics,
	}, nil
}
// Limiter returns the APILimiter with a given name, or nil if no limiter
// with that name exists in the set.
func (s *APILimiterSet) Limiter(name string) *APILimiter {
	return s.limiters[name]
}
// dummyRequest is a no-op LimitedRequest handed out by APILimiterSet.Wait
// when no limiter is configured for the requested name.
type dummyRequest struct{}

// WaitDuration returns 0 as no waiting took place.
func (d dummyRequest) WaitDuration() time.Duration { return 0 }

// Done is a no-op.
func (d dummyRequest) Done() {}

// Error is a no-op.
func (d dummyRequest) Error(err error, code int) {}
// Wait invokes Wait() on the APILimiter with the given name. If the limiter
// does not exist, a dummy limiter is used which will not impose any
// restrictions.
func (s *APILimiterSet) Wait(ctx context.Context, name string) (LimitedRequest, error) {
	if l, ok := s.limiters[name]; ok {
		return l.Wait(ctx)
	}
	// Unknown name: hand out a request that never limits anything.
	return dummyRequest{}, nil
}
// parsePositiveInt parses value as a non-negative int. It returns an error
// when value cannot be parsed, is negative, or overflows the platform's int.
func parsePositiveInt(value string) (int, error) {
	i64, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("unable to parse positive integer %q: %w", value, err)
	}
	if i64 < 0 {
		return 0, fmt.Errorf("unable to parse positive integer %q: negative value", value)
	}
	// Only reachable on platforms where int is narrower than 64 bits.
	if i64 > math.MaxInt {
		return 0, fmt.Errorf("unable to parse positive integer %q: overflow", value)
	}
	return int(i64), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package rate
import (
"context"
"fmt"
"sync/atomic"
"time"
"golang.org/x/sync/semaphore"
)
// Limiter is used to limit the number of operations done.
type Limiter struct {
	// semaphore holds the currently available tokens; acquired by
	// Allow/Wait and replenished by the refill goroutine in NewLimiter.
	semaphore *semaphore.Weighted
	// burst is the maximum number of tokens that may be held at once.
	burst int64
	// currWeights accumulates the weight acquired since the last tick so
	// the refill goroutine knows how much to release.
	currWeights atomic.Int64
	// ticker drives the periodic release of acquired weights.
	ticker *time.Ticker
	// cancelFunc stops the refill goroutine; invoked by Stop().
	cancelFunc context.CancelFunc
	// ctx signals shutdown to the refill goroutine and assertAlive().
	ctx context.Context
}
// NewLimiter returns a new Limiter that allows events up to b tokens during
// the given interval.
// This Limiter has a different implementation from the 'x/time/rate's Limiter
// implementation. 'x/time/rate.Limiter' sends a constant stream of updates
// (at a rate of few dozen events per second) over the period of a N minutes
// which is the behavior of the token bucket algorithm. It is designed to
// flatten bursts in a signal to a fixed output rate.
// This rate.Limiter does the opposite of 'x/time/rate.Limiter'. It takes a
// somewhat fixed-rate stream of updates and turns it into a stream of
// controlled small bursts every N minutes.
func NewLimiter(interval time.Duration, b int64) *Limiter {
	ctx, cancel := context.WithCancel(context.Background())
	lim := &Limiter{
		semaphore:  semaphore.NewWeighted(b),
		burst:      b,
		ticker:     time.NewTicker(interval),
		ctx:        ctx,
		cancelFunc: cancel,
	}
	// Refill goroutine: on every tick, return all tokens acquired since the
	// previous tick back to the semaphore. Exits when Stop cancels ctx.
	go func() {
		for {
			select {
			case <-lim.ctx.Done():
				return
			case <-lim.ticker.C:
			}
			lim.semaphore.Release(lim.currWeights.Swap(0))
		}
	}()
	return lim
}
// Stop stops the internal components used for the rate limiter logic.
// After Stop, further calls to Allow/Wait/WaitN panic (see assertAlive).
func (lim *Limiter) Stop() {
	lim.cancelFunc()
	lim.ticker.Stop()
}
// assertAlive panics when the limiter has already been stopped, guarding
// against use-after-Stop misuse.
func (lim *Limiter) assertAlive() {
	// ctx.Err() is non-nil exactly once Stop has cancelled the context.
	if lim.ctx.Err() != nil {
		panic("limiter misuse: Allow / Wait / WaitN called concurrently after Stop")
	}
}
// Allow is shorthand for AllowN(1). It reports whether a single token could
// be acquired without blocking.
func (lim *Limiter) Allow() bool {
	return lim.AllowN(1)
}
// AllowN returns true if it's possible to allow n tokens without blocking;
// on success the tokens are consumed until the next refill tick.
func (lim *Limiter) AllowN(n int64) bool {
	lim.assertAlive()
	if !lim.semaphore.TryAcquire(n) {
		return false
	}
	// Record the acquired weight so the refill goroutine can release it.
	lim.currWeights.Add(n)
	return true
}
// Wait is shorthand for WaitN(ctx, 1).
func (lim *Limiter) Wait(ctx context.Context) error {
	return lim.WaitN(ctx, 1)
}
// WaitN acquires n tokens, blocking until resources are available or ctx is
// done. On success, returns nil. On failure, returns ctx.Err() and leaves the
// limiter unchanged.
//
// If ctx is already done, WaitN may still succeed without blocking.
func (lim *Limiter) WaitN(ctx context.Context, n int64) error {
	lim.assertAlive()
	// Requests above the burst can never be satisfied by the semaphore.
	if n > lim.burst {
		return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
	}
	if err := lim.semaphore.Acquire(ctx, n); err != nil {
		return err
	}
	// Record the acquired weight so the refill goroutine can release it.
	lim.currWeights.Add(n)
	return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package metrics
import (
"strconv"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/rate"
)
// APILimiterObserver returns the rate.MetricsObserver implementation that
// publishes API limiter values to the metrics package's collectors.
func APILimiterObserver() rate.MetricsObserver {
	return &apiRateLimitingMetrics{}
}

// apiRateLimitingMetrics is a stateless observer backed by the package-level
// collectors in pkg/metrics.
type apiRateLimitingMetrics struct{}
// ProcessedRequest publishes the metrics snapshot of one processed API
// request to the corresponding collectors.
func (a *apiRateLimitingMetrics) ProcessedRequest(name string, v rate.MetricsValues) {
	metrics.APILimiterProcessingDuration.WithLabelValues(name, "mean").Set(v.MeanProcessingDuration)
	metrics.APILimiterProcessingDuration.WithLabelValues(name, "estimated").Set(v.EstimatedProcessingDuration)
	metrics.APILimiterWaitDuration.WithLabelValues(name, "mean").Set(v.MeanWaitDuration)
	metrics.APILimiterWaitDuration.WithLabelValues(name, "max").Set(v.MaxWaitDuration.Seconds())
	metrics.APILimiterWaitDuration.WithLabelValues(name, "min").Set(v.MinWaitDuration.Seconds())
	metrics.APILimiterRequestsInFlight.WithLabelValues(name, "in-flight").Set(float64(v.CurrentRequestsInFlight))
	metrics.APILimiterRequestsInFlight.WithLabelValues(name, "limit").Set(float64(v.ParallelRequests))
	metrics.APILimiterRateLimit.WithLabelValues(name, "limit").Set(float64(v.Limit))
	metrics.APILimiterRateLimit.WithLabelValues(name, "burst").Set(float64(v.Burst))
	metrics.APILimiterAdjustmentFactor.WithLabelValues(name).Set(v.AdjustmentFactor)

	// An empty outcome means the request was not rejected by the limiter:
	// record its wait duration and derive an outcome label from the error.
	if v.Outcome == "" {
		metrics.APILimiterWaitHistoryDuration.WithLabelValues(name).Observe(v.WaitDuration.Seconds())
		v.Outcome = metrics.Error2Outcome(v.Error)
	}

	// NOTE(review): -1 appears to act as a "code not set" sentinel here —
	// confirm against callers of ProcessedRequest.
	if v.ReturnCode == -1 {
		v.ReturnCode = metrics.LabelOutcome2Code(v.Outcome)
	}

	metrics.APILimiterProcessedRequests.WithLabelValues(name, v.Outcome, strconv.Itoa(v.ReturnCode)).Inc()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package resiliency
// retryableErr tracks errors that could be retried.
// It embeds the wrapped error so that errors.As can match it (see IsRetryable).
type retryableErr struct {
	error
}

// Retryable returns a new instance wrapping e, marking it as retryable for
// IsRetryable.
func Retryable(e error) retryableErr {
	return retryableErr{error: e}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package resiliency
import (
"errors"
"fmt"
)
type tuple struct {
index int
err error
}
// ErrorSet tracks a collection of unique errors.
type ErrorSet struct {
total, failed int
msg string
errs map[string]tuple
}
// NewErrorSet returns a new instance.
func NewErrorSet(msg string, c int) *ErrorSet {
return &ErrorSet{
msg: msg,
total: c,
errs: make(map[string]tuple),
}
}
// Add adds one or more errors to the set.
func (e *ErrorSet) Add(errs ...error) {
for _, err := range errs {
if err == nil {
continue
}
if _, ok := e.errs[err.Error()]; ok {
continue
}
e.errs[err.Error()] = tuple{index: e.failed, err: err}
e.failed++
}
}
// Error returns a list of unique errors or nil.
func (e *ErrorSet) Errors() []error {
if len(e.errs) == 0 {
return nil
}
errs := make([]error, len(e.errs)+1)
errs[0] = fmt.Errorf("%s (%d/%d) failed", e.msg, e.failed, e.total)
for _, t := range e.errs {
errs[t.index+1] = t.err
}
return errs
}
// Error returns a new composite error or nil.
func (e *ErrorSet) Error() error {
return errors.Join(e.Errors()...)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package resiliency
import (
"context"
"time"
"k8s.io/apimachinery/pkg/util/wait"
)
// RetryFunc tracks resiliency retry calls.
// It receives the 1-based attempt number and reports whether the operation
// is done, along with any terminal error.
type RetryFunc func(ctx context.Context, retries int) (bool, error)
// Retry retries the provided call using exponential retries given an initial duration for up to max retries count.
func Retry(ctx context.Context, duration time.Duration, maxRetries int, fn RetryFunc) error {
	// NOTE(review): Factor is 1, so the interval between attempts stays
	// constant (plus jitter) rather than growing exponentially despite the
	// function name — confirm whether this is intended.
	bo := wait.Backoff{
		Duration: duration,
		Factor:   1,
		Jitter:   0.1,
		Steps:    maxRetries,
	}
	var retries int
	// Wrap fn so every attempt observes a 1-based attempt counter.
	f := func(ctx context.Context) (bool, error) {
		retries++
		return fn(ctx, retries)
	}
	return wait.ExponentialBackoffWithContext(ctx, bo, f)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package resiliency
import (
"errors"
)
// IsRetryable reports whether e (or any error it wraps) was marked as
// retryable via Retryable.
func IsRetryable(e error) bool {
	var target retryableErr
	return errors.As(e, &target)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package safetime
import (
"log/slog"
"runtime"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/time"
)
// TimeSinceSafe returns the duration since t. If the duration is negative,
// returns false to indicate the fact.
//
// Used to workaround a malfunctioning monotonic clock.
func TimeSinceSafe(t time.Time, logger *slog.Logger) (time.Duration, bool) {
	n := time.Now()
	d := n.Sub(t)
	if d < 0 {
		scopedLog := logger.With(
			logfields.StartTime, t,
			logfields.EndTime, n,
			logfields.Duration, d,
		)
		// Attach the caller's file/line to make the offending call site
		// findable from the log alone.
		_, file, line, ok := runtime.Caller(1)
		if ok {
			scopedLog = scopedLog.With(
				logfields.Path, file,
				logfields.Line, line,
			)
		}
		scopedLog.Warn("BUG: negative duration")
		// Report a zero duration instead of the bogus negative one.
		return time.Duration(0), false
	}
	return d, true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package slices
import (
"cmp"
"iter"
"slices"
)
// Unique deduplicates the elements in the input slice, preserving their ordering and
// modifying the slice in place.
// Unique relies on a map to find multiple occurrences of the same elements.
// For slices with a size less than 192 elements, a simpler O(N^2) search algorithm
// that does not allocate memory is used instead.
// Limit of 192 has been experimentally derived (look at BenchmarkUnique for more information).
func Unique[S ~[]T, T comparable](s S) S {
	if len(s) < 2 {
		return s
	}
	n := 0 // number of unique elements kept so far, occupying s[:n]
	if len(s) < 192 {
		// Small inputs: allocation-free quadratic scan against the kept prefix.
	outer:
		for i := 0; i < len(s); i++ {
			for j := 0; j < n; j++ {
				if s[i] == s[j] {
					continue outer
				}
			}
			s[n] = s[i]
			n++
		}
	} else {
		// Large inputs: constant-time membership test via a hash set.
		seen := make(map[T]struct{}, len(s))
		for i := 0; i < len(s); i++ {
			if _, dup := seen[s[i]]; dup {
				continue
			}
			seen[s[i]] = struct{}{}
			s[n] = s[i]
			n++
		}
	}
	clear(s[n:]) // zero out obsolete elements for GC
	return s[:n]
}
// UniqueFunc deduplicates the elements in the input slice like Unique, but takes a
// function to extract the comparable "key" to compare T. This is slower than Unique,
// but can be used with non-comparable elements.
func UniqueFunc[S ~[]T, T any, K comparable](s S, key func(i int) K) S {
	if len(s) < 2 {
		return s
	}
	seen := make(map[K]struct{}, len(s))
	n := 0 // elements kept so far, compacted into s[:n]
	for i := 0; i < len(s); i++ {
		if _, dup := seen[key(i)]; dup {
			continue
		}
		seen[key(i)] = struct{}{}
		s[n] = s[i]
		n++
	}
	clear(s[n:]) // zero out obsolete elements for GC
	return s[:n]
}
// SortedUnique sorts and dedup the input slice in place.
// It uses the < operator to compare the elements in the slice and thus requires
// the elements to satisfy constraints.Ordered.
func SortedUnique[S ~[]T, T cmp.Ordered](s S) S {
	if len(s) >= 2 {
		slices.Sort(s)
		s = slices.Compact(s)
	}
	return s
}
// Diff returns a slice of elements which is the difference of a and b.
// The returned slice keeps the elements in the same order found in the "a" slice.
// Both input slices are considered as sets, that is, all elements are considered as
// unique when computing the difference.
func Diff[S ~[]T, T comparable](a, b S) []T {
	if len(a) == 0 {
		return nil
	}
	if len(b) == 0 {
		// Nothing to subtract; return a as-is.
		return a
	}
	// Membership set for b.
	exclude := make(map[T]struct{}, len(b))
	for _, v := range b {
		exclude[v] = struct{}{}
	}
	// Tracks elements of a already emitted, so duplicates appear once.
	emitted := make(map[T]struct{}, len(a))
	var result []T
	for _, v := range a {
		if _, skip := exclude[v]; skip {
			continue
		}
		if _, dup := emitted[v]; dup {
			continue
		}
		result = append(result, v)
		emitted[v] = struct{}{}
	}
	return result
}
// SubsetOf returns a boolean that indicates if slice a is a subset of slice b.
// In case it is not, the returned slice contains all the unique elements that are in a but not in b.
func SubsetOf[S ~[]T, T comparable](a, b S) (bool, []T) {
	missing := Diff(a, b)
	return len(missing) == 0, missing
}
// XorNil returns true if exactly one of the two slices is nil.
func XorNil[T any](s1, s2 []T) bool {
	// Nil-ness of the two slices must differ.
	return (s1 == nil) != (s2 == nil)
}
// AllMatch returns true if pred is true for each element in s, false otherwise.
// May not evaluate on all elements if not necessary for determining the result.
// If the slice is empty then true is returned and predicate is not evaluated.
func AllMatch[T any](s []T, pred func(v T) bool) bool {
	// All elements match iff no element fails the predicate.
	return !slices.ContainsFunc(s, func(v T) bool { return !pred(v) })
}
// Map returns a slice obtained applying fn over the input elements.
// A nil input yields a nil output.
func Map[In, Out any](in []In, fn func(In) Out) []Out {
	if in == nil {
		return nil
	}
	out := make([]Out, 0, len(in))
	for _, v := range in {
		out = append(out, fn(v))
	}
	return out
}
// MapIter returns an iterator obtained applying fn over the input elements.
func MapIter[In, Out any](s iter.Seq[In], fn func(In) Out) iter.Seq[Out] {
return func(yield func(Out) bool) {
for obj := range s {
if !yield(fn(obj)) {
return
}
}
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package source
import (
"slices"
"github.com/cilium/hive/cell"
)
// Source describes the source of a definition.
// Known values are declared in the const block below; their relative
// precedence is given by defaultSources.
type Source string
// The known sources. Declaration order here is documentation only; the
// authoritative precedence ordering lives in defaultSources.
const (
	// Unspec is used when the source is unspecified
	Unspec Source = "unspec"
	// KubeAPIServer is the source used for state which represents the
	// kube-apiserver, such as the IPs associated with it. This is not to be
	// confused with the Kubernetes source.
	// KubeAPIServer state has the strongest ownership and can only be
	// overwritten by itself.
	KubeAPIServer Source = "kube-apiserver"
	// Local is the source used for state derived from local agent state.
	// Local state has the second strongest ownership, behind KubeAPIServer.
	Local Source = "local"
	// KVStore is the source used for state derived from a key value store.
	// State in the key value stored takes precedence over orchestration
	// system state such as Kubernetes.
	KVStore Source = "kvstore"
	// CustomResource is the source used for state derived from Kubernetes
	// custom resources
	CustomResource Source = "custom-resource"
	// Kubernetes is the source used for state derived from Kubernetes
	Kubernetes Source = "k8s"
	// ClusterMesh is the source used for state derived from remote clusters
	ClusterMesh Source = "clustermesh"
	// LocalAPI is the source used for state derived from the API served
	// locally on the node.
	LocalAPI Source = "api"
	// Generated is the source used for generated state which can be
	// overwritten by all other sources, except for restored (and unspec).
	Generated Source = "generated"
	// Restored is the source used for restored state from data left behind
	// by the previous agent instance. Can be overwritten by all other
	// sources (except for unspec).
	Restored Source = "restored"
	// Directory is the source used for watching and reading
	// cilium network policy files from specific directory.
	Directory Source = "directory"
	// Please remember to add your source to defaultSources below.
)
// Sources is a priority-sorted slice of sources, ordered from highest to
// lowest precedence.
type Sources []Source
// The ordering in defaultSources is critical and it should only be changed
// with care, because it determines the behavior of AllowOverwrite().
// It is from highest precedence to lowest precedence.
var defaultSources Sources = []Source{
	KubeAPIServer,
	Local,
	KVStore,
	CustomResource,
	Kubernetes,
	ClusterMesh,
	Directory,
	LocalAPI,
	Generated,
	Restored,
	Unspec,
}
// AllowOverwrite returns true if new state from a particular source is allowed
// to overwrite existing state from another source.
func AllowOverwrite(existing, new Source) bool {
	// rank maps a source to its position in defaultSources; sources not
	// listed there rank below every known source.
	rank := func(s Source) int {
		if i := slices.Index(defaultSources, s); i >= 0 {
			return i
		}
		return len(defaultSources)
	}
	return rank(new) <= rank(existing)
}
// overflowNegativeTo returns a mapping that passes non-negative values
// through unchanged and maps every negative value to the given infinity.
func overflowNegativeTo(infinity int) func(int) int {
	return func(n int) int {
		if n >= 0 {
			return n
		}
		return infinity
	}
}
// Cell provides the priority-ordered Sources list to the hive.
var Cell = cell.Module(
	"source",
	"Definitions and priorities of data sources",
	cell.Provide(NewSources),
)
// NewSources returns sources ordered from the most preferred.
// Note: the returned slice is the shared defaultSources backing array and
// must not be mutated by callers.
func NewSources() Sources {
	return defaultSources
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package spanstat
import (
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/safetime"
"github.com/cilium/cilium/pkg/time"
)
// SpanStat measures the total duration of all time spent in between Start()
// and End() calls.
type SpanStat struct {
	mutex           lock.RWMutex
	spanStart       time.Time     // start of the currently open span; zero when no span is open
	successDuration time.Duration // cumulative duration of spans ended with success
	failureDuration time.Duration // cumulative duration of spans ended with failure
}
// Start allocates a new SpanStat and immediately opens its first span.
func Start() *SpanStat {
	return (&SpanStat{}).Start()
}
// Start starts a new span, overwriting any span that is currently open.
func (s *SpanStat) Start() *SpanStat {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.spanStart = time.Now()
	return s
}
// EndError calls End() based on the value of err: a nil err counts the span
// as a success, non-nil as a failure.
func (s *SpanStat) EndError(err error) *SpanStat {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.end(err == nil)
}
// End ends the current span and adds the measured duration to the total
// cumulated duration, and to the success or failure cumulated duration
// depending on the given success flag.
func (s *SpanStat) End(success bool) *SpanStat {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.end(success)
}
// end closes the currently open span, if any, folding its duration into the
// success or failure total, and clears the span start time.
// must be called with Lock() held
func (s *SpanStat) end(success bool) *SpanStat {
	if !s.spanStart.IsZero() {
		// slogloggercheck: it's safe to use the default logger here as it has been initialized by the program up to this point.
		// A negative (clock-skewed) duration yields d == 0, so it is
		// effectively dropped from the totals.
		d, _ := safetime.TimeSinceSafe(s.spanStart, logging.DefaultSlogLogger)
		if success {
			s.successDuration += d
		} else {
			s.failureDuration += d
		}
	}
	// Mark no span as open.
	s.spanStart = time.Time{}
	return s
}
// Total returns the total duration of all spans measured, including both
// successes and failures.
func (s *SpanStat) Total() time.Duration {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	return s.successDuration + s.failureDuration
}
// SuccessTotal returns the total duration of all successful spans measured.
func (s *SpanStat) SuccessTotal() time.Duration {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	return s.successDuration
}
// FailureTotal returns the total duration of all unsuccessful spans measured.
func (s *SpanStat) FailureTotal() time.Duration {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	return s.failureDuration
}
// Reset resets the duration measurements.
// A currently open span (if any) is left open and untouched.
func (s *SpanStat) Reset() {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.successDuration = 0
	s.failureDuration = 0
}
// Seconds returns the number of seconds represented by the spanstat. If a span
// is still open, it is closed first (and counted as a success).
func (s *SpanStat) Seconds() float64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if !s.spanStart.IsZero() {
		s.end(true)
	}
	total := s.successDuration + s.failureDuration
	return total.Seconds()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"os"
"testing"
)
// TempBPFFS creates a temporary directory on a BPF FS.
//
// The directory is automatically cleaned up at the end of the test run.
func TempBPFFS(tb testing.TB) string {
	tb.Helper()
	// Assumes a BPF filesystem is mounted at the conventional /sys/fs/bpf path.
	tmp, err := os.MkdirTemp("/sys/fs/bpf", "cilium-test")
	if err != nil {
		tb.Fatal("Create temporary directory on bpffs:", err)
	}
	tb.Cleanup(func() { os.RemoveAll(tmp) })
	return tmp
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"errors"
"os"
"strings"
"sync"
"testing"
)
// cache memoizes the result of the cgroup2 mount-point lookup for the
// lifetime of the process.
type cache struct {
	once sync.Once // guards the single /proc/mounts scan
	path string    // discovered cgroup2 mount point, if any
	err  error     // lookup error, if any
}

// c is the package-level memoized lookup state used by cgroup2Path.
var c cache
// cgroup2Path returns the first cgroup2 mount point found in /proc/mounts.
// The result (path or error) is computed once and cached for later calls.
func cgroup2Path() (string, error) {
	c.once.Do(func() {
		mounts, err := os.ReadFile("/proc/mounts")
		if err != nil {
			c.path, c.err = "", err
			return
		}
		for _, line := range strings.Split(string(mounts), "\n") {
			// /proc/mounts fields: device, mount point, fstype, ...
			fields := strings.SplitN(line, " ", 3)
			if fields[0] != "cgroup2" {
				continue
			}
			c.path, c.err = fields[1], nil
			return
		}
		c.path, c.err = "", errors.New("cgroup2 not mounted")
	})
	return c.path, c.err
}
// TempCgroup finds the first cgroup2 mount point on the host and creates a
// temporary cgroup in it. Returns the absolute path to the cgroup.
//
// The cgroup is automatically cleaned up at the end of the test run.
func TempCgroup(tb testing.TB) string {
	tb.Helper()
	cg2, err := cgroup2Path()
	if err != nil {
		tb.Fatal("Can't locate cgroup2 mount:", err)
	}
	cgdir, err := os.MkdirTemp(cg2, "cilium-test")
	if err != nil {
		tb.Fatal("Can't create cgroupv2:", err)
	}
	tb.Cleanup(func() {
		// os.Remove (not RemoveAll): cgroup directories are removed by rmdir.
		os.Remove(cgdir)
	})
	return cgdir
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"fmt"
"time"
)
// ConditionFunc is the function implementing the condition, it must return
// true if the condition has been met.
type ConditionFunc func() bool
// WaitUntil evaluates the condition every 10 milliseconds and waits for the
// condition to be met. The function will time out and return an error after
// timeout.
func WaitUntil(condition ConditionFunc, timeout time.Duration) error {
	return WaitUntilWithSleep(condition, timeout, 10*time.Millisecond)
}
// WaitUntilWithSleep does the same as WaitUntil except that the sleep time
// between the condition checks is given.
func WaitUntilWithSleep(condition ConditionFunc, timeout, sleep time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		// The timeout is checked before the condition, mirroring the
		// poll-then-sleep loop semantics.
		if time.Now().After(deadline) {
			return fmt.Errorf("timeout reached while waiting for condition")
		}
		if condition() {
			return nil
		}
		time.Sleep(sleep)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"log/slog"
"net/netip"
"testing"
"github.com/cilium/hive/hivetest"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/mac"
"github.com/cilium/cilium/pkg/option"
)
var (
	// defaultIdentity is a non-reserved identity (ID 42) with a single "foo" label.
	defaultIdentity = identity.NewIdentity(42, labels.NewLabelsFromModel([]string{"foo"}))
	// hostIdentity is the reserved host identity with the host label.
	hostIdentity = identity.NewIdentity(identity.ReservedIdentityHost, labels.LabelHost)
)
// TestEndpoint is a fake endpoint implementation for tests, carrying fixed
// values returned by its accessor methods.
type TestEndpoint struct {
	logger      *slog.Logger
	Id          uint64
	Identity    *identity.Identity
	Opts        *option.IntOptions
	MAC         mac.MAC
	IfIndex     int
	IPv6        netip.Addr
	isHost      bool // reported by IsHost()
	State       string // overrides the StateDir() result when non-empty
	NetNsCookie uint64
}
// NewTestEndpoint returns a non-host test endpoint with ID 42, the default
// test identity, and TEST_OPTION enabled.
func NewTestEndpoint(t testing.TB) TestEndpoint {
	opts := option.NewIntOptions(&option.OptionLibrary{})
	opts.SetBool("TEST_OPTION", true)
	return TestEndpoint{
		logger:      hivetest.Logger(t),
		Id:          42,
		Identity:    defaultIdentity,
		MAC:         mac.MAC([]byte{0x02, 0x00, 0x60, 0x0D, 0xF0, 0x0D}),
		IfIndex:     0,
		Opts:        opts,
		NetNsCookie: 0,
	}
}
// NewTestHostEndpoint returns a host test endpoint with ID 65535, the
// reserved host identity, and TEST_OPTION enabled.
func NewTestHostEndpoint(t testing.TB) TestEndpoint {
	opts := option.NewIntOptions(&option.OptionLibrary{})
	opts.SetBool("TEST_OPTION", true)
	return TestEndpoint{
		logger:   hivetest.Logger(t),
		Id:       65535,
		Identity: hostIdentity,
		MAC:      mac.MAC([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}),
		IfIndex:  0,
		Opts:     opts,
		isHost:   true,
	}
}
// Accessors below return either fixed test values or the corresponding
// TestEndpoint fields.

func (e *TestEndpoint) RequireARPPassthrough() bool { return false }

func (e *TestEndpoint) RequireEgressProg() bool { return false }

func (e *TestEndpoint) RequireRouting() bool { return false }

func (e *TestEndpoint) RequireEndpointRoute() bool { return false }

// GetPolicyVerdictLogFilter returns a filter with the low 16 bits set.
func (e *TestEndpoint) GetPolicyVerdictLogFilter() uint32 { return 0xffff }

func (e *TestEndpoint) GetID() uint64 { return e.Id }

// StringID is hard-coded to "42" and does not reflect e.Id.
func (e *TestEndpoint) StringID() string { return "42" }

func (e *TestEndpoint) GetIdentity() identity.NumericIdentity { return e.Identity.ID }

func (e *TestEndpoint) GetEndpointNetNsCookie() uint64 { return e.NetNsCookie }

func (e *TestEndpoint) GetSecurityIdentity() *identity.Identity { return e.Identity }

func (e *TestEndpoint) GetNodeMAC() mac.MAC { return e.MAC }

func (e *TestEndpoint) GetIfIndex() int { return e.IfIndex }

func (e *TestEndpoint) GetOptions() *option.IntOptions { return e.Opts }

func (e *TestEndpoint) IsHost() bool { return e.isHost }

// IPv4Address returns a fixed address from the TEST-NET-1 documentation range.
func (e *TestEndpoint) IPv4Address() netip.Addr {
	return netip.MustParseAddr("192.0.2.3")
}

func (e *TestEndpoint) IPv6Address() netip.Addr {
	return e.IPv6
}

func (e *TestEndpoint) InterfaceName() string {
	return "cilium_test"
}

// Logger returns the endpoint's logger scoped to the given subsystem.
func (e *TestEndpoint) Logger(subsystem string) *slog.Logger {
	return e.logger.With(logfields.LogSubsys, subsystem)
}

// SetIdentity replaces the endpoint's identity with a fresh identity carrying
// a "bar" label; the newEndpoint flag is ignored.
func (e *TestEndpoint) SetIdentity(secID int64, newEndpoint bool) {
	e.Identity = identity.NewIdentity(identity.NumericIdentity(secID), labels.NewLabelsFromModel([]string{"bar"}))
}

// StateDir returns e.State if set, otherwise a fixed test directory name.
func (e *TestEndpoint) StateDir() string {
	if e.State != "" {
		return e.State
	}
	return "test_loader"
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"errors"
"os"
"testing"
)
func SkipIfFileMissing(t testing.TB, file string) {
_, err := os.Open(file)
if errors.Is(err, os.ErrNotExist) {
t.Skipf("Skipping due to missing file %s", file)
}
if err != nil {
t.Fatal(err)
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"slices"
"testing"
//nolint:gomodguard
"go.uber.org/goleak"
)
// defaultGoleakOptions returns the goleak ignore list shared by
// GoleakVerifyTestMain and GoleakVerifyNone. A fresh slice is returned on
// every call so callers may append to it safely.
func defaultGoleakOptions() []goleak.Option {
	return []goleak.Option{
		// The metrics "status" collector tries to connect to the agent and leaves these
		// around. We should refactor pkg/metrics to split it into "plain registry"
		// and the agent specifics.
		goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"),
		goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
		// Unfortunately we don't have a way for waiting for the workqueue's background goroutine
		// to exit (used by pkg/k8s/resource), so we'll just need to ignore it.
		goleak.IgnoreTopFunction("k8s.io/client-go/util/workqueue.(*Typed[...]).updateUnfinishedWorkLoop"),
	}
}
// GoleakVerifyTestMain calls [goleak.VerifyTestMain] with our known list of
// leaky functions to ignore. To use this:
//
//	func TestMain(m *testing.M) {
//		testutils.GoleakVerifyTestMain(m)
//	}
func GoleakVerifyTestMain(m *testing.M, options ...goleak.Option) {
	// defaultGoleakOptions returns a fresh slice, so appending is safe.
	opts := append(defaultGoleakOptions(), options...)
	goleak.VerifyTestMain(m, opts...)
}
// GoleakVerifyNone calls [goleak.VerifyNone] with our known list of leaky
// functions to ignore.
func GoleakVerifyNone(t *testing.T, options ...goleak.Option) {
	// defaultGoleakOptions returns a fresh slice, so appending is safe.
	opts := append(defaultGoleakOptions(), options...)
	goleak.VerifyNone(t, opts...)
}
// Aliases for the goleak options as we're forbidding the go.uber.org/goleak
// import elsewhere in the tree.
var (
	GoleakIgnoreTopFunction = goleak.IgnoreTopFunction
	GoleakIgnoreAnyFunction = goleak.IgnoreAnyFunction
	GoleakIgnoreCurrent     = goleak.IgnoreCurrent
	GoleakCleanup           = goleak.Cleanup
)

// GoleakOption aliases goleak.Option for the same reason.
type GoleakOption = goleak.Option
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testidentity
import (
"context"
"fmt"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/labels"
)
// IdentityAllocatorOwnerMock is a stub identity-allocator owner for unit tests.
type IdentityAllocatorOwnerMock struct{}

// UpdateIdentities returns an already-closed channel so callers waiting on
// the update to propagate never block. The added/deleted maps are ignored.
func (i *IdentityAllocatorOwnerMock) UpdateIdentities(added, deleted identity.IdentityMap) <-chan struct{} {
	out := make(chan struct{})
	close(out)
	return out
}

// GetNodeSuffix returns a fixed suffix for tests.
func (i *IdentityAllocatorOwnerMock) GetNodeSuffix() string {
	return "foo"
}
// MockIdentityAllocator is used as a mock identity allocator for unit tests.
type MockIdentityAllocator struct {
	identity.IdentityMap // numeric ID -> label array for all allocated identities
	// map from scope -> next ID
	nextIDs map[identity.NumericIdentity]int
	idToIdentity       map[int]*identity.Identity
	labelsToIdentity   map[string]int // labels are sorted as a key
	withheldIdentities map[identity.NumericIdentity]struct{} // IDs excluded from allocation
	labelsToReject     map[string]struct{} // label keys for which allocation is forced to fail
}
// NewMockIdentityAllocator returns a new mock identity allocator to be used
// for unit testing purposes. It can be used as a drop-in for "real" identity
// allocation in a testing context. An optional pre-populated identity map c
// may be supplied; nil starts empty.
func NewMockIdentityAllocator(c identity.IdentityMap) *MockIdentityAllocator {
	if c == nil {
		c = identity.IdentityMap{}
	}
	return &MockIdentityAllocator{
		IdentityMap: c,
		// Global IDs start at 1000; scoped (local/remote-node) IDs start at 0.
		nextIDs: map[identity.NumericIdentity]int{
			identity.IdentityScopeGlobal:     1000,
			identity.IdentityScopeLocal:      0,
			identity.IdentityScopeRemoteNode: 0,
		},
		idToIdentity:       make(map[int]*identity.Identity),
		labelsToIdentity:   make(map[string]int),
		withheldIdentities: map[identity.NumericIdentity]struct{}{},
		labelsToReject:     map[string]struct{}{},
	}
}
// WaitForInitialGlobalIdentities does nothing.
func (f *MockIdentityAllocator) WaitForInitialGlobalIdentities(context.Context) error {
	return nil
}

// GetIdentities returns the identities from the identity cache, converted to
// the API model.
func (f *MockIdentityAllocator) GetIdentities() cache.IdentitiesModel {
	result := cache.IdentitiesModel{}
	return result.FromIdentityCache(f.IdentityMap)
}
// Reject programs the mock allocator to reject an identity
// for testing purposes.
func (f *MockIdentityAllocator) Reject(lbls labels.Labels) {
	f.labelsToReject[lbls.String()] = struct{}{}
}

// Unreject removes a previously programmed rejection for the given labels.
func (f *MockIdentityAllocator) Unreject(lbls labels.Labels) {
	delete(f.labelsToReject, lbls.String())
}
// AllocateIdentity allocates a fake identity. It is meant to generally mock
// the canonical identity allocator logic: reserved labels resolve to their
// reserved identity, manually rejected labels fail, existing labels bump the
// reference count, and otherwise a new numeric ID is picked within the
// labels' scope (honoring the suggested oldNID when free).
// Returns the identity, and whether it was newly allocated.
func (f *MockIdentityAllocator) AllocateIdentity(_ context.Context, lbls labels.Labels, _ bool, oldNID identity.NumericIdentity) (*identity.Identity, bool, error) {
	if reservedIdentity := identity.LookupReservedIdentityByLabels(lbls); reservedIdentity != nil {
		return reservedIdentity, false, nil
	}
	if _, ok := f.labelsToReject[lbls.String()]; ok {
		return nil, false, fmt.Errorf("rejecting labels manually")
	}
	// Existing allocation for these labels: bump the refcount and reuse it.
	if numID, ok := f.labelsToIdentity[lbls.String()]; ok {
		id := f.idToIdentity[numID]
		id.ReferenceCount++
		return id, false, nil
	}
	scope := identity.ScopeForLabels(lbls)
	id := identity.IdentityUnknown
	// if suggested id is available, use it
	if scope != identity.IdentityScopeGlobal {
		if _, ok := f.idToIdentity[int(oldNID)]; !ok && oldNID.Scope() == identity.ScopeForLabels(lbls) {
			id = oldNID
		}
	}
	// Otherwise scan forward from the scope's next counter, skipping IDs
	// that are already allocated or withheld.
	for id == identity.IdentityUnknown {
		candidate := identity.NumericIdentity(f.nextIDs[scope]) | scope
		_, allocated := f.idToIdentity[int(candidate)]
		_, withheld := f.withheldIdentities[candidate]
		if !allocated && !withheld {
			id = candidate
		}
		f.nextIDs[scope]++
	}
	f.IdentityMap[identity.NumericIdentity(id)] = lbls.LabelArray()
	f.labelsToIdentity[lbls.String()] = int(id)
	realID := &identity.Identity{
		ID:             identity.NumericIdentity(id),
		Labels:         lbls,
		ReferenceCount: 1,
	}
	realID.Sanitize() // copy Labels to LabelArray
	f.idToIdentity[int(id)] = realID
	return realID, true, nil
}
// AllocateLocalIdentity allocates an identity for labels whose scope is
// local; globally-scoped labels yield cache.ErrNonLocalIdentity.
func (f *MockIdentityAllocator) AllocateLocalIdentity(lbls labels.Labels, notifyOwner bool, oldNID identity.NumericIdentity) (*identity.Identity, bool, error) {
	scope := identity.ScopeForLabels(lbls)
	if scope == identity.IdentityScopeGlobal {
		return nil, false, cache.ErrNonLocalIdentity
	}
	return f.AllocateIdentity(context.TODO(), lbls, notifyOwner, oldNID)
}
// ReleaseLocalIdentities releases the given non-global identities and returns
// the ones that were actually deallocated (refcount reached zero).
// Globally-scoped and unknown IDs are skipped.
func (f *MockIdentityAllocator) ReleaseLocalIdentities(nids ...identity.NumericIdentity) ([]identity.NumericIdentity, error) {
	var dealloc []identity.NumericIdentity
	for _, nid := range nids {
		if nid.Scope() == identity.IdentityScopeGlobal {
			continue
		}
		id := f.LookupIdentityByID(context.TODO(), nid)
		if id == nil {
			continue
		}
		if r, _ := f.Release(context.TODO(), id, true); r {
			dealloc = append(dealloc, nid)
		}
	}
	return dealloc, nil
}
// Release releases a fake identity. It is meant to generally mock the
// canonical identity release logic. It reports true only when the last
// reference was dropped and the identity was removed from all maps.
func (f *MockIdentityAllocator) Release(_ context.Context, id *identity.Identity, _ bool) (released bool, err error) {
	realID, ok := f.idToIdentity[int(id.ID)]
	if !ok {
		return false, nil
	}
	if realID.ReferenceCount == 1 {
		// Last reference: drop the identity from every index.
		delete(f.idToIdentity, int(id.ID))
		delete(f.IdentityMap, id.ID)
		for key, lblID := range f.labelsToIdentity {
			if lblID == int(id.ID) {
				delete(f.labelsToIdentity, key)
			}
		}
	} else {
		realID.ReferenceCount--
		return false, nil
	}
	return true, nil
}
// WithholdLocalIdentities marks the given numeric identities as unavailable,
// so AllocateIdentity will skip over them.
func (f *MockIdentityAllocator) WithholdLocalIdentities(nids []identity.NumericIdentity) {
	for _, nid := range nids {
		f.withheldIdentities[nid] = struct{}{}
	}
}

// UnwithholdLocalIdentities makes previously withheld identities available again.
func (f *MockIdentityAllocator) UnwithholdLocalIdentities(nids []identity.NumericIdentity) {
	for _, nid := range nids {
		delete(f.withheldIdentities, nid)
	}
}
// LookupIdentity looks up the labels in the mock identity store, checking
// reserved identities first. Returns nil when the labels are unknown.
func (f *MockIdentityAllocator) LookupIdentity(ctx context.Context, lbls labels.Labels) *identity.Identity {
	if reservedIdentity := identity.LookupReservedIdentityByLabels(lbls); reservedIdentity != nil {
		return reservedIdentity
	}
	return f.idToIdentity[f.labelsToIdentity[lbls.String()]]
}

// LookupIdentityByID returns the reserved identity for id if there is one,
// otherwise the mock-allocated identity (nil when unknown).
func (f *MockIdentityAllocator) LookupIdentityByID(ctx context.Context, id identity.NumericIdentity) *identity.Identity {
	// Note: the local variable deliberately shadows the identity package
	// name inside this if statement.
	if identity := identity.LookupReservedIdentity(id); identity != nil {
		return identity
	}
	return f.idToIdentity[int(id)]
}
// GetIdentityCache returns the identity cache.
func (f *MockIdentityAllocator) GetIdentityCache() identity.IdentityMap {
	return f.IdentityMap
}

// Observe completes immediately without emitting any identity change events.
func (f *MockIdentityAllocator) Observe(ctx context.Context, next func(cache.IdentityChange), complete func(error)) {
	go complete(nil)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testidentity
import "github.com/cilium/cilium/pkg/policy/api"
// DummyIdentityNotifier is a stub FQDN-selector notifier for tests.
type DummyIdentityNotifier struct {
	Registered map[api.FQDNSelector]struct{}
}

// NewDummyIdentityNotifier returns a notifier with an empty registration set.
func NewDummyIdentityNotifier() *DummyIdentityNotifier {
	return &DummyIdentityNotifier{
		Registered: make(map[api.FQDNSelector]struct{}),
	}
}

// RegisterFQDNSelector is a no-op.
// NOTE(review): it does not record the selector into Registered — confirm
// whether any test relies on that map being populated.
func (d DummyIdentityNotifier) RegisterFQDNSelector(selector api.FQDNSelector) {}

// UnregisterFQDNSelector is a no-op.
func (d DummyIdentityNotifier) UnregisterFQDNSelector(selector api.FQDNSelector) {}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testpolicy
import (
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
)
// policyMetricsNoop is a PolicyMetrics implementation that discards all updates.
type policyMetricsNoop struct {
}

// AddRule is a no-op.
func (p *policyMetricsNoop) AddRule(api.Rule) {
}

// DelRule is a no-op.
func (p *policyMetricsNoop) DelRule(api.Rule) {
}

// NewPolicyMetricsNoop returns a no-op PolicyMetrics for tests.
func NewPolicyMetricsNoop() types.PolicyMetrics {
	return &policyMetricsNoop{}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testpolicy
import (
"log/slog"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/policy/types"
)
// DummySelectorCacheUser is a stub selector-cache user for tests.
type DummySelectorCacheUser struct{}

// IdentitySelectionUpdated is a no-op.
func (d *DummySelectorCacheUser) IdentitySelectionUpdated(logger *slog.Logger, selector types.CachedSelector, added, deleted []identity.NumericIdentity) {
}

// IdentitySelectionCommit is a no-op.
func (d *DummySelectorCacheUser) IdentitySelectionCommit(logger *slog.Logger, txn *versioned.Tx) {
}

// IsPeerSelector always reports true.
func (d *DummySelectorCacheUser) IsPeerSelector() bool {
	return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"os"
"strings"
"testing"
)
const (
	// Environment variables gating the respective test categories.
	privilegedEnv            = "PRIVILEGED_TESTS"
	integrationEnv           = "INTEGRATION_TESTS"
	gatewayAPIConformanceEnv = "GATEWAY_API_CONFORMANCE_TESTS"
	// Required name prefixes for privileged tests and benchmarks.
	requiredTestPrefix      = "TestPrivileged"
	requiredBenchmarkPrefix = "BenchmarkPrivileged"
)
// PrivilegedTest enforces the privileged-test naming convention and skips the
// test unless the PRIVILEGED_TESTS environment variable is set.
func PrivilegedTest(tb testing.TB) {
	tb.Helper()
	name := tb.Name()
	// Enforce the name prefix matching the kind of testing.TB we got.
	switch tb.(type) {
	case *testing.T:
		if !hasPrivilegedPrefix(name, requiredTestPrefix) {
			tb.Fatalf("Privileged tests must have prefix '%s' in their name, got: %s", requiredTestPrefix, name)
		}
	case *testing.B:
		if !hasPrivilegedPrefix(name, requiredBenchmarkPrefix) {
			tb.Fatalf("Privileged benchmarks must have prefix '%s' in their name, got: %s", requiredBenchmarkPrefix, name)
		}
	default:
		tb.Fatalf("Unknown testing type %v", tb)
	}
	if os.Getenv(privilegedEnv) == "" {
		tb.Skipf("Set %s to run this test", privilegedEnv)
	}
}
// hasPrivilegedPrefix checks if the test/benchmark name has the TestPrivileged/BenchmarkPrivileged prefix.
// It handles both normal test functions "TestPrivileged*" and subtests that have
// a parent test name included like "TestPrivileged*/SubTest".
func hasPrivilegedPrefix(testName string, requiredPrefix string) bool {
	// Regular test function name.
	if strings.HasPrefix(testName, requiredPrefix) {
		return true
	}
	// Subtest: only the root (ParentTest) segment counts.
	root, _, _ := strings.Cut(testName, "/")
	return strings.HasPrefix(root, requiredPrefix)
}
// IsPrivileged returns true if privileged tests are requested via the
// PRIVILEGED_TESTS environment variable.
func IsPrivileged() bool {
	return os.Getenv(privilegedEnv) != ""
}
// IntegrationTests returns true if integration tests are requested.
func IntegrationTests() bool {
	return os.Getenv(integrationEnv) != ""
}

// IntegrationTest only executes tb if integration tests are requested,
// skipping it otherwise.
func IntegrationTest(tb testing.TB) {
	tb.Helper()
	if os.Getenv(integrationEnv) == "" {
		tb.Skipf("Set %s to run this test", integrationEnv)
	}
}
// GatewayAPIConformanceTest skips tb unless Gateway API conformance tests are
// requested via the GATEWAY_API_CONFORMANCE_TESTS environment variable.
func GatewayAPIConformanceTest(tb testing.TB) {
	tb.Helper()
	if os.Getenv(gatewayAPIConformanceEnv) == "" {
		tb.Skipf("Set %s to run this test", gatewayAPIConformanceEnv)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package testutils
import (
"testing"
"github.com/cilium/cilium/pkg/version"
"github.com/cilium/cilium/pkg/versioncheck"
)
// SkipOnOldKernel skips the test if the detected kernel version is lower than
// minVersion. Parameter feature is mentioned as the reason in the Skip message.
func SkipOnOldKernel(tb testing.TB, minVersion, feature string) {
	tb.Helper()
	v, err := versioncheck.Version(minVersion)
	if err != nil {
		tb.Fatalf("Can't parse version %s: %s", minVersion, err)
	}
	kv, err := version.GetKernelVersion()
	if err != nil {
		tb.Fatalf("Can't get kernel version: %s", err)
	}
	if kv.LT(v) {
		tb.Skipf("Test requires at least kernel %s (missing feature %s)", minVersion, feature)
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// package time is a wrapper for the stdlib time library that aliases most
// underlying types, but allows overrides for testing purposes.
//
// Synced to go-1.20.7.
package time
import (
"time"
)
// Layout constants and duration units re-exported unchanged from the stdlib
// time package.
const (
	Layout      = time.Layout
	ANSIC       = time.ANSIC
	UnixDate    = time.UnixDate
	RubyDate    = time.RubyDate
	RFC822      = time.RFC822
	RFC822Z     = time.RFC822Z
	RFC850      = time.RFC850
	RFC1123     = time.RFC1123
	RFC1123Z    = time.RFC1123Z
	RFC3339     = time.RFC3339
	RFC3339Nano = time.RFC3339Nano
	Kitchen     = time.Kitchen
	Stamp       = time.Stamp
	StampMilli  = time.StampMilli
	StampMicro  = time.StampMicro
	StampNano   = time.StampNano
	DateTime    = time.DateTime
	DateOnly    = time.DateOnly
	TimeOnly    = time.TimeOnly
	Nanosecond  = time.Nanosecond
	Microsecond = time.Microsecond
	Millisecond = time.Millisecond
	Second      = time.Second
	Minute      = time.Minute
	Hour        = time.Hour
)
// Function values re-exported unchanged from the stdlib time package.
// Timer/ticker/sleep constructors are NOT in this list: they are wrapped
// below to enforce MaxInternalTimerDelay.
var (
	ParseDuration          = time.ParseDuration
	Since                  = time.Since
	Until                  = time.Until
	FixedZone              = time.FixedZone
	LoadLocation           = time.LoadLocation
	LoadLocationFromTZData = time.LoadLocationFromTZData
	Date                   = time.Date
	Now                    = time.Now
	Parse                  = time.Parse
	ParseInLocation        = time.ParseInLocation
	UTC                    = time.UTC
	Unix                   = time.Unix
	UnixMicro              = time.UnixMicro
	UnixMilli              = time.UnixMilli
)
// Type aliases re-exported unchanged from the stdlib time package.
type (
	Duration   = time.Duration
	Location   = time.Location
	Month      = time.Month
	ParseError = time.ParseError
	Ticker     = time.Ticker
	Time       = time.Time
	Timer      = time.Timer
	Weekday    = time.Weekday
)
var (
	// MaxInternalTimerDelay, when set to a value > 0, caps the duration of
	// timers, tickers and sleeps created through this package. The zero
	// value disables capping.
	MaxInternalTimerDelay time.Duration
)
// After overrides the stdlib time.After to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
func After(d Duration) <-chan Time {
	if MaxInternalTimerDelay > 0 {
		d = min(d, MaxInternalTimerDelay)
	}
	return time.After(d)
}
// Sleep overrides the stdlib time.Sleep to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
func Sleep(d time.Duration) {
	if MaxInternalTimerDelay > 0 {
		d = min(d, MaxInternalTimerDelay)
	}
	time.Sleep(d)
}
// Tick overrides the stdlib time.Tick to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
// The underlying Ticker is discarded, so the returned channel can never be
// stopped — mirroring stdlib time.Tick.
func Tick(d Duration) <-chan time.Time {
	return NewTicker(d).C
}
// NewTicker overrides the stdlib time.NewTicker to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func NewTicker(d Duration) *time.Ticker {
	if limit := MaxInternalTimerDelay; limit > 0 && d > limit {
		return time.NewTicker(limit)
	}
	return time.NewTicker(d)
}
// NewTimer overrides the stdlib time.NewTimer to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func NewTimer(d Duration) *time.Timer {
	delay := d
	if MaxInternalTimerDelay > 0 && MaxInternalTimerDelay < delay {
		delay = MaxInternalTimerDelay
	}
	return time.NewTimer(delay)
}
// NewTimerWithoutMaxDelay returns a time.NewTimer without enforcing maximum
// sleepiness. This function should only be used in cases where the timer firing
// early impacts correctness. If in doubt, you probably should use NewTimer.
func NewTimerWithoutMaxDelay(d Duration) *time.Timer {
	// Deliberately bypasses MaxInternalTimerDelay: delegates straight to the
	// stdlib constructor.
	return time.NewTimer(d)
}
// AfterFunc overrides the stdlib time.AfterFunc to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func AfterFunc(d Duration, f func()) *time.Timer {
	if MaxInternalTimerDelay > 0 {
		if d > MaxInternalTimerDelay {
			d = MaxInternalTimerDelay
		}
	}
	return time.AfterFunc(d, f)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package trigger
import (
"fmt"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/time"
)
// MetricsObserver is the interface a metrics collector has to implement in
// order to collect trigger metrics.
type MetricsObserver interface {
	// PostRun is called after a trigger run with the call duration, the
	// latency between 1st queue request and the call run and the number of
	// queued events folded into the last run.
	PostRun(callDuration, latency time.Duration, folds int)

	// QueueEvent is called when Trigger() is called to schedule a trigger
	// run.
	QueueEvent(reason string)
}
// Parameters are the user specified parameters.
type Parameters struct {
	// MinInterval is the minimum required interval between invocations of
	// TriggerFunc.
	MinInterval time.Duration

	// TriggerFunc is the function to be called when Trigger() is called
	// while respecting MinInterval and serialization.
	TriggerFunc func(reasons []string)

	// ShutdownFunc is called when the trigger is shut down.
	ShutdownFunc func()

	// MetricsObserver is an optional observer of queue and run events;
	// it may be nil (all call sites nil-check it).
	MetricsObserver MetricsObserver

	// Name is the unique name of the trigger. It must be provided in a
	// format compatible to be used as prometheus name string.
	Name string

	// sleepInterval controls the waiter sleep duration. This parameter is
	// only exposed to tests.
	sleepInterval time.Duration
}
// reasonStack collects the unique set of reasons folded into one trigger run.
type reasonStack map[string]struct{}

// newReasonStack returns an empty reason set.
func newReasonStack() reasonStack {
	return make(reasonStack)
}

// add records a reason; duplicates are collapsed.
func (r reasonStack) add(reason string) {
	r[reason] = struct{}{}
}

// slice returns the collected reasons in unspecified order.
func (r reasonStack) slice() []string {
	out := make([]string, 0, len(r))
	for reason := range r {
		out = append(out, reason)
	}
	return out
}
// Trigger represents an active trigger logic. Use NewTrigger() to create a
// trigger.
type Trigger struct {
	// mutex protects mutual access of 'trigger' (and the fold state below)
	// between Trigger() and waiter().
	mutex   lock.Mutex
	trigger bool
	// params are the user specified parameters
	params Parameters
	// lastTrigger is the timestamp of the last invoked trigger
	lastTrigger time.Time
	// wakeupChan is used to wake up the background trigger routine
	wakeupChan chan struct{}
	// closeChan is used to stop the background trigger routine
	closeChan chan struct{}
	// numFolds is the current count of folds that happened into the
	// currently scheduled trigger
	numFolds int
	// foldedReasons is the sum of all unique reasons folded together.
	foldedReasons reasonStack
	// waitStart is when the first Trigger() of the current fold was made;
	// waiter() uses it to compute the call latency reported to PostRun.
	waitStart time.Time
}
// NewTrigger returns a new trigger based on the provided parameters. It
// validates that a TriggerFunc was supplied and starts the background
// waiter goroutine.
func NewTrigger(p Parameters) (*Trigger, error) {
	if p.TriggerFunc == nil {
		return nil, fmt.Errorf("trigger function is nil")
	}
	if p.sleepInterval == 0 {
		p.sleepInterval = time.Second
	}

	t := &Trigger{
		params:        p,
		wakeupChan:    make(chan struct{}, 1),
		closeChan:     make(chan struct{}, 1),
		foldedReasons: newReasonStack(),
	}

	// Backdate lastTrigger by MinInterval so the very first trigger run
	// is not delayed.
	if p.MinInterval > 0 {
		t.lastTrigger = time.Now().Add(-p.MinInterval)
	}

	go t.waiter()
	return t, nil
}
// needsDelay reports whether a delay is required to fulfill MinInterval,
// and if so, how long to wait.
func (t *Trigger) needsDelay() (bool, time.Duration) {
	minInterval := t.params.MinInterval
	if minInterval == 0 {
		return false, 0
	}
	// Remaining time until lastTrigger+MinInterval; positive means we must
	// still wait.
	wait := time.Until(t.lastTrigger.Add(minInterval))
	return wait > 0, wait
}
// TriggerWithReason schedules a call to TriggerFunc as specified in the
// parameters provided to NewTrigger(), recording the given reason. It
// respects MinInterval and ensures that calls to TriggerFunc are serialized.
// This function is non-blocking and will return immediately before
// TriggerFunc is potentially triggered and has completed.
func (t *Trigger) TriggerWithReason(reason string) {
	t.mutex.Lock()
	if t.numFolds == 0 {
		// First request of this fold: latency is measured from here.
		t.waitStart = time.Now()
	}
	t.numFolds++
	t.foldedReasons.add(reason)
	t.trigger = true
	t.mutex.Unlock()

	if obs := t.params.MetricsObserver; obs != nil {
		obs.QueueEvent(reason)
	}

	// Non-blocking send: if the buffer already holds a wakeup, the waiter
	// will run anyway.
	select {
	case t.wakeupChan <- struct{}{}:
	default:
	}
}
// Trigger triggers the call to TriggerFunc as specified in the parameters
// provided to NewTrigger(). It respects MinInterval and ensures that calls to
// TriggerFunc are serialized. This function is non-blocking and will return
// immediately before TriggerFunc is potentially triggered and has completed.
//
// It is equivalent to TriggerWithReason with an empty reason.
func (t *Trigger) Trigger() {
	t.TriggerWithReason("")
}
// Shutdown stops the trigger mechanism by closing closeChan, which causes
// the background waiter goroutine to run ShutdownFunc (if set) and exit.
// Shutdown must only be called once.
func (t *Trigger) Shutdown() {
	close(t.closeChan)
}
// waiter is the background goroutine that serializes all invocations of
// TriggerFunc. It runs until closeChan is closed by Shutdown().
func (t *Trigger) waiter() {
	tk := time.NewTicker(t.params.sleepInterval)
	defer tk.Stop()
	for {
		// keep critical section as small as possible
		t.mutex.Lock()
		triggerEnabled := t.trigger
		t.trigger = false
		t.mutex.Unlock()
		// run the trigger function
		if triggerEnabled {
			if delayNeeded, delay := t.needsDelay(); delayNeeded {
				// Honor MinInterval by sleeping off the remainder.
				time.Sleep(delay)
			}
			// Snapshot and reset the fold state under the lock; the call
			// itself happens outside the lock so Trigger() never blocks
			// on a slow TriggerFunc.
			t.mutex.Lock()
			t.lastTrigger = time.Now()
			numFolds := t.numFolds
			t.numFolds = 0
			reasons := t.foldedReasons.slice()
			t.foldedReasons = newReasonStack()
			callLatency := time.Since(t.waitStart)
			t.mutex.Unlock()
			beforeTrigger := time.Now()
			t.params.TriggerFunc(reasons)
			if t.params.MetricsObserver != nil {
				callDuration := time.Since(beforeTrigger)
				t.params.MetricsObserver.PostRun(callDuration, callLatency, numFolds)
			}
		}
		// Wake up on an explicit Trigger(), on the periodic sleepInterval
		// tick, or exit on Shutdown().
		select {
		case <-t.wakeupChan:
		case <-tk.C:
		case <-t.closeChan:
			shutdownFunc := t.params.ShutdownFunc
			if shutdownFunc != nil {
				shutdownFunc()
			}
			return
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"net"
"net/netip"
)
// IPv4 is the binary representation for encoding in binary structs.
type IPv4 [4]byte

// IsZero reports whether the address is all zeros (0.0.0.0).
func (v4 IPv4) IsZero() bool {
	return v4 == IPv4{}
}

// IP returns the address as a 4-byte net.IP.
func (v4 IPv4) IP() net.IP {
	return net.IP(v4[:])
}

// Addr returns the address as a netip.Addr.
func (v4 IPv4) Addr() netip.Addr {
	return netip.AddrFrom4(v4)
}

// String returns the dotted-decimal representation.
func (v4 IPv4) String() string {
	return v4.IP().String()
}

// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (v4 *IPv4) DeepCopyInto(out *IPv4) {
	*out = *v4
}

// FromAddr will populate the receiver with the specified address if and only
// if the provided address is a valid IPv4 address. Any other address,
// including the "invalid ip" value netip.Addr{} will zero the receiver.
func (v4 *IPv4) FromAddr(addr netip.Addr) {
	if addr.Is4() {
		*v4 = IPv4(addr.As4())
	} else {
		*v4 = IPv4{}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"net"
"net/netip"
)
// IPv6 is the binary representation for encoding in binary structs.
type IPv6 [16]byte

// IsZero reports whether the address is all zeros (::).
func (v6 IPv6) IsZero() bool {
	return v6 == IPv6{}
}

// IP returns the address as a 16-byte net.IP.
func (v6 IPv6) IP() net.IP {
	return net.IP(v6[:])
}

// Addr returns the address as a netip.Addr.
func (v6 IPv6) Addr() netip.Addr {
	return netip.AddrFrom16(v6)
}

// String returns the textual representation of the address.
func (v6 IPv6) String() string {
	return v6.IP().String()
}

// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (v6 *IPv6) DeepCopyInto(out *IPv6) {
	*out = *v6
}

// FromAddr will populate the receiver with the specified address if and only
// if the provided address is a valid IPv6 address. Any other address,
// including the "invalid ip" value netip.Addr{} will zero the receiver.
func (v6 *IPv6) FromAddr(addr netip.Addr) {
	if addr.Is6() {
		*v6 = IPv6(addr.As16())
	} else {
		*v6 = IPv6{}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"net"
)
// MACAddr is the binary representation for encoding in binary structs.
type MACAddr [6]byte

// hardwareAddr views the address as a net.HardwareAddr.
func (addr MACAddr) hardwareAddr() net.HardwareAddr {
	return net.HardwareAddr(addr[:])
}

// String returns the colon-separated hexadecimal representation.
func (addr MACAddr) String() string {
	return addr.hardwareAddr().String()
}

// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (addr *MACAddr) DeepCopyInto(out *MACAddr) {
	*out = *addr
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"errors"
"fmt"
"strings"
"github.com/cilium/cilium/pkg/counter"
"github.com/cilium/cilium/pkg/iana"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/u8proto"
)
// Sentinel errors returned by the named-port lookup functions below.
var (
	ErrNilMap               = errors.New("nil map")
	ErrUnknownNamedPort     = errors.New("unknown named port")
	ErrIncompatibleProtocol = errors.New("incompatible protocol")
	ErrNamedPortIsZero      = errors.New("named port is zero")
	ErrDuplicateNamedPorts  = errors.New("duplicate named ports")
)

// PortProto is a pair of port number and protocol and is used as the
// value type in named port maps.
type PortProto struct {
	Proto u8proto.U8proto // 0 for any
	Port  uint16          // non-0
}

// NamedPortMap maps port names to port numbers and protocols.
type NamedPortMap map[string]PortProto

// PortProtoSet is a reference-counted set of unique PortProto values.
type PortProtoSet counter.Counter[PortProto]
// Equal returns true if the PortProtoSets contain the same PortProto keys.
// Reference counts are not compared, only key membership.
func (pps PortProtoSet) Equal(other PortProtoSet) bool {
	if len(pps) != len(other) {
		return false
	}
	for pp := range pps {
		_, found := other[pp]
		if !found {
			return false
		}
	}
	return true
}
// Add increments the reference count for the specified key.
// The boolean result is forwarded from counter.Counter.Add — presumably it
// reports whether the key was newly inserted (see pkg/counter to confirm).
func (pps PortProtoSet) Add(pp PortProto) bool {
	return counter.Counter[PortProto](pps).Add(pp)
}

// Delete decrements the reference count for the specified key.
// The boolean result is forwarded from counter.Counter.Delete — presumably it
// reports whether the key was fully removed (see pkg/counter to confirm).
func (pps PortProtoSet) Delete(pp PortProto) bool {
	return counter.Counter[PortProto](pps).Delete(pp)
}
// NamedPortMultiMap may have multiple entries for a name if multiple PODs
// define the same name with different values.
type NamedPortMultiMap interface {
	// GetNamedPort returns the port number for the named port, if any.
	GetNamedPort(name string, proto u8proto.U8proto) (uint16, error)

	// Len returns the number of Name->PortProtoSet mappings known.
	Len() int
}

// NewNamedPortMultiMap returns a new, empty namedPortMultiMap.
func NewNamedPortMultiMap() *namedPortMultiMap {
	return &namedPortMultiMap{
		m: make(map[string]PortProtoSet),
	}
}

// namedPortMultiMap implements NamedPortMultiMap and allows changes through
// Update. All accesses must be protected by its RW mutex.
type namedPortMultiMap struct {
	lock.RWMutex
	m map[string]PortProtoSet
}
// Len returns the number of Name->PortProtoSet mappings known.
func (npm *namedPortMultiMap) Len() int {
	npm.RLock()
	defer npm.RUnlock()
	return len(npm.m)
}
// Update applies potential changes in named ports, and returns whether there were any.
func (npm *namedPortMultiMap) Update(old, new NamedPortMap) (namedPortsChanged bool) {
	npm.Lock()
	defer npm.Unlock()
	// The order is important here. Increment the refcount first, and then
	// decrement it again for old ports, so that we don't hit zero if there are
	// no changes.
	for name, port := range new {
		c, ok := npm.m[name]
		if !ok {
			c = make(PortProtoSet)
			npm.m[name] = c
		}
		if c.Add(port) {
			namedPortsChanged = true
		}
	}
	for name, port := range old {
		// Delete on a missing name operates on a nil PortProtoSet, which
		// simply reports no change.
		if npm.m[name].Delete(port) {
			namedPortsChanged = true
			// Drop the name entirely once its refcounted set is empty.
			if len(npm.m[name]) == 0 {
				delete(npm.m, name)
			}
		}
	}
	return namedPortsChanged
}
// ValidatePortName checks that the port name conforms to the IANA Service
// Names spec and returns the name lowercased for case-insensitive comparison.
func ValidatePortName(name string) (string, error) {
	// Port names are formatted as IANA Service Names.
	if iana.IsSvcName(name) {
		// Normalize for case-insensitive comparison.
		return strings.ToLower(name), nil
	}
	return "", fmt.Errorf("Invalid port name \"%s\", not using as a named port", name)
}
// newPortProto validates port and protocol and packs them into a PortProto.
// An empty protocol defaults to TCP (K8s ContainerPort semantics).
func newPortProto(port int, protocol string) (PortProto, error) {
	u8p := u8proto.TCP // K8s ContainerPort protocol defaults to TCP
	if protocol != "" {
		parsed, err := u8proto.ParseProtocol(protocol)
		if err != nil {
			return PortProto{}, err
		}
		u8p = parsed
	}

	switch {
	case port == 0:
		return PortProto{}, ErrNamedPortIsZero
	case port < 1 || port > 65535:
		return PortProto{}, fmt.Errorf("Port number %d out of 16-bit range", port)
	}

	return PortProto{
		Proto: u8p,
		Port:  uint16(port),
	}, nil
}
// AddPort validates name, port and protocol and adds the resulting
// PortProto to the NamedPortMap under the normalized name.
func (npm NamedPortMap) AddPort(name string, port int, protocol string) error {
	validName, err := ValidatePortName(name)
	if err != nil {
		return err
	}
	pp, err := newPortProto(port, protocol)
	if err != nil {
		return err
	}
	npm[validName] = pp
	return nil
}
// GetNamedPort returns the port number for the named port, if any.
// A PortProto with Proto == 0 matches any requested protocol.
func (npm NamedPortMap) GetNamedPort(name string, proto u8proto.U8proto) (uint16, error) {
	if npm == nil {
		return 0, ErrNilMap
	}
	pp, found := npm[name]
	switch {
	case !found:
		return 0, ErrUnknownNamedPort
	case pp.Proto != 0 && proto != pp.Proto:
		return 0, ErrIncompatibleProtocol
	case pp.Port == 0:
		return 0, ErrNamedPortIsZero
	}
	return pp.Port, nil
}
// GetNamedPort returns the port number for the named port, if any.
// When multiple pods define the same name, the lookup succeeds only if all
// compatible entries agree on a single non-zero port number.
func (npm *namedPortMultiMap) GetNamedPort(name string, proto u8proto.U8proto) (uint16, error) {
	if npm == nil {
		return 0, ErrNilMap
	}
	npm.RLock()
	defer npm.RUnlock()
	if npm.m == nil {
		return 0, ErrNilMap
	}
	pps, ok := npm.m[name]
	if !ok {
		// Return an error the caller can filter out as this happens only for egress policy
		// and it is likely the destination POD with the port name is simply not scheduled yet.
		return 0, ErrUnknownNamedPort
	}
	// Find if there is a single port that has no proto conflict and no zero port value
	port := uint16(0)
	// err records the most specific reason seen so far for finding no
	// usable port; it is only returned if no entry qualifies.
	err := ErrUnknownNamedPort
	for pp := range pps {
		if pp.Proto != 0 && proto != pp.Proto {
			err = ErrIncompatibleProtocol
			continue // conflicting proto
		}
		if pp.Port == 0 {
			err = ErrNamedPortIsZero
			continue // zero port
		}
		// Two different usable port numbers for the same name is ambiguous.
		if port != 0 && pp.Port != port {
			return 0, ErrDuplicateNamedPorts
		}
		port = pp.Port
	}
	if port == 0 {
		return 0, err
	}
	return port, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package u8proto
import (
"fmt"
"strconv"
"strings"
)
// These definitions must contain and be compatible with the string
// values defined for pkg/policy/api/L4Proto.
const (
	// ANY represents protocols with transport-layer ports (TCP, UDP, SCTP).
	ANY    U8proto = 0
	ICMP   U8proto = 1
	IGMP   U8proto = 2
	TCP    U8proto = 6
	UDP    U8proto = 17
	ICMPv6 U8proto = 58
	VRRP   U8proto = 112
	SCTP   U8proto = 132
)
// protoNames maps protocol numbers to their canonical upper-case names; it
// is the source of truth for String() and FromNumber().
var protoNames = map[U8proto]string{
	0:   "ANY",
	1:   "ICMP",
	2:   "IGMP",
	6:   "TCP",
	17:  "UDP",
	58:  "ICMPv6",
	112: "VRRP",
	132: "SCTP",
}

// ProtoIDs maps lower-case protocol names to protocol numbers.
var ProtoIDs = map[string]U8proto{
	"all":    0,
	"any":    0,
	"none":   0,
	"icmp":   1,
	"igmp":   2,
	"tcp":    6,
	"udp":    17,
	"icmpv6": 58,
	"vrrp":   112,
	"sctp":   132,
}

// U8proto is an IP transport-layer protocol number (e.g. 6 for TCP).
type U8proto uint8

// String returns the canonical name of the protocol, or its decimal number
// when the protocol is unknown.
func (p U8proto) String() string {
	// Single map lookup instead of the previous lookup-then-index pattern.
	if name, ok := protoNames[p]; ok {
		return name
	}
	return strconv.Itoa(int(p))
}

// ParseProtocol converts a case-insensitive protocol name into its number.
func ParseProtocol(proto string) (U8proto, error) {
	if u, ok := ProtoIDs[strings.ToLower(proto)]; ok {
		return u, nil
	}
	return 0, fmt.Errorf("unknown protocol '%s'", proto)
}

// FromNumber validates that proto is a known protocol number.
func FromNumber(proto uint8) (U8proto, error) {
	if _, ok := protoNames[U8proto(proto)]; !ok {
		return 0, fmt.Errorf("unknown protocol %d", proto)
	}
	return U8proto(proto), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package util
// RoundUp rounds x up to the next specified multiple. This implementation
// is equivalent to the kernel's roundup().
func RoundUp(x, multiple int) int {
	return (x + multiple - 1) / multiple * multiple
}
// RoundDown rounds x down to the next specified multiple. Again, this
// implementation is equivalent to the kernel's rounddown().
func RoundDown(x, multiple int) int {
	// x - x%m == (x/m)*m holds exactly for Go's truncating division.
	return x / multiple * multiple
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package version
import (
"encoding/base64"
"encoding/json"
"fmt"
"runtime"
"strings"
"sync"
)
// CiliumVersion provides a minimal structure to the version string.
type CiliumVersion struct {
	// Version is the semantic version of Cilium
	Version string
	// Revision is the short SHA from the last commit
	Revision string
	// GoRuntimeVersion is the Go version used to run Cilium
	GoRuntimeVersion string
	// Arch is the architecture where Cilium was compiled
	Arch string
	// AuthorDate is the git author time reference stored as string ISO 8601 formatted
	AuthorDate string
}

// ciliumVersion is set to Cilium's version, revision and git author time reference during build.
var ciliumVersion string

// Version is the complete Cilium version string including Go version.
var Version string

func init() {
	// Mimic the output of `go version` and append it to ciliumVersion.
	// Report GOOS/GOARCH of the actual binary, not the system it was built on, in case it was
	// cross-compiled. See #13122
	Version = fmt.Sprintf("%s go version %s %s/%s", ciliumVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
}
// FromString converts a full Cilium version string into a CiliumVersion.
//
// Expected layout (7 space-separated fields):
//
//	"0.13.90 a722bdb 2018-01-09T22:32:37+01:00 go version go1.9 linux/amd64"
//
// Any input that does not split into exactly 7 fields yields the zero value.
func FromString(versionString string) CiliumVersion {
	fields := strings.Split(versionString, " ")
	if len(fields) != 7 {
		return CiliumVersion{}
	}
	return CiliumVersion{
		Version:          fields[0],
		Revision:         fields[1],
		AuthorDate:       fields[2],
		GoRuntimeVersion: fields[5],
		Arch:             fields[6],
	}
}
// GetCiliumVersion returns the parsed CiliumVersion structure; the parse of
// the package-level Version string is performed once on first use.
var GetCiliumVersion = sync.OnceValue(func() CiliumVersion {
	return FromString(Version)
})

// Base64 returns the version string JSON-encoded and then base64-encoded.
func Base64() (string, error) {
	jsonBytes, err := json.Marshal(Version)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(jsonBytes), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !windows
package version
import (
"fmt"
"regexp"
"strings"
"github.com/blang/semver/v4"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/versioncheck"
)
// kernelPatchRegexp extracts the leading decimal digits of a version
// component, e.g. "17-040917" -> "17". Compiled once at package scope so it
// is not recompiled on every parseKernelVersion call.
var kernelPatchRegexp = regexp.MustCompilePOSIX(`^[0-9]+`)

// parseKernelVersion normalizes a raw kernel release string (as reported by
// uname) into a Major.Minor.Patch semantic version.
func parseKernelVersion(ver string) (semver.Version, error) {
	// Trim null bytes and whitespace that may come from C strings.
	ver = strings.TrimRight(ver, "\x00")
	ver = strings.TrimSpace(ver)
	verStrs := strings.Split(ver, ".")
	// We are assuming the kernel version will be one of the following:
	// 4.9.17-040917-generic or 4.9-040917-generic or 4-generic
	// 6.15.8-200.fc42.x86_64 (newer format with additional dot-separated components)
	// So as observed, the kernel value is N.N.N-m or N.N-m or N-m or N.N.N-m.additional.components
	// This implies the len(verStrs) should be at least 1, but can be more than 3.
	// (strings.Split never returns an empty slice, so this check is purely defensive.)
	if len(verStrs) < 1 {
		return semver.Version{}, fmt.Errorf("unable to get kernel version from %q", ver)
	}
	// Take only the first 3 components for semantic version parsing.
	// If there are more than 3 components, we'll only use the first 3.
	if len(verStrs) > 3 {
		verStrs = verStrs[:3]
	}
	// Extract the leading number from the last component (stripping
	// suffixes like "-generic"), replacing it with "0" when there is none,
	// and pad with "0" components until the length is 3, so we always
	// return Major.Minor.Patch.
	patch := kernelPatchRegexp.FindString(verStrs[len(verStrs)-1])
	if patch == "" {
		verStrs[len(verStrs)-1] = "0"
	} else {
		verStrs[len(verStrs)-1] = patch
	}
	for len(verStrs) < 3 {
		verStrs = append(verStrs, "0")
	}
	return versioncheck.Version(strings.Join(verStrs[:3], "."))
}
// GetKernelVersion returns the version of the Linux kernel running on this
// host, parsed from uname(2)'s release field.
func GetKernelVersion() (semver.Version, error) {
	var unameBuf unix.Utsname
	if err := unix.Uname(&unameBuf); err != nil {
		return semver.Version{}, err
	}
	// Release is a fixed-size byte array; parseKernelVersion trims the
	// trailing NUL bytes.
	return parseKernelVersion(string(unameBuf.Release[:]))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package versioncheck provides utility wrappers for go-version, allowing the
// constraints to be used as global variables.
package versioncheck
import (
"fmt"
"strconv"
"strings"
"github.com/blang/semver/v4"
)
// MustCompile wraps Compile, panicking when the constraint cannot be parsed.
// It is intended to be used like regexp.MustCompile, so that unparseable
// constraints are caught in testing.
func MustCompile(constraint string) semver.Range {
	rng, err := Compile(constraint)
	if err != nil {
		panic(fmt.Errorf("cannot compile go-version constraint '%s': %w", constraint, err))
	}
	return rng
}
// Compile trivially wraps semver.ParseRange, returning the parsed range
// constraint and any parse error.
func Compile(constraint string) (semver.Range, error) {
	return semver.ParseRange(constraint)
}
// MustVersion wraps Version, panicking when the version string cannot be
// parsed. Intended for static, known-good version strings.
func MustVersion(version string) semver.Version {
	ver, err := Version(version)
	if err != nil {
		panic(fmt.Errorf("cannot compile go-version version '%s': %w", version, err))
	}
	return ver
}
// Version wraps semver.ParseTolerant, returning an error when the version
// cannot be parsed (it does not panic; see MustVersion for that).
// Pre-release suffixes that look like upstream pre-release tags
// (rc/beta/alpha/snapshot) are kept; any other pre-release component is
// stripped, reducing the result to plain Major.Minor.Patch.
func Version(version string) (semver.Version, error) {
	ver, err := semver.ParseTolerant(version)
	if err != nil || len(ver.Pre) == 0 {
		return ver, err
	}
	for _, pre := range ver.Pre {
		for _, tag := range []string{"rc", "beta", "alpha", "snapshot"} {
			if strings.Contains(pre.VersionStr, tag) {
				return ver, nil
			}
		}
	}
	// Unrecognized pre-release (e.g. distro suffix): re-parse just the
	// numeric core.
	return semver.ParseTolerant(fmt.Sprintf("%d.%d.%d", ver.Major, ver.Minor, ver.Patch))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build gofuzz
package fuzz
import (
"github.com/cilium/cilium/pkg/labels"
)
// Fuzz is a go-fuzz entry point: it feeds data to Label.UnmarshalJSON and
// reports whether the input was accepted (1) or rejected (0).
func Fuzz(data []byte) int {
	label := labels.NewLabel("test", "label", "1")
	if err := label.UnmarshalJSON(data); err != nil {
		return 0
	}
	return 1
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/accesslog.proto
package cilium
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: these fail to build when the protoimpl runtime is
// older or newer than what this generated code expects.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// HttpProtocol is the HTTP protocol version of a logged request.
//
// NOTE(review): generated by protoc-gen-go from cilium/api/accesslog.proto;
// regenerate rather than hand-editing.
type HttpProtocol int32

const (
	HttpProtocol_HTTP10 HttpProtocol = 0
	HttpProtocol_HTTP11 HttpProtocol = 1
	HttpProtocol_HTTP2  HttpProtocol = 2
)

// Enum value maps for HttpProtocol.
var (
	HttpProtocol_name = map[int32]string{
		0: "HTTP10",
		1: "HTTP11",
		2: "HTTP2",
	}
	HttpProtocol_value = map[string]int32{
		"HTTP10": 0,
		"HTTP11": 1,
		"HTTP2":  2,
	}
)

// Enum returns a pointer to a copy of x.
func (x HttpProtocol) Enum() *HttpProtocol {
	p := new(HttpProtocol)
	*p = x
	return p
}

func (x HttpProtocol) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (HttpProtocol) Descriptor() protoreflect.EnumDescriptor {
	return file_cilium_api_accesslog_proto_enumTypes[0].Descriptor()
}

func (HttpProtocol) Type() protoreflect.EnumType {
	return &file_cilium_api_accesslog_proto_enumTypes[0]
}

func (x HttpProtocol) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use HttpProtocol.Descriptor instead.
func (HttpProtocol) EnumDescriptor() ([]byte, []int) {
	return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{0}
}
// EntryType classifies a log entry as a request, a response, or a denied
// request.
//
// NOTE(review): generated by protoc-gen-go from cilium/api/accesslog.proto;
// regenerate rather than hand-editing.
type EntryType int32

const (
	EntryType_Request  EntryType = 0
	EntryType_Response EntryType = 1
	EntryType_Denied   EntryType = 2
)

// Enum value maps for EntryType.
var (
	EntryType_name = map[int32]string{
		0: "Request",
		1: "Response",
		2: "Denied",
	}
	EntryType_value = map[string]int32{
		"Request":  0,
		"Response": 1,
		"Denied":   2,
	}
)

// Enum returns a pointer to a copy of x.
func (x EntryType) Enum() *EntryType {
	p := new(EntryType)
	*p = x
	return p
}

func (x EntryType) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (EntryType) Descriptor() protoreflect.EnumDescriptor {
	return file_cilium_api_accesslog_proto_enumTypes[1].Descriptor()
}

func (EntryType) Type() protoreflect.EnumType {
	return &file_cilium_api_accesslog_proto_enumTypes[1]
}

func (x EntryType) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use EntryType.Descriptor instead.
func (EntryType) EnumDescriptor() ([]byte, []int) {
	return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{1}
}
// KeyValue is a single key/value string pair (used below for logged HTTP
// headers).
//
// NOTE(review): generated by protoc-gen-go from cilium/api/accesslog.proto;
// regenerate rather than hand-editing.
type KeyValue struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Key           string                 `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value         string                 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *KeyValue) Reset() {
	*x = KeyValue{}
	mi := &file_cilium_api_accesslog_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *KeyValue) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*KeyValue) ProtoMessage() {}

func (x *KeyValue) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_accesslog_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
func (*KeyValue) Descriptor() ([]byte, []int) {
	return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{0}
}

// GetKey returns the key, or "" when the receiver is nil.
func (x *KeyValue) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

// GetValue returns the value, or "" when the receiver is nil.
func (x *KeyValue) GetValue() string {
	if x != nil {
		return x.Value
	}
	return ""
}
// HttpLogEntry is the HTTP-specific part of an access log record.
//
// NOTE(review): generated by protoc-gen-go from cilium/api/accesslog.proto;
// regenerate rather than hand-editing.
type HttpLogEntry struct {
	state        protoimpl.MessageState `protogen:"open.v1"`
	HttpProtocol HttpProtocol           `protobuf:"varint,1,opt,name=http_protocol,json=httpProtocol,proto3,enum=cilium.HttpProtocol" json:"http_protocol,omitempty"`
	// Request info that is also retained for the response
	Scheme string `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"` // Envoy "x-forwarded-proto", e.g., "http", "https"
	Host   string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`     // Envoy ":authority" header
	Path   string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`     // Envoy ":path" header
	Method string `protobuf:"bytes,5,opt,name=method,proto3" json:"method,omitempty"` // Envoy ":method" header
	// Request or response headers not included above
	Headers []*KeyValue `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty"`
	// Response info
	Status uint32 `protobuf:"varint,7,opt,name=status,proto3" json:"status,omitempty"` // Envoy ":status" header, zero for request
	// missing_headers includes both headers that were added to the
	// request, and headers that were merely logged as missing
	MissingHeaders []*KeyValue `protobuf:"bytes,8,rep,name=missing_headers,json=missingHeaders,proto3" json:"missing_headers,omitempty"`
	// rejected_headers includes headers that were flagged as unallowed,
	// which may have been removed, or merely logged and the request still
	// allowed, or the request may have been dropped due to them.
	RejectedHeaders []*KeyValue `protobuf:"bytes,9,rep,name=rejected_headers,json=rejectedHeaders,proto3" json:"rejected_headers,omitempty"`
	unknownFields   protoimpl.UnknownFields
	sizeCache       protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *HttpLogEntry) Reset() {
	*x = HttpLogEntry{}
	mi := &file_cilium_api_accesslog_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *HttpLogEntry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HttpLogEntry) ProtoMessage() {}

func (x *HttpLogEntry) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_accesslog_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HttpLogEntry.ProtoReflect.Descriptor instead.
func (*HttpLogEntry) Descriptor() ([]byte, []int) {
	return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{1}
}

// Nil-safe getters, one per field, as generated for every proto message.

func (x *HttpLogEntry) GetHttpProtocol() HttpProtocol {
	if x != nil {
		return x.HttpProtocol
	}
	return HttpProtocol_HTTP10
}

func (x *HttpLogEntry) GetScheme() string {
	if x != nil {
		return x.Scheme
	}
	return ""
}

func (x *HttpLogEntry) GetHost() string {
	if x != nil {
		return x.Host
	}
	return ""
}

func (x *HttpLogEntry) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

func (x *HttpLogEntry) GetMethod() string {
	if x != nil {
		return x.Method
	}
	return ""
}

func (x *HttpLogEntry) GetHeaders() []*KeyValue {
	if x != nil {
		return x.Headers
	}
	return nil
}

func (x *HttpLogEntry) GetStatus() uint32 {
	if x != nil {
		return x.Status
	}
	return 0
}

func (x *HttpLogEntry) GetMissingHeaders() []*KeyValue {
	if x != nil {
		return x.MissingHeaders
	}
	return nil
}

func (x *HttpLogEntry) GetRejectedHeaders() []*KeyValue {
	if x != nil {
		return x.RejectedHeaders
	}
	return nil
}
// KafkaLogEntry is the Kafka-specific part of an access log record.
//
// NOTE(review): generated by protoc-gen-go from cilium/api/accesslog.proto;
// regenerate rather than hand-editing.
type KafkaLogEntry struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// correlation_id is a user-supplied integer value that will be passed
	// back with the response
	CorrelationId int32 `protobuf:"varint,1,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"`
	// error_code is the Kafka error code being returned
	// Ref. https://kafka.apache.org/protocol#protocol_error_codes
	ErrorCode int32 `protobuf:"varint,2,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
	// api_version of the Kafka api used
	// Ref. https://kafka.apache.org/protocol#protocol_compatibility
	ApiVersion int32 `protobuf:"varint,3,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
	// api_key for Kafka message
	// Reference: https://kafka.apache.org/protocol#protocol_api_keys
	ApiKey int32 `protobuf:"varint,4,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
	// Topics of the request
	// Optional, as not all messages have topics (ex. LeaveGroup, Heartbeat)
	Topics        []string `protobuf:"bytes,5,rep,name=topics,proto3" json:"topics,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *KafkaLogEntry) Reset() {
	*x = KafkaLogEntry{}
	mi := &file_cilium_api_accesslog_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *KafkaLogEntry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*KafkaLogEntry) ProtoMessage() {}

func (x *KafkaLogEntry) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_accesslog_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use KafkaLogEntry.ProtoReflect.Descriptor instead.
func (*KafkaLogEntry) Descriptor() ([]byte, []int) {
	return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{2}
}

// Nil-safe getters, one per field, as generated for every proto message.

func (x *KafkaLogEntry) GetCorrelationId() int32 {
	if x != nil {
		return x.CorrelationId
	}
	return 0
}

func (x *KafkaLogEntry) GetErrorCode() int32 {
	if x != nil {
		return x.ErrorCode
	}
	return 0
}

func (x *KafkaLogEntry) GetApiVersion() int32 {
	if x != nil {
		return x.ApiVersion
	}
	return 0
}

func (x *KafkaLogEntry) GetApiKey() int32 {
	if x != nil {
		return x.ApiKey
	}
	return 0
}

func (x *KafkaLogEntry) GetTopics() []string {
	if x != nil {
		return x.Topics
	}
	return nil
}
// L7LogEntry is a generic layer-7 access log record carrying a protocol
// name and a free-form string key/value field map (proto field 2 is a map
// entry message per its protobuf_key/protobuf_val tags).
type L7LogEntry struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Proto names the L7 protocol this entry describes.
	Proto string `protobuf:"bytes,1,opt,name=proto,proto3" json:"proto,omitempty"`
	// Fields holds arbitrary string key/value pairs for the entry.
	Fields        map[string]string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-registers it with the generated message info.
func (x *L7LogEntry) Reset() {
	*x = L7LogEntry{}
	mi := &file_cilium_api_accesslog_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *L7LogEntry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *L7LogEntry as a protobuf message.
func (*L7LogEntry) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message info; a nil receiver yields a type-only view.
func (x *L7LogEntry) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_accesslog_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use L7LogEntry.ProtoReflect.Descriptor instead.
func (*L7LogEntry) Descriptor() ([]byte, []int) {
	return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{3}
}

// GetProto returns the proto field; "" when x is nil.
func (x *L7LogEntry) GetProto() string {
	if x != nil {
		return x.Proto
	}
	return ""
}

// GetFields returns the fields map; nil when x is nil.
func (x *L7LogEntry) GetFields() map[string]string {
	if x != nil {
		return x.Fields
	}
	return nil
}
// LogEntry is a single Cilium proxy access-log record. Common L3/L4 metadata
// lives in named fields; protocol-specific payload is carried by the L7 oneof
// (HTTP, Kafka, or a generic L7 entry).
type LogEntry struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// The time that Cilium filter captured this log entry,
	// in, nanoseconds since 1/1/1970.
	Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
	// 'true' if the request was received by an ingress listener,
	// 'false' if received by an egress listener
	IsIngress bool      `protobuf:"varint,15,opt,name=is_ingress,json=isIngress,proto3" json:"is_ingress,omitempty"`
	EntryType EntryType `protobuf:"varint,3,opt,name=entry_type,json=entryType,proto3,enum=cilium.EntryType" json:"entry_type,omitempty"`
	// Cilium network policy resource name
	PolicyName string `protobuf:"bytes,4,opt,name=policy_name,json=policyName,proto3" json:"policy_name,omitempty"`
	// proxy_id identifies the listener this message relates to,
	// as configured via the bpf_metadata listener filter
	ProxyId uint32 `protobuf:"varint,17,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
	// Cilium rule reference
	CiliumRuleRef string `protobuf:"bytes,5,opt,name=cilium_rule_ref,json=ciliumRuleRef,proto3" json:"cilium_rule_ref,omitempty"`
	// Cilium security ID of the source and destination
	SourceSecurityId      uint32 `protobuf:"varint,6,opt,name=source_security_id,json=sourceSecurityId,proto3" json:"source_security_id,omitempty"`
	DestinationSecurityId uint32 `protobuf:"varint,16,opt,name=destination_security_id,json=destinationSecurityId,proto3" json:"destination_security_id,omitempty"`
	// These fields record the original source and destination addresses,
	// stored in ipv4:port or [ipv6]:port format.
	SourceAddress      string `protobuf:"bytes,7,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"`
	DestinationAddress string `protobuf:"bytes,8,opt,name=destination_address,json=destinationAddress,proto3" json:"destination_address,omitempty"`
	// Types that are valid to be assigned to L7:
	//
	//	*LogEntry_Http
	//	*LogEntry_Kafka
	//	*LogEntry_GenericL7
	L7            isLogEntry_L7 `protobuf_oneof:"l7"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-registers it with the generated message info.
func (x *LogEntry) Reset() {
	*x = LogEntry{}
	mi := &file_cilium_api_accesslog_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *LogEntry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *LogEntry as a protobuf message.
func (*LogEntry) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message info; a nil receiver yields a type-only view.
func (x *LogEntry) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_accesslog_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead.
func (*LogEntry) Descriptor() ([]byte, []int) {
	return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{4}
}

// GetTimestamp returns the timestamp field; 0 when x is nil.
func (x *LogEntry) GetTimestamp() uint64 {
	if x != nil {
		return x.Timestamp
	}
	return 0
}

// GetIsIngress returns the is_ingress field; false when x is nil.
func (x *LogEntry) GetIsIngress() bool {
	if x != nil {
		return x.IsIngress
	}
	return false
}

// GetEntryType returns the entry_type field; the enum zero value
// (EntryType_Request) when x is nil.
func (x *LogEntry) GetEntryType() EntryType {
	if x != nil {
		return x.EntryType
	}
	return EntryType_Request
}

// GetPolicyName returns the policy_name field; "" when x is nil.
func (x *LogEntry) GetPolicyName() string {
	if x != nil {
		return x.PolicyName
	}
	return ""
}

// GetProxyId returns the proxy_id field; 0 when x is nil.
func (x *LogEntry) GetProxyId() uint32 {
	if x != nil {
		return x.ProxyId
	}
	return 0
}

// GetCiliumRuleRef returns the cilium_rule_ref field; "" when x is nil.
func (x *LogEntry) GetCiliumRuleRef() string {
	if x != nil {
		return x.CiliumRuleRef
	}
	return ""
}

// GetSourceSecurityId returns the source_security_id field; 0 when x is nil.
func (x *LogEntry) GetSourceSecurityId() uint32 {
	if x != nil {
		return x.SourceSecurityId
	}
	return 0
}

// GetDestinationSecurityId returns the destination_security_id field; 0 when x is nil.
func (x *LogEntry) GetDestinationSecurityId() uint32 {
	if x != nil {
		return x.DestinationSecurityId
	}
	return 0
}

// GetSourceAddress returns the source_address field; "" when x is nil.
func (x *LogEntry) GetSourceAddress() string {
	if x != nil {
		return x.SourceAddress
	}
	return ""
}

// GetDestinationAddress returns the destination_address field; "" when x is nil.
func (x *LogEntry) GetDestinationAddress() string {
	if x != nil {
		return x.DestinationAddress
	}
	return ""
}

// GetL7 returns the raw oneof wrapper; nil when x is nil or the oneof is unset.
func (x *LogEntry) GetL7() isLogEntry_L7 {
	if x != nil {
		return x.L7
	}
	return nil
}

// GetHttp returns the http oneof variant; nil when unset or another variant is set.
func (x *LogEntry) GetHttp() *HttpLogEntry {
	if x != nil {
		if x, ok := x.L7.(*LogEntry_Http); ok {
			return x.Http
		}
	}
	return nil
}

// GetKafka returns the kafka oneof variant; nil when unset or another variant is set.
func (x *LogEntry) GetKafka() *KafkaLogEntry {
	if x != nil {
		if x, ok := x.L7.(*LogEntry_Kafka); ok {
			return x.Kafka
		}
	}
	return nil
}

// GetGenericL7 returns the generic_l7 oneof variant; nil when unset or another variant is set.
func (x *LogEntry) GetGenericL7() *L7LogEntry {
	if x != nil {
		if x, ok := x.L7.(*LogEntry_GenericL7); ok {
			return x.GenericL7
		}
	}
	return nil
}

// isLogEntry_L7 is the sealed interface implemented by all L7 oneof wrappers.
type isLogEntry_L7 interface {
	isLogEntry_L7()
}

// LogEntry_Http wraps an HTTP log entry as the L7 oneof variant (field 100).
type LogEntry_Http struct {
	Http *HttpLogEntry `protobuf:"bytes,100,opt,name=http,proto3,oneof"`
}

// LogEntry_Kafka wraps a Kafka log entry as the L7 oneof variant (field 101).
type LogEntry_Kafka struct {
	Kafka *KafkaLogEntry `protobuf:"bytes,101,opt,name=kafka,proto3,oneof"`
}

// LogEntry_GenericL7 wraps a generic L7 log entry as the L7 oneof variant (field 102).
type LogEntry_GenericL7 struct {
	GenericL7 *L7LogEntry `protobuf:"bytes,102,opt,name=generic_l7,json=genericL7,proto3,oneof"`
}

func (*LogEntry_Http) isLogEntry_L7() {}

func (*LogEntry_Kafka) isLogEntry_L7() {}

func (*LogEntry_GenericL7) isLogEntry_L7() {}
// File_cilium_api_accesslog_proto is the runtime file descriptor for
// cilium/api/accesslog.proto, populated by file_cilium_api_accesslog_proto_init.
var File_cilium_api_accesslog_proto protoreflect.FileDescriptor

// file_cilium_api_accesslog_proto_rawDesc is the serialized FileDescriptorProto
// for cilium/api/accesslog.proto. Do not modify: the bytes must match the
// indices in file_cilium_api_accesslog_proto_depIdxs and the message/enum counts
// passed to the TypeBuilder below.
const file_cilium_api_accesslog_proto_rawDesc = "" +
	"\n" +
	"\x1acilium/api/accesslog.proto\x12\x06cilium\"2\n" +
	"\bKeyValue\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value\"\xdd\x02\n" +
	"\fHttpLogEntry\x129\n" +
	"\rhttp_protocol\x18\x01 \x01(\x0e2\x14.cilium.HttpProtocolR\fhttpProtocol\x12\x16\n" +
	"\x06scheme\x18\x02 \x01(\tR\x06scheme\x12\x12\n" +
	"\x04host\x18\x03 \x01(\tR\x04host\x12\x12\n" +
	"\x04path\x18\x04 \x01(\tR\x04path\x12\x16\n" +
	"\x06method\x18\x05 \x01(\tR\x06method\x12*\n" +
	"\aheaders\x18\x06 \x03(\v2\x10.cilium.KeyValueR\aheaders\x12\x16\n" +
	"\x06status\x18\a \x01(\rR\x06status\x129\n" +
	"\x0fmissing_headers\x18\b \x03(\v2\x10.cilium.KeyValueR\x0emissingHeaders\x12;\n" +
	"\x10rejected_headers\x18\t \x03(\v2\x10.cilium.KeyValueR\x0frejectedHeaders\"\xa7\x01\n" +
	"\rKafkaLogEntry\x12%\n" +
	"\x0ecorrelation_id\x18\x01 \x01(\x05R\rcorrelationId\x12\x1d\n" +
	"\n" +
	"error_code\x18\x02 \x01(\x05R\terrorCode\x12\x1f\n" +
	"\vapi_version\x18\x03 \x01(\x05R\n" +
	"apiVersion\x12\x17\n" +
	"\aapi_key\x18\x04 \x01(\x05R\x06apiKey\x12\x16\n" +
	"\x06topics\x18\x05 \x03(\tR\x06topics\"\x95\x01\n" +
	"\n" +
	"L7LogEntry\x12\x14\n" +
	"\x05proto\x18\x01 \x01(\tR\x05proto\x126\n" +
	"\x06fields\x18\x02 \x03(\v2\x1e.cilium.L7LogEntry.FieldsEntryR\x06fields\x1a9\n" +
	"\vFieldsEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb1\x04\n" +
	"\bLogEntry\x12\x1c\n" +
	"\ttimestamp\x18\x01 \x01(\x04R\ttimestamp\x12\x1d\n" +
	"\n" +
	"is_ingress\x18\x0f \x01(\bR\tisIngress\x120\n" +
	"\n" +
	"entry_type\x18\x03 \x01(\x0e2\x11.cilium.EntryTypeR\tentryType\x12\x1f\n" +
	"\vpolicy_name\x18\x04 \x01(\tR\n" +
	"policyName\x12\x19\n" +
	"\bproxy_id\x18\x11 \x01(\rR\aproxyId\x12&\n" +
	"\x0fcilium_rule_ref\x18\x05 \x01(\tR\rciliumRuleRef\x12,\n" +
	"\x12source_security_id\x18\x06 \x01(\rR\x10sourceSecurityId\x126\n" +
	"\x17destination_security_id\x18\x10 \x01(\rR\x15destinationSecurityId\x12%\n" +
	"\x0esource_address\x18\a \x01(\tR\rsourceAddress\x12/\n" +
	"\x13destination_address\x18\b \x01(\tR\x12destinationAddress\x12*\n" +
	"\x04http\x18d \x01(\v2\x14.cilium.HttpLogEntryH\x00R\x04http\x12-\n" +
	"\x05kafka\x18e \x01(\v2\x15.cilium.KafkaLogEntryH\x00R\x05kafka\x123\n" +
	"\n" +
	"generic_l7\x18f \x01(\v2\x12.cilium.L7LogEntryH\x00R\tgenericL7B\x04\n" +
	"\x02l7*1\n" +
	"\fHttpProtocol\x12\n" +
	"\n" +
	"\x06HTTP10\x10\x00\x12\n" +
	"\n" +
	"\x06HTTP11\x10\x01\x12\t\n" +
	"\x05HTTP2\x10\x02*2\n" +
	"\tEntryType\x12\v\n" +
	"\aRequest\x10\x00\x12\f\n" +
	"\bResponse\x10\x01\x12\n" +
	"\n" +
	"\x06Denied\x10\x02B.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"

// Once-guarded cache for the gzip-compressed descriptor returned by the
// deprecated Descriptor() methods.
var (
	file_cilium_api_accesslog_proto_rawDescOnce sync.Once
	file_cilium_api_accesslog_proto_rawDescData []byte
)

// file_cilium_api_accesslog_proto_rawDescGZIP compresses the raw descriptor
// exactly once (via sync.Once) and returns the cached gzip bytes.
// unsafe.Slice over StringData avoids copying the descriptor string.
func file_cilium_api_accesslog_proto_rawDescGZIP() []byte {
	file_cilium_api_accesslog_proto_rawDescOnce.Do(func() {
		file_cilium_api_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_accesslog_proto_rawDesc), len(file_cilium_api_accesslog_proto_rawDesc)))
	})
	return file_cilium_api_accesslog_proto_rawDescData
}
// Runtime type tables for the accesslog proto file: 2 enums and 6 messages
// (the 6th is the synthetic FieldsEntry map-entry message).
var file_cilium_api_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_cilium_api_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 6)

// file_cilium_api_accesslog_proto_goTypes maps descriptor type indices to Go types;
// the order here is referenced by file_cilium_api_accesslog_proto_depIdxs below.
var file_cilium_api_accesslog_proto_goTypes = []any{
	(HttpProtocol)(0),     // 0: cilium.HttpProtocol
	(EntryType)(0),        // 1: cilium.EntryType
	(*KeyValue)(nil),      // 2: cilium.KeyValue
	(*HttpLogEntry)(nil),  // 3: cilium.HttpLogEntry
	(*KafkaLogEntry)(nil), // 4: cilium.KafkaLogEntry
	(*L7LogEntry)(nil),    // 5: cilium.L7LogEntry
	(*LogEntry)(nil),      // 6: cilium.LogEntry
	nil,                   // 7: cilium.L7LogEntry.FieldsEntry
}

// file_cilium_api_accesslog_proto_depIdxs records, per field, the index into
// goTypes of the field's type; the trailing entries delimit the sub-lists used
// for methods and extensions (all empty here).
var file_cilium_api_accesslog_proto_depIdxs = []int32{
	0, // 0: cilium.HttpLogEntry.http_protocol:type_name -> cilium.HttpProtocol
	2, // 1: cilium.HttpLogEntry.headers:type_name -> cilium.KeyValue
	2, // 2: cilium.HttpLogEntry.missing_headers:type_name -> cilium.KeyValue
	2, // 3: cilium.HttpLogEntry.rejected_headers:type_name -> cilium.KeyValue
	7, // 4: cilium.L7LogEntry.fields:type_name -> cilium.L7LogEntry.FieldsEntry
	1, // 5: cilium.LogEntry.entry_type:type_name -> cilium.EntryType
	3, // 6: cilium.LogEntry.http:type_name -> cilium.HttpLogEntry
	4, // 7: cilium.LogEntry.kafka:type_name -> cilium.KafkaLogEntry
	5, // 8: cilium.LogEntry.generic_l7:type_name -> cilium.L7LogEntry
	9, // [9:9] is the sub-list for method output_type
	9, // [9:9] is the sub-list for method input_type
	9, // [9:9] is the sub-list for extension type_name
	9, // [9:9] is the sub-list for extension extendee
	0, // [0:9] is the sub-list for field type_name
}
func init() { file_cilium_api_accesslog_proto_init() }

// file_cilium_api_accesslog_proto_init builds the file descriptor and wires up
// all generated types. It is idempotent: a non-nil File_cilium_api_accesslog_proto
// means initialization already ran.
func file_cilium_api_accesslog_proto_init() {
	if File_cilium_api_accesslog_proto != nil {
		return
	}
	// Register the L7 oneof wrapper types for the LogEntry message (index 4).
	file_cilium_api_accesslog_proto_msgTypes[4].OneofWrappers = []any{
		(*LogEntry_Http)(nil),
		(*LogEntry_Kafka)(nil),
		(*LogEntry_GenericL7)(nil),
	}
	// Zero-size type used only to resolve this package's import path via reflection.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_accesslog_proto_rawDesc), len(file_cilium_api_accesslog_proto_rawDesc)),
			NumEnums:      2,
			NumMessages:   6,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_cilium_api_accesslog_proto_goTypes,
		DependencyIndexes: file_cilium_api_accesslog_proto_depIdxs,
		EnumInfos:         file_cilium_api_accesslog_proto_enumTypes,
		MessageInfos:      file_cilium_api_accesslog_proto_msgTypes,
	}.Build()
	File_cilium_api_accesslog_proto = out.File
	// Release the construction-time tables; the built File now owns the data.
	file_cilium_api_accesslog_proto_goTypes = nil
	file_cilium_api_accesslog_proto_depIdxs = nil
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/accesslog.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
// (blank references keep the generated import set compile-clean even when a
// particular message needs no rules from a given package)
var (
	_ = bytes.MinRead
	_ = errors.New("")
	_ = fmt.Print
	_ = utf8.UTFMax
	_ = (*regexp.Regexp)(nil)
	_ = (*strings.Reader)(nil)
	_ = net.IPv4len
	_ = time.Duration(0)
	_ = (*url.URL)(nil)
	_ = (*mail.Address)(nil)
	_ = anypb.Any{}
	_ = sort.Sort
)
// Validate checks the field values on KeyValue with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *KeyValue) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on KeyValue with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in KeyValueMultiError, or nil
// if none found.
func (m *KeyValue) ValidateAll() error {
	return m.validate(true)
}

// validate backs Validate and ValidateAll. KeyValue declares no field rules,
// so errors stays empty and the result is always nil (for a nil receiver too).
func (m *KeyValue) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for Key
	// no validation rules for Value
	if len(errors) > 0 {
		return KeyValueMultiError(errors)
	}
	return nil
}

// KeyValueMultiError is an error wrapping multiple validation errors returned
// by KeyValue.ValidateAll() if the designated constraints aren't met.
type KeyValueMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m KeyValueMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m KeyValueMultiError) AllErrors() []error { return m }

// KeyValueValidationError is the validation error returned by
// KeyValue.Validate if the designated constraints aren't met.
type KeyValueValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e KeyValueValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e KeyValueValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e KeyValueValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e KeyValueValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e KeyValueValidationError) ErrorName() string { return "KeyValueValidationError" }

// Error satisfies the builtin error interface
func (e KeyValueValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sKeyValue.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time assertions that KeyValueValidationError satisfies error and
// the extended validation-error interface.
var _ error = KeyValueValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = KeyValueValidationError{}
// Validate checks the field values on HttpLogEntry with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *HttpLogEntry) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on HttpLogEntry with the rules defined
// in the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in HttpLogEntryMultiError, or
// nil if none found.
func (m *HttpLogEntry) ValidateAll() error {
	return m.validate(true)
}

// validate backs Validate and ValidateAll. Scalar fields have no rules; the
// three repeated KeyValue fields are validated recursively. When all is true
// every violation is collected, otherwise the first one is returned.
func (m *HttpLogEntry) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for HttpProtocol
	// no validation rules for Scheme
	// no validation rules for Host
	// no validation rules for Path
	// no validation rules for Method
	// Recurse into each embedded KeyValue in Headers.
	for idx, item := range m.GetHeaders() {
		_, _ = idx, item
		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, HttpLogEntryValidationError{
						field:  fmt.Sprintf("Headers[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, HttpLogEntryValidationError{
						field:  fmt.Sprintf("Headers[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return HttpLogEntryValidationError{
					field:  fmt.Sprintf("Headers[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	}
	// no validation rules for Status
	// Recurse into each embedded KeyValue in MissingHeaders.
	for idx, item := range m.GetMissingHeaders() {
		_, _ = idx, item
		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, HttpLogEntryValidationError{
						field:  fmt.Sprintf("MissingHeaders[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, HttpLogEntryValidationError{
						field:  fmt.Sprintf("MissingHeaders[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return HttpLogEntryValidationError{
					field:  fmt.Sprintf("MissingHeaders[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	}
	// Recurse into each embedded KeyValue in RejectedHeaders.
	for idx, item := range m.GetRejectedHeaders() {
		_, _ = idx, item
		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, HttpLogEntryValidationError{
						field:  fmt.Sprintf("RejectedHeaders[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, HttpLogEntryValidationError{
						field:  fmt.Sprintf("RejectedHeaders[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return HttpLogEntryValidationError{
					field:  fmt.Sprintf("RejectedHeaders[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	}
	if len(errors) > 0 {
		return HttpLogEntryMultiError(errors)
	}
	return nil
}

// HttpLogEntryMultiError is an error wrapping multiple validation errors
// returned by HttpLogEntry.ValidateAll() if the designated constraints aren't met.
type HttpLogEntryMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m HttpLogEntryMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m HttpLogEntryMultiError) AllErrors() []error { return m }

// HttpLogEntryValidationError is the validation error returned by
// HttpLogEntry.Validate if the designated constraints aren't met.
type HttpLogEntryValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e HttpLogEntryValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e HttpLogEntryValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e HttpLogEntryValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e HttpLogEntryValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e HttpLogEntryValidationError) ErrorName() string { return "HttpLogEntryValidationError" }

// Error satisfies the builtin error interface
func (e HttpLogEntryValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sHttpLogEntry.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time assertions that HttpLogEntryValidationError satisfies error
// and the extended validation-error interface.
var _ error = HttpLogEntryValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = HttpLogEntryValidationError{}
// Validate checks the field values on KafkaLogEntry with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *KafkaLogEntry) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on KafkaLogEntry with the rules defined
// in the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in KafkaLogEntryMultiError, or
// nil if none found.
func (m *KafkaLogEntry) ValidateAll() error {
	return m.validate(true)
}

// validate backs Validate and ValidateAll. KafkaLogEntry declares no field
// rules, so errors stays empty and the result is always nil.
func (m *KafkaLogEntry) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for CorrelationId
	// no validation rules for ErrorCode
	// no validation rules for ApiVersion
	// no validation rules for ApiKey
	if len(errors) > 0 {
		return KafkaLogEntryMultiError(errors)
	}
	return nil
}

// KafkaLogEntryMultiError is an error wrapping multiple validation errors
// returned by KafkaLogEntry.ValidateAll() if the designated constraints
// aren't met.
type KafkaLogEntryMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m KafkaLogEntryMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m KafkaLogEntryMultiError) AllErrors() []error { return m }

// KafkaLogEntryValidationError is the validation error returned by
// KafkaLogEntry.Validate if the designated constraints aren't met.
type KafkaLogEntryValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e KafkaLogEntryValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e KafkaLogEntryValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e KafkaLogEntryValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e KafkaLogEntryValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e KafkaLogEntryValidationError) ErrorName() string { return "KafkaLogEntryValidationError" }

// Error satisfies the builtin error interface
func (e KafkaLogEntryValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sKafkaLogEntry.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time assertions that KafkaLogEntryValidationError satisfies error
// and the extended validation-error interface.
var _ error = KafkaLogEntryValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = KafkaLogEntryValidationError{}
// Validate checks the field values on L7LogEntry with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *L7LogEntry) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on L7LogEntry with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in L7LogEntryMultiError, or
// nil if none found.
func (m *L7LogEntry) ValidateAll() error {
	return m.validate(true)
}

// validate backs Validate and ValidateAll. L7LogEntry declares no field
// rules, so errors stays empty and the result is always nil.
func (m *L7LogEntry) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for Proto
	// no validation rules for Fields
	if len(errors) > 0 {
		return L7LogEntryMultiError(errors)
	}
	return nil
}

// L7LogEntryMultiError is an error wrapping multiple validation errors
// returned by L7LogEntry.ValidateAll() if the designated constraints aren't met.
type L7LogEntryMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m L7LogEntryMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m L7LogEntryMultiError) AllErrors() []error { return m }

// L7LogEntryValidationError is the validation error returned by
// L7LogEntry.Validate if the designated constraints aren't met.
type L7LogEntryValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e L7LogEntryValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e L7LogEntryValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e L7LogEntryValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e L7LogEntryValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e L7LogEntryValidationError) ErrorName() string { return "L7LogEntryValidationError" }

// Error satisfies the builtin error interface
func (e L7LogEntryValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sL7LogEntry.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time assertions that L7LogEntryValidationError satisfies error and
// the extended validation-error interface.
var _ error = L7LogEntryValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = L7LogEntryValidationError{}
// Validate checks the field values on LogEntry with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *LogEntry) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on LogEntry with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in LogEntryMultiError, or nil
// if none found.
func (m *LogEntry) ValidateAll() error {
	return m.validate(true)
}

// validate backs Validate and ValidateAll. Scalar fields have no rules; the
// set L7 oneof variant (if any) is checked for a typed-nil wrapper and then
// validated recursively. When all is true every violation is collected,
// otherwise the first one is returned.
func (m *LogEntry) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for Timestamp
	// no validation rules for IsIngress
	// no validation rules for EntryType
	// no validation rules for PolicyName
	// no validation rules for ProxyId
	// no validation rules for CiliumRuleRef
	// no validation rules for SourceSecurityId
	// no validation rules for DestinationSecurityId
	// no validation rules for SourceAddress
	// no validation rules for DestinationAddress
	switch v := m.L7.(type) {
	case *LogEntry_Http:
		// A typed-nil wrapper is rejected before recursing into the payload.
		if v == nil {
			err := LogEntryValidationError{
				field:  "L7",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}
		if all {
			switch v := interface{}(m.GetHttp()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, LogEntryValidationError{
						field:  "Http",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, LogEntryValidationError{
						field:  "Http",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetHttp()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return LogEntryValidationError{
					field:  "Http",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	case *LogEntry_Kafka:
		// A typed-nil wrapper is rejected before recursing into the payload.
		if v == nil {
			err := LogEntryValidationError{
				field:  "L7",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}
		if all {
			switch v := interface{}(m.GetKafka()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, LogEntryValidationError{
						field:  "Kafka",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, LogEntryValidationError{
						field:  "Kafka",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetKafka()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return LogEntryValidationError{
					field:  "Kafka",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	case *LogEntry_GenericL7:
		// A typed-nil wrapper is rejected before recursing into the payload.
		if v == nil {
			err := LogEntryValidationError{
				field:  "L7",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}
		if all {
			switch v := interface{}(m.GetGenericL7()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, LogEntryValidationError{
						field:  "GenericL7",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, LogEntryValidationError{
						field:  "GenericL7",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetGenericL7()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return LogEntryValidationError{
					field:  "GenericL7",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	default:
		_ = v // ensures v is used
	}
	if len(errors) > 0 {
		return LogEntryMultiError(errors)
	}
	return nil
}

// LogEntryMultiError is an error wrapping multiple validation errors returned
// by LogEntry.ValidateAll() if the designated constraints aren't met.
type LogEntryMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m LogEntryMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m LogEntryMultiError) AllErrors() []error { return m }

// LogEntryValidationError is the validation error returned by
// LogEntry.Validate if the designated constraints aren't met.
type LogEntryValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e LogEntryValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e LogEntryValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e LogEntryValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e LogEntryValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e LogEntryValidationError) ErrorName() string { return "LogEntryValidationError" }

// Error satisfies the builtin error interface
func (e LogEntryValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sLogEntry.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time assertions that LogEntryValidationError satisfies error and
// the extended validation-error interface.
var _ error = LogEntryValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = LogEntryValidationError{}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/bpf_metadata.proto
package cilium
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// BpfMetadata is the generated Go type for the cilium.BpfMetadata proto
// message (source: cilium/api/bpf_metadata.proto). It is the configuration
// for the Cilium bpf_metadata Envoy listener filter.
type BpfMetadata struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// File system root for bpf. Bpf will not be used if left empty.
	BpfRoot string `protobuf:"bytes,1,opt,name=bpf_root,json=bpfRoot,proto3" json:"bpf_root,omitempty"`
	// 'true' if the filter is on ingress listener, 'false' for egress listener.
	IsIngress bool `protobuf:"varint,2,opt,name=is_ingress,json=isIngress,proto3" json:"is_ingress,omitempty"`
	// Use of the original source address requires kernel datapath support which
	// may or may not be available. 'true' if original source address
	// should be used. Original source address use may still be
	// skipped in scenarios where it is known to not work.
	UseOriginalSourceAddress bool `protobuf:"varint,3,opt,name=use_original_source_address,json=useOriginalSourceAddress,proto3" json:"use_original_source_address,omitempty"`
	// True if the listener is used for an L7 LB. In this case policy enforcement is done on the
	// destination selected by the listener rather than on the original destination address. For
	// local sources the source endpoint ID is set in socket mark instead of source security ID if
	// 'use_original_source_address' is also true, so that the local source's egress policy is
	// enforced on the bpf datapath.
	// Only valid for egress.
	IsL7Lb bool `protobuf:"varint,4,opt,name=is_l7lb,json=isL7lb,proto3" json:"is_l7lb,omitempty"`
	// Source address to be used whenever the original source address is not used.
	// Either ipv4_source_address or ipv6_source_address depending on the address
	// family of the destination address. If left empty, and no Envoy Cluster Bind
	// Config is provided, the source address will be picked by the local IP stack.
	Ipv4SourceAddress string `protobuf:"bytes,5,opt,name=ipv4_source_address,json=ipv4SourceAddress,proto3" json:"ipv4_source_address,omitempty"`
	Ipv6SourceAddress string `protobuf:"bytes,6,opt,name=ipv6_source_address,json=ipv6SourceAddress,proto3" json:"ipv6_source_address,omitempty"`
	// True if policy should be enforced on l7 LB used. The policy bound to the configured
	// ipv[46]_source_addresses, which must be explicitly set, applies. Ingress policy is
	// enforced on the security identity of the original (e.g., external) source. Egress
	// policy is enforced on the security identity of the backend selected by the load balancer.
	//
	// Deprecation note: This option will be forced 'true' and deprecated when Cilium 1.15 is
	// the oldest supported release.
	EnforcePolicyOnL7Lb bool `protobuf:"varint,7,opt,name=enforce_policy_on_l7lb,json=enforcePolicyOnL7lb,proto3" json:"enforce_policy_on_l7lb,omitempty"`
	// proxy_id is passed to access log messages and allows relating access log messages to
	// listeners.
	ProxyId uint32 `protobuf:"varint,8,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
	// policy_update_warning_limit is the time in milliseconds after which a warning is logged if
	// network policy update took longer
	// Deprecated, has no effect.
	PolicyUpdateWarningLimit *durationpb.Duration `protobuf:"bytes,9,opt,name=policy_update_warning_limit,json=policyUpdateWarningLimit,proto3" json:"policy_update_warning_limit,omitempty"`
	// l7lb_policy_name is the name of the L7LB policy that is enforced on the listener.
	// This is optional field.
	L7LbPolicyName string `protobuf:"bytes,10,opt,name=l7lb_policy_name,json=l7lbPolicyName,proto3" json:"l7lb_policy_name,omitempty"`
	// original_source_so_linger_time specifies the number of seconds to linger on socket close.
	// Only used if use_original_source_address is also true, and the original source address
	// is used in the upstream connections. Value 0 causes connections to be reset on close (TCP RST).
	// Values above 0 cause the Envoy worker thread to block up to the given number of seconds while
	// the connection is closing. If the timeout is reached the connection is being reset (TCP RST).
	// This option may be needed for allowing new connections to successfully bind to the original
	// source address and port.
	// Optional (proto3 'optional'): nil means unset; see GetOriginalSourceSoLingerTime.
	OriginalSourceSoLingerTime *uint32 `protobuf:"varint,11,opt,name=original_source_so_linger_time,json=originalSourceSoLingerTime,proto3,oneof" json:"original_source_so_linger_time,omitempty"`
	// Name of the pin file for opening bpf ipcache in "<bpf_root>/tc/globals/". If empty, defaults to
	// "cilium_ipcache" for backwards compatibility.
	// Only used if 'bpf_root' is non-empty and 'use_nphds' is 'false'.
	IpcacheName string `protobuf:"bytes,12,opt,name=ipcache_name,json=ipcacheName,proto3" json:"ipcache_name,omitempty"`
	// Use Network Policy Hosts xDS (NPHDS) protocol to sync IP/ID mappings.
	// Network Policy xDS (NPDS) will only be used if this is 'true' or 'bpf_root' is non-empty.
	// If 'use_nphds' is 'false' ipcache named by 'ipcache_name' is used instead.
	UseNphds bool `protobuf:"varint,13,opt,name=use_nphds,json=useNphds,proto3" json:"use_nphds,omitempty"`
	// Duration to reuse ipcache results until the entry is looked up from bpf ipcache again.
	// Defaults to 3 milliseconds.
	CacheEntryTtl *durationpb.Duration `protobuf:"bytes,14,opt,name=cache_entry_ttl,json=cacheEntryTtl,proto3" json:"cache_entry_ttl,omitempty"`
	// Cache is garbage collected at interval 10 times the ttl (default 30 ms).
	CacheGcInterval *durationpb.Duration `protobuf:"bytes,15,opt,name=cache_gc_interval,json=cacheGcInterval,proto3" json:"cache_gc_interval,omitempty"`
	// unknownFields and sizeCache are internal protobuf-runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches the generated message info,
// as required by the protobuf runtime.
func (x *BpfMetadata) Reset() {
	*x = BpfMetadata{}
	mi := &file_cilium_api_bpf_metadata_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the compact text-format representation of the message.
func (x *BpfMetadata) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *BpfMetadata as a protobuf message.
func (*BpfMetadata) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *BpfMetadata) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_bpf_metadata_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BpfMetadata.ProtoReflect.Descriptor instead.
func (*BpfMetadata) Descriptor() ([]byte, []int) {
	return file_cilium_api_bpf_metadata_proto_rawDescGZIP(), []int{0}
}

// The Get* accessors below are nil-safe: each returns the field's zero
// value when the receiver (or, for optional fields, the field pointer)
// is nil.

func (x *BpfMetadata) GetBpfRoot() string {
	if x != nil {
		return x.BpfRoot
	}
	return ""
}

func (x *BpfMetadata) GetIsIngress() bool {
	if x != nil {
		return x.IsIngress
	}
	return false
}

func (x *BpfMetadata) GetUseOriginalSourceAddress() bool {
	if x != nil {
		return x.UseOriginalSourceAddress
	}
	return false
}

func (x *BpfMetadata) GetIsL7Lb() bool {
	if x != nil {
		return x.IsL7Lb
	}
	return false
}

func (x *BpfMetadata) GetIpv4SourceAddress() string {
	if x != nil {
		return x.Ipv4SourceAddress
	}
	return ""
}

func (x *BpfMetadata) GetIpv6SourceAddress() string {
	if x != nil {
		return x.Ipv6SourceAddress
	}
	return ""
}

func (x *BpfMetadata) GetEnforcePolicyOnL7Lb() bool {
	if x != nil {
		return x.EnforcePolicyOnL7Lb
	}
	return false
}

func (x *BpfMetadata) GetProxyId() uint32 {
	if x != nil {
		return x.ProxyId
	}
	return 0
}

func (x *BpfMetadata) GetPolicyUpdateWarningLimit() *durationpb.Duration {
	if x != nil {
		return x.PolicyUpdateWarningLimit
	}
	return nil
}

func (x *BpfMetadata) GetL7LbPolicyName() string {
	if x != nil {
		return x.L7LbPolicyName
	}
	return ""
}

// GetOriginalSourceSoLingerTime dereferences the optional field; 0 is
// returned both when unset and when explicitly set to 0.
func (x *BpfMetadata) GetOriginalSourceSoLingerTime() uint32 {
	if x != nil && x.OriginalSourceSoLingerTime != nil {
		return *x.OriginalSourceSoLingerTime
	}
	return 0
}

func (x *BpfMetadata) GetIpcacheName() string {
	if x != nil {
		return x.IpcacheName
	}
	return ""
}

func (x *BpfMetadata) GetUseNphds() bool {
	if x != nil {
		return x.UseNphds
	}
	return false
}

func (x *BpfMetadata) GetCacheEntryTtl() *durationpb.Duration {
	if x != nil {
		return x.CacheEntryTtl
	}
	return nil
}

func (x *BpfMetadata) GetCacheGcInterval() *durationpb.Duration {
	if x != nil {
		return x.CacheGcInterval
	}
	return nil
}

// File_cilium_api_bpf_metadata_proto is the file descriptor for
// cilium/api/bpf_metadata.proto, populated by init below.
var File_cilium_api_bpf_metadata_proto protoreflect.FileDescriptor

// file_cilium_api_bpf_metadata_proto_rawDesc is the serialized
// FileDescriptorProto, embedded as a string constant. Do not modify.
const file_cilium_api_bpf_metadata_proto_rawDesc = "" +
	"\n" +
	"\x1dcilium/api/bpf_metadata.proto\x12\x06cilium\x1a\x1egoogle/protobuf/duration.proto\"\x89\x06\n" +
	"\vBpfMetadata\x12\x19\n" +
	"\bbpf_root\x18\x01 \x01(\tR\abpfRoot\x12\x1d\n" +
	"\n" +
	"is_ingress\x18\x02 \x01(\bR\tisIngress\x12=\n" +
	"\x1buse_original_source_address\x18\x03 \x01(\bR\x18useOriginalSourceAddress\x12\x17\n" +
	"\ais_l7lb\x18\x04 \x01(\bR\x06isL7lb\x12.\n" +
	"\x13ipv4_source_address\x18\x05 \x01(\tR\x11ipv4SourceAddress\x12.\n" +
	"\x13ipv6_source_address\x18\x06 \x01(\tR\x11ipv6SourceAddress\x123\n" +
	"\x16enforce_policy_on_l7lb\x18\a \x01(\bR\x13enforcePolicyOnL7lb\x12\x19\n" +
	"\bproxy_id\x18\b \x01(\rR\aproxyId\x12X\n" +
	"\x1bpolicy_update_warning_limit\x18\t \x01(\v2\x19.google.protobuf.DurationR\x18policyUpdateWarningLimit\x12(\n" +
	"\x10l7lb_policy_name\x18\n" +
	" \x01(\tR\x0el7lbPolicyName\x12G\n" +
	"\x1eoriginal_source_so_linger_time\x18\v \x01(\rH\x00R\x1aoriginalSourceSoLingerTime\x88\x01\x01\x12!\n" +
	"\fipcache_name\x18\f \x01(\tR\vipcacheName\x12\x1b\n" +
	"\tuse_nphds\x18\r \x01(\bR\buseNphds\x12A\n" +
	"\x0fcache_entry_ttl\x18\x0e \x01(\v2\x19.google.protobuf.DurationR\rcacheEntryTtl\x12E\n" +
	"\x11cache_gc_interval\x18\x0f \x01(\v2\x19.google.protobuf.DurationR\x0fcacheGcIntervalB!\n" +
	"\x1f_original_source_so_linger_timeB.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"

var (
	file_cilium_api_bpf_metadata_proto_rawDescOnce sync.Once
	file_cilium_api_bpf_metadata_proto_rawDescData []byte
)

// file_cilium_api_bpf_metadata_proto_rawDescGZIP lazily gzip-compresses the
// raw descriptor exactly once and returns the cached bytes (used by the
// deprecated Descriptor method).
func file_cilium_api_bpf_metadata_proto_rawDescGZIP() []byte {
	file_cilium_api_bpf_metadata_proto_rawDescOnce.Do(func() {
		file_cilium_api_bpf_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_bpf_metadata_proto_rawDesc), len(file_cilium_api_bpf_metadata_proto_rawDesc)))
	})
	return file_cilium_api_bpf_metadata_proto_rawDescData
}

var file_cilium_api_bpf_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_cilium_api_bpf_metadata_proto_goTypes = []any{
	(*BpfMetadata)(nil),         // 0: cilium.BpfMetadata
	(*durationpb.Duration)(nil), // 1: google.protobuf.Duration
}
var file_cilium_api_bpf_metadata_proto_depIdxs = []int32{
	1, // 0: cilium.BpfMetadata.policy_update_warning_limit:type_name -> google.protobuf.Duration
	1, // 1: cilium.BpfMetadata.cache_entry_ttl:type_name -> google.protobuf.Duration
	1, // 2: cilium.BpfMetadata.cache_gc_interval:type_name -> google.protobuf.Duration
	3, // [3:3] is the sub-list for method output_type
	3, // [3:3] is the sub-list for method input_type
	3, // [3:3] is the sub-list for extension type_name
	3, // [3:3] is the sub-list for extension extendee
	0, // [0:3] is the sub-list for field type_name
}

func init() { file_cilium_api_bpf_metadata_proto_init() }

// file_cilium_api_bpf_metadata_proto_init registers the file's types with the
// protobuf runtime. It is idempotent: a second call returns immediately.
func file_cilium_api_bpf_metadata_proto_init() {
	if File_cilium_api_bpf_metadata_proto != nil {
		return
	}
	// Register the oneof wrapper for the optional original_source_so_linger_time.
	file_cilium_api_bpf_metadata_proto_msgTypes[0].OneofWrappers = []any{}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_bpf_metadata_proto_rawDesc), len(file_cilium_api_bpf_metadata_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_cilium_api_bpf_metadata_proto_goTypes,
		DependencyIndexes: file_cilium_api_bpf_metadata_proto_depIdxs,
		MessageInfos:      file_cilium_api_bpf_metadata_proto_msgTypes,
	}.Build()
	File_cilium_api_bpf_metadata_proto = out.File
	file_cilium_api_bpf_metadata_proto_goTypes = nil
	file_cilium_api_bpf_metadata_proto_depIdxs = nil
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/bpf_metadata.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
var (
	_ = bytes.MinRead
	_ = errors.New("")
	_ = fmt.Print
	_ = utf8.UTFMax
	_ = (*regexp.Regexp)(nil)
	_ = (*strings.Reader)(nil)
	_ = net.IPv4len
	_ = time.Duration(0)
	_ = (*url.URL)(nil)
	_ = (*mail.Address)(nil)
	_ = anypb.Any{}
	_ = sort.Sort
)

// Validate checks the field values on BpfMetadata with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *BpfMetadata) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on BpfMetadata with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in BpfMetadataMultiError, or
// nil if none found.
func (m *BpfMetadata) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on first violation)
// and ValidateAll (all=true: collect every violation).
func (m *BpfMetadata) validate(all bool) error {
	// A nil message is trivially valid.
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for BpfRoot
	// no validation rules for IsIngress
	// no validation rules for UseOriginalSourceAddress
	// no validation rules for IsL7Lb
	// no validation rules for Ipv4SourceAddress
	// no validation rules for Ipv6SourceAddress
	// no validation rules for EnforcePolicyOnL7Lb
	// no validation rules for ProxyId
	// Recursively validate the embedded PolicyUpdateWarningLimit message,
	// collecting (all=true) or returning on first error (all=false).
	if all {
		switch v := interface{}(m.GetPolicyUpdateWarningLimit()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, BpfMetadataValidationError{
					field:  "PolicyUpdateWarningLimit",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, BpfMetadataValidationError{
					field:  "PolicyUpdateWarningLimit",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetPolicyUpdateWarningLimit()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return BpfMetadataValidationError{
				field:  "PolicyUpdateWarningLimit",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}
	// no validation rules for L7LbPolicyName
	// no validation rules for IpcacheName
	// no validation rules for UseNphds
	// Recursively validate the embedded CacheEntryTtl message.
	if all {
		switch v := interface{}(m.GetCacheEntryTtl()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, BpfMetadataValidationError{
					field:  "CacheEntryTtl",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, BpfMetadataValidationError{
					field:  "CacheEntryTtl",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetCacheEntryTtl()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return BpfMetadataValidationError{
				field:  "CacheEntryTtl",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}
	// Recursively validate the embedded CacheGcInterval message.
	if all {
		switch v := interface{}(m.GetCacheGcInterval()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, BpfMetadataValidationError{
					field:  "CacheGcInterval",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, BpfMetadataValidationError{
					field:  "CacheGcInterval",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetCacheGcInterval()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return BpfMetadataValidationError{
				field:  "CacheGcInterval",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}
	if m.OriginalSourceSoLingerTime != nil {
		// no validation rules for OriginalSourceSoLingerTime
	}
	if len(errors) > 0 {
		return BpfMetadataMultiError(errors)
	}
	return nil
}
// BpfMetadataMultiError wraps the multiple validation errors returned by
// BpfMetadata.ValidateAll() when the designated constraints aren't met.
type BpfMetadataMultiError []error

// Error implements the builtin error interface by concatenating all wrapped
// messages, separated by "; ".
func (m BpfMetadataMultiError) Error() string {
	var sb strings.Builder
	for i, wrapped := range m {
		if i > 0 {
			sb.WriteString("; ")
		}
		sb.WriteString(wrapped.Error())
	}
	return sb.String()
}

// AllErrors returns the full list of validation violation errors.
func (m BpfMetadataMultiError) AllErrors() []error { return m }
// BpfMetadataValidationError describes a single constraint violation found
// while validating a BpfMetadata message.
type BpfMetadataValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the offending field.
func (v BpfMetadataValidationError) Field() string { return v.field }

// Reason returns a human-readable description of the violation.
func (v BpfMetadataValidationError) Reason() string { return v.reason }

// Cause returns the underlying error, if any.
func (v BpfMetadataValidationError) Cause() error { return v.cause }

// Key reports whether the violation concerns a map key.
func (v BpfMetadataValidationError) Key() bool { return v.key }

// ErrorName returns the canonical name of this error type.
func (v BpfMetadataValidationError) ErrorName() string { return "BpfMetadataValidationError" }

// Error satisfies the builtin error interface.
func (v BpfMetadataValidationError) Error() string {
	keyPrefix := ""
	if v.key {
		keyPrefix = "key for "
	}
	causeSuffix := ""
	if v.cause != nil {
		causeSuffix = fmt.Sprintf(" | caused by: %v", v.cause)
	}
	return fmt.Sprintf(
		"invalid %sBpfMetadata.%s: %s%s",
		keyPrefix,
		v.field,
		v.reason,
		causeSuffix)
}

var _ error = BpfMetadataValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = BpfMetadataValidationError{}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/health_check_sink.proto
package cilium
import (
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// Health check event pipe sink.
// The health check event will be streamed as binary protobufs.
// Generated Go type for the cilium.HealthCheckEventPipeSink proto message
// (source: cilium/api/health_check_sink.proto).
type HealthCheckEventPipeSink struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Unix domain socket path where to connect to send health check events to.
	Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	// unknownFields and sizeCache are internal protobuf-runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches the generated message info.
func (x *HealthCheckEventPipeSink) Reset() {
	*x = HealthCheckEventPipeSink{}
	mi := &file_cilium_api_health_check_sink_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the compact text-format representation of the message.
func (x *HealthCheckEventPipeSink) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *HealthCheckEventPipeSink as a protobuf message.
func (*HealthCheckEventPipeSink) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *HealthCheckEventPipeSink) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_health_check_sink_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HealthCheckEventPipeSink.ProtoReflect.Descriptor instead.
func (*HealthCheckEventPipeSink) Descriptor() ([]byte, []int) {
	return file_cilium_api_health_check_sink_proto_rawDescGZIP(), []int{0}
}

// GetPath is a nil-safe accessor: it returns "" when the receiver is nil.
func (x *HealthCheckEventPipeSink) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

// File_cilium_api_health_check_sink_proto is the file descriptor for
// cilium/api/health_check_sink.proto, populated by init below.
var File_cilium_api_health_check_sink_proto protoreflect.FileDescriptor

// file_cilium_api_health_check_sink_proto_rawDesc is the serialized
// FileDescriptorProto, embedded as a string constant. Do not modify.
const file_cilium_api_health_check_sink_proto_rawDesc = "" +
	"\n" +
	"\"cilium/api/health_check_sink.proto\x12\x06cilium\x1a\x17validate/validate.proto\"7\n" +
	"\x18HealthCheckEventPipeSink\x12\x1b\n" +
	"\x04path\x18\x01 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\x04pathB.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"

var (
	file_cilium_api_health_check_sink_proto_rawDescOnce sync.Once
	file_cilium_api_health_check_sink_proto_rawDescData []byte
)

// file_cilium_api_health_check_sink_proto_rawDescGZIP lazily gzip-compresses
// the raw descriptor exactly once and returns the cached bytes.
func file_cilium_api_health_check_sink_proto_rawDescGZIP() []byte {
	file_cilium_api_health_check_sink_proto_rawDescOnce.Do(func() {
		file_cilium_api_health_check_sink_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_health_check_sink_proto_rawDesc), len(file_cilium_api_health_check_sink_proto_rawDesc)))
	})
	return file_cilium_api_health_check_sink_proto_rawDescData
}

var file_cilium_api_health_check_sink_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_cilium_api_health_check_sink_proto_goTypes = []any{
	(*HealthCheckEventPipeSink)(nil), // 0: cilium.HealthCheckEventPipeSink
}
var file_cilium_api_health_check_sink_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_cilium_api_health_check_sink_proto_init() }

// file_cilium_api_health_check_sink_proto_init registers the file's types with
// the protobuf runtime. It is idempotent: a second call returns immediately.
func file_cilium_api_health_check_sink_proto_init() {
	if File_cilium_api_health_check_sink_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_health_check_sink_proto_rawDesc), len(file_cilium_api_health_check_sink_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_cilium_api_health_check_sink_proto_goTypes,
		DependencyIndexes: file_cilium_api_health_check_sink_proto_depIdxs,
		MessageInfos:      file_cilium_api_health_check_sink_proto_msgTypes,
	}.Build()
	File_cilium_api_health_check_sink_proto = out.File
	file_cilium_api_health_check_sink_proto_goTypes = nil
	file_cilium_api_health_check_sink_proto_depIdxs = nil
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/health_check_sink.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
var (
	_ = bytes.MinRead
	_ = errors.New("")
	_ = fmt.Print
	_ = utf8.UTFMax
	_ = (*regexp.Regexp)(nil)
	_ = (*strings.Reader)(nil)
	_ = net.IPv4len
	_ = time.Duration(0)
	_ = (*url.URL)(nil)
	_ = (*mail.Address)(nil)
	_ = anypb.Any{}
	_ = sort.Sort
)

// Validate checks the field values on HealthCheckEventPipeSink with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *HealthCheckEventPipeSink) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on HealthCheckEventPipeSink with the
// rules defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// HealthCheckEventPipeSinkMultiError, or nil if none found.
func (m *HealthCheckEventPipeSink) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on first violation)
// and ValidateAll (all=true: collect every violation).
func (m *HealthCheckEventPipeSink) validate(all bool) error {
	// A nil message is trivially valid.
	if m == nil {
		return nil
	}
	var errors []error
	// Enforce the proto constraint on 'path': string.min_len = 1.
	// Length is measured in runes, not bytes.
	if utf8.RuneCountInString(m.GetPath()) < 1 {
		err := HealthCheckEventPipeSinkValidationError{
			field:  "Path",
			reason: "value length must be at least 1 runes",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}
	if len(errors) > 0 {
		return HealthCheckEventPipeSinkMultiError(errors)
	}
	return nil
}
// HealthCheckEventPipeSinkMultiError wraps the multiple validation errors
// returned by HealthCheckEventPipeSink.ValidateAll() when the designated
// constraints aren't met.
type HealthCheckEventPipeSinkMultiError []error

// Error implements the builtin error interface by concatenating all wrapped
// messages, separated by "; ".
func (m HealthCheckEventPipeSinkMultiError) Error() string {
	var sb strings.Builder
	for i, wrapped := range m {
		if i > 0 {
			sb.WriteString("; ")
		}
		sb.WriteString(wrapped.Error())
	}
	return sb.String()
}

// AllErrors returns the full list of validation violation errors.
func (m HealthCheckEventPipeSinkMultiError) AllErrors() []error { return m }
// HealthCheckEventPipeSinkValidationError describes a single constraint
// violation found while validating a HealthCheckEventPipeSink message.
type HealthCheckEventPipeSinkValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the offending field.
func (v HealthCheckEventPipeSinkValidationError) Field() string { return v.field }

// Reason returns a human-readable description of the violation.
func (v HealthCheckEventPipeSinkValidationError) Reason() string { return v.reason }

// Cause returns the underlying error, if any.
func (v HealthCheckEventPipeSinkValidationError) Cause() error { return v.cause }

// Key reports whether the violation concerns a map key.
func (v HealthCheckEventPipeSinkValidationError) Key() bool { return v.key }

// ErrorName returns the canonical name of this error type.
func (v HealthCheckEventPipeSinkValidationError) ErrorName() string {
	return "HealthCheckEventPipeSinkValidationError"
}

// Error satisfies the builtin error interface.
func (v HealthCheckEventPipeSinkValidationError) Error() string {
	keyPrefix := ""
	if v.key {
		keyPrefix = "key for "
	}
	causeSuffix := ""
	if v.cause != nil {
		causeSuffix = fmt.Sprintf(" | caused by: %v", v.cause)
	}
	return fmt.Sprintf(
		"invalid %sHealthCheckEventPipeSink.%s: %s%s",
		keyPrefix,
		v.field,
		v.reason,
		causeSuffix)
}

var _ error = HealthCheckEventPipeSinkValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = HealthCheckEventPipeSinkValidationError{}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/l7policy.proto
package cilium
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// L7Policy is the generated Go type for the cilium.L7Policy proto message
// (source: cilium/api/l7policy.proto).
type L7Policy struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Path to the unix domain socket for the cilium access log.
	AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
	// HTTP response body message for 403 status code.
	// If empty, "Access denied" will be used.
	Denied_403Body string `protobuf:"bytes,3,opt,name=denied_403_body,json=denied403Body,proto3" json:"denied_403_body,omitempty"`
	// unknownFields and sizeCache are internal protobuf-runtime bookkeeping.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches the generated message info.
func (x *L7Policy) Reset() {
	*x = L7Policy{}
	mi := &file_cilium_api_l7policy_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the compact text-format representation of the message.
func (x *L7Policy) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *L7Policy as a protobuf message.
func (*L7Policy) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *L7Policy) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_l7policy_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use L7Policy.ProtoReflect.Descriptor instead.
func (*L7Policy) Descriptor() ([]byte, []int) {
	return file_cilium_api_l7policy_proto_rawDescGZIP(), []int{0}
}

// GetAccessLogPath is a nil-safe accessor: it returns "" when the receiver is nil.
func (x *L7Policy) GetAccessLogPath() string {
	if x != nil {
		return x.AccessLogPath
	}
	return ""
}

// GetDenied_403Body is a nil-safe accessor: it returns "" when the receiver is nil.
func (x *L7Policy) GetDenied_403Body() string {
	if x != nil {
		return x.Denied_403Body
	}
	return ""
}

// File_cilium_api_l7policy_proto is the file descriptor for
// cilium/api/l7policy.proto, populated by init below.
var File_cilium_api_l7policy_proto protoreflect.FileDescriptor

// file_cilium_api_l7policy_proto_rawDesc is the serialized
// FileDescriptorProto, embedded as a string constant. Do not modify.
const file_cilium_api_l7policy_proto_rawDesc = "" +
	"\n" +
	"\x19cilium/api/l7policy.proto\x12\x06cilium\"Z\n" +
	"\bL7Policy\x12&\n" +
	"\x0faccess_log_path\x18\x01 \x01(\tR\raccessLogPath\x12&\n" +
	"\x0fdenied_403_body\x18\x03 \x01(\tR\rdenied403BodyB.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"

var (
	file_cilium_api_l7policy_proto_rawDescOnce sync.Once
	file_cilium_api_l7policy_proto_rawDescData []byte
)

// file_cilium_api_l7policy_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor exactly once and returns the cached bytes.
func file_cilium_api_l7policy_proto_rawDescGZIP() []byte {
	file_cilium_api_l7policy_proto_rawDescOnce.Do(func() {
		file_cilium_api_l7policy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_l7policy_proto_rawDesc), len(file_cilium_api_l7policy_proto_rawDesc)))
	})
	return file_cilium_api_l7policy_proto_rawDescData
}

var file_cilium_api_l7policy_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_cilium_api_l7policy_proto_goTypes = []any{
	(*L7Policy)(nil), // 0: cilium.L7Policy
}
var file_cilium_api_l7policy_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_cilium_api_l7policy_proto_init() }

// file_cilium_api_l7policy_proto_init registers the file's types with the
// protobuf runtime. It is idempotent: a second call returns immediately.
func file_cilium_api_l7policy_proto_init() {
	if File_cilium_api_l7policy_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_l7policy_proto_rawDesc), len(file_cilium_api_l7policy_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_cilium_api_l7policy_proto_goTypes,
		DependencyIndexes: file_cilium_api_l7policy_proto_depIdxs,
		MessageInfos:      file_cilium_api_l7policy_proto_msgTypes,
	}.Build()
	File_cilium_api_l7policy_proto = out.File
	file_cilium_api_l7policy_proto_goTypes = nil
	file_cilium_api_l7policy_proto_depIdxs = nil
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/l7policy.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
var (
	_ = bytes.MinRead
	_ = errors.New("")
	_ = fmt.Print
	_ = utf8.UTFMax
	_ = (*regexp.Regexp)(nil)
	_ = (*strings.Reader)(nil)
	_ = net.IPv4len
	_ = time.Duration(0)
	_ = (*url.URL)(nil)
	_ = (*mail.Address)(nil)
	_ = anypb.Any{}
	_ = sort.Sort
)

// Validate checks the field values on L7Policy with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *L7Policy) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on L7Policy with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in L7PolicyMultiError, or nil
// if none found.
func (m *L7Policy) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate and ValidateAll. The proto declares no
// constraints on any L7Policy field, so only the nil check can produce a
// short-circuit and the errors slice always stays empty.
func (m *L7Policy) validate(all bool) error {
	// A nil message is trivially valid.
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for AccessLogPath
	// no validation rules for Denied_403Body
	if len(errors) > 0 {
		return L7PolicyMultiError(errors)
	}
	return nil
}
// L7PolicyMultiError wraps the multiple validation errors returned by
// L7Policy.ValidateAll() when the designated constraints aren't met.
type L7PolicyMultiError []error

// Error implements the builtin error interface by joining every wrapped
// validation message with "; ".
func (m L7PolicyMultiError) Error() string {
	joined := make([]string, len(m))
	for i, wrapped := range m {
		joined[i] = wrapped.Error()
	}
	return strings.Join(joined, "; ")
}

// AllErrors returns the full list of validation violation errors.
func (m L7PolicyMultiError) AllErrors() []error { return m }
// L7PolicyValidationError describes a single constraint violation found
// while validating an L7Policy message.
type L7PolicyValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the offending field.
func (v L7PolicyValidationError) Field() string { return v.field }

// Reason returns a human-readable description of the violation.
func (v L7PolicyValidationError) Reason() string { return v.reason }

// Cause returns the underlying error, if any.
func (v L7PolicyValidationError) Cause() error { return v.cause }

// Key reports whether the violation concerns a map key.
func (v L7PolicyValidationError) Key() bool { return v.key }

// ErrorName returns the canonical name of this error type.
func (v L7PolicyValidationError) ErrorName() string { return "L7PolicyValidationError" }

// Error satisfies the builtin error interface.
func (v L7PolicyValidationError) Error() string {
	keyPrefix := ""
	if v.key {
		keyPrefix = "key for "
	}
	causeSuffix := ""
	if v.cause != nil {
		causeSuffix = fmt.Sprintf(" | caused by: %v", v.cause)
	}
	return fmt.Sprintf(
		"invalid %sL7Policy.%s: %s%s",
		keyPrefix,
		v.field,
		v.reason,
		causeSuffix)
}

var _ error = L7PolicyValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = L7PolicyValidationError{}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/network_filter.proto
package cilium
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails here if the generated code and the
// linked protoimpl runtime drift outside each other's supported version
// window (this file was generated against protoimpl generation 20).
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// NetworkFilter is the generated message type for cilium.NetworkFilter
// (source: cilium/api/network_filter.proto).
type NetworkFilter struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Path to the proxylib to be opened
	Proxylib string `protobuf:"bytes,1,opt,name=proxylib,proto3" json:"proxylib,omitempty"`
	// Transparent set of parameters provided for proxylib initialization
	ProxylibParams map[string]string `protobuf:"bytes,2,rep,name=proxylib_params,json=proxylibParams,proto3" json:"proxylib_params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	// Path to the unix domain socket for the cilium access log.
	AccessLogPath string `protobuf:"bytes,5,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message metadata.
func (x *NetworkFilter) Reset() {
	*x = NetworkFilter{}
	mi := &file_cilium_api_network_filter_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NetworkFilter) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NetworkFilter) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use.
func (x *NetworkFilter) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_network_filter_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NetworkFilter.ProtoReflect.Descriptor instead.
func (*NetworkFilter) Descriptor() ([]byte, []int) {
	return file_cilium_api_network_filter_proto_rawDescGZIP(), []int{0}
}

// Getters below are nil-safe: a nil receiver yields the field's zero value.

func (x *NetworkFilter) GetProxylib() string {
	if x != nil {
		return x.Proxylib
	}
	return ""
}

func (x *NetworkFilter) GetProxylibParams() map[string]string {
	if x != nil {
		return x.ProxylibParams
	}
	return nil
}

func (x *NetworkFilter) GetAccessLogPath() string {
	if x != nil {
		return x.AccessLogPath
	}
	return ""
}
var File_cilium_api_network_filter_proto protoreflect.FileDescriptor

// Serialized FileDescriptorProto for cilium/api/network_filter.proto.
// These bytes are generated; do not edit them by hand.
const file_cilium_api_network_filter_proto_rawDesc = "" +
	"\n" +
	"\x1fcilium/api/network_filter.proto\x12\x06cilium\"\xea\x01\n" +
	"\rNetworkFilter\x12\x1a\n" +
	"\bproxylib\x18\x01 \x01(\tR\bproxylib\x12R\n" +
	"\x0fproxylib_params\x18\x02 \x03(\v2).cilium.NetworkFilter.ProxylibParamsEntryR\x0eproxylibParams\x12&\n" +
	"\x0faccess_log_path\x18\x05 \x01(\tR\raccessLogPath\x1aA\n" +
	"\x13ProxylibParamsEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"

var (
	file_cilium_api_network_filter_proto_rawDescOnce sync.Once
	file_cilium_api_network_filter_proto_rawDescData []byte
)

// file_cilium_api_network_filter_proto_rawDescGZIP lazily gzip-compresses the
// raw descriptor exactly once (the unsafe.Slice call is a zero-copy []byte
// view of the string constant) and returns the cached result.
func file_cilium_api_network_filter_proto_rawDescGZIP() []byte {
	file_cilium_api_network_filter_proto_rawDescOnce.Do(func() {
		file_cilium_api_network_filter_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_network_filter_proto_rawDesc), len(file_cilium_api_network_filter_proto_rawDesc)))
	})
	return file_cilium_api_network_filter_proto_rawDescData
}
// Two message slots: NetworkFilter and its synthetic ProxylibParamsEntry map
// entry.
var file_cilium_api_network_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 2)

var file_cilium_api_network_filter_proto_goTypes = []any{
	(*NetworkFilter)(nil), // 0: cilium.NetworkFilter
	nil,                   // 1: cilium.NetworkFilter.ProxylibParamsEntry
}

// Dependency index table consumed by protoimpl.TypeBuilder; the sub-list
// boundary comments are part of the generated format.
var file_cilium_api_network_filter_proto_depIdxs = []int32{
	1, // 0: cilium.NetworkFilter.proxylib_params:type_name -> cilium.NetworkFilter.ProxylibParamsEntry
	1, // [1:1] is the sub-list for method output_type
	1, // [1:1] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}
func init() { file_cilium_api_network_filter_proto_init() }

// file_cilium_api_network_filter_proto_init registers the file's types with
// the protobuf runtime. It is idempotent: a second call returns immediately
// once File_cilium_api_network_filter_proto is set.
func file_cilium_api_network_filter_proto_init() {
	if File_cilium_api_network_filter_proto != nil {
		return
	}
	// Local throwaway type used only to derive the Go package path via
	// reflection.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_network_filter_proto_rawDesc), len(file_cilium_api_network_filter_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_cilium_api_network_filter_proto_goTypes,
		DependencyIndexes: file_cilium_api_network_filter_proto_depIdxs,
		MessageInfos:      file_cilium_api_network_filter_proto_msgTypes,
	}.Build()
	File_cilium_api_network_filter_proto = out.File
	// The build tables are cleared after Build so they can be collected.
	file_cilium_api_network_filter_proto_goTypes = nil
	file_cilium_api_network_filter_proto_depIdxs = nil
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/network_filter.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
//
// The generated validation code references these packages only when the
// .proto file declares matching rules; the blank assignments keep the
// import block compiling either way.
var (
	_ = bytes.MinRead
	_ = errors.New("")
	_ = fmt.Print
	_ = utf8.UTFMax
	_ = (*regexp.Regexp)(nil)
	_ = (*strings.Reader)(nil)
	_ = net.IPv4len
	_ = time.Duration(0)
	_ = (*url.URL)(nil)
	_ = (*mail.Address)(nil)
	_ = anypb.Any{}
	_ = sort.Sort
)
// Validate checks the field values on NetworkFilter with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *NetworkFilter) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on NetworkFilter with the rules defined
// in the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in NetworkFilterMultiError, or
// nil if none found.
func (m *NetworkFilter) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// NetworkFilter declares no field constraints (Proxylib, ProxylibParams and
// AccessLogPath carry no validation rules), so this always succeeds for
// non-nil receivers.
func (m *NetworkFilter) validate(all bool) error {
	if m == nil {
		return nil
	}

	var errs []error

	if len(errs) > 0 {
		return NetworkFilterMultiError(errs)
	}

	return nil
}
// NetworkFilterMultiError is an error wrapping multiple validation errors
// returned by NetworkFilter.ValidateAll() if the designated constraints
// aren't met.
type NetworkFilterMultiError []error

// Error returns a concatenation of all the error messages it wraps,
// separated by "; ".
func (m NetworkFilterMultiError) Error() string {
	parts := make([]string, len(m))
	for i, err := range m {
		parts[i] = err.Error()
	}
	return strings.Join(parts, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m NetworkFilterMultiError) AllErrors() []error { return m }
// NetworkFilterValidationError is the validation error returned by
// NetworkFilter.Validate if the designated constraints aren't met.
type NetworkFilterValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e NetworkFilterValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e NetworkFilterValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e NetworkFilterValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e NetworkFilterValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e NetworkFilterValidationError) ErrorName() string { return "NetworkFilterValidationError" }

// Error satisfies the builtin error interface.
func (e NetworkFilterValidationError) Error() string {
	var cause, key string
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf("invalid %sNetworkFilter.%s: %s%s", key, e.field, e.reason, cause)
}

var _ error = NetworkFilterValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = NetworkFilterValidationError{}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/npds.proto
package cilium
import (
context "context"
_ "github.com/envoyproxy/go-control-plane/envoy/annotations"
v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
v31 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
v33 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
v32 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails here if the generated code and the
// linked protoimpl runtime drift outside each other's supported version
// window (this file was generated against protoimpl generation 20).
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Action specifies what to do when the header matches.
type HeaderMatch_MatchAction int32

const (
	HeaderMatch_CONTINUE_ON_MATCH HeaderMatch_MatchAction = 0 // Keep checking other matches (default)
	HeaderMatch_FAIL_ON_MATCH     HeaderMatch_MatchAction = 1 // Drop the request if no other rule matches
	HeaderMatch_DELETE_ON_MATCH   HeaderMatch_MatchAction = 2 // Remove the whole matching header
)

// Enum value maps for HeaderMatch_MatchAction.
var (
	HeaderMatch_MatchAction_name = map[int32]string{
		0: "CONTINUE_ON_MATCH",
		1: "FAIL_ON_MATCH",
		2: "DELETE_ON_MATCH",
	}
	HeaderMatch_MatchAction_value = map[string]int32{
		"CONTINUE_ON_MATCH": 0,
		"FAIL_ON_MATCH":     1,
		"DELETE_ON_MATCH":   2,
	}
)

// Enum returns a freshly allocated pointer holding the value x.
func (x HeaderMatch_MatchAction) Enum() *HeaderMatch_MatchAction {
	p := new(HeaderMatch_MatchAction)
	*p = x
	return p
}

func (x HeaderMatch_MatchAction) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (HeaderMatch_MatchAction) Descriptor() protoreflect.EnumDescriptor {
	return file_cilium_api_npds_proto_enumTypes[0].Descriptor()
}

func (HeaderMatch_MatchAction) Type() protoreflect.EnumType {
	return &file_cilium_api_npds_proto_enumTypes[0]
}

func (x HeaderMatch_MatchAction) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use HeaderMatch_MatchAction.Descriptor instead.
func (HeaderMatch_MatchAction) EnumDescriptor() ([]byte, []int) {
	return file_cilium_api_npds_proto_rawDescGZIP(), []int{5, 0}
}
// HeaderMatch_MismatchAction specifies what to do when the header does not
// match (per-value comments below).
type HeaderMatch_MismatchAction int32

const (
	HeaderMatch_FAIL_ON_MISMATCH     HeaderMatch_MismatchAction = 0 // Drop the request if no other rule matches (default)
	HeaderMatch_CONTINUE_ON_MISMATCH HeaderMatch_MismatchAction = 1 // Keep checking other matches, log the mismatch
	HeaderMatch_ADD_ON_MISMATCH      HeaderMatch_MismatchAction = 2 // Add 'value' to the multivalued header
	HeaderMatch_DELETE_ON_MISMATCH   HeaderMatch_MismatchAction = 3 // Remove the whole mismatching header
	HeaderMatch_REPLACE_ON_MISMATCH  HeaderMatch_MismatchAction = 4 // Replace the whole mismatching header with 'value'
)

// Enum value maps for HeaderMatch_MismatchAction.
var (
	HeaderMatch_MismatchAction_name = map[int32]string{
		0: "FAIL_ON_MISMATCH",
		1: "CONTINUE_ON_MISMATCH",
		2: "ADD_ON_MISMATCH",
		3: "DELETE_ON_MISMATCH",
		4: "REPLACE_ON_MISMATCH",
	}
	HeaderMatch_MismatchAction_value = map[string]int32{
		"FAIL_ON_MISMATCH":     0,
		"CONTINUE_ON_MISMATCH": 1,
		"ADD_ON_MISMATCH":      2,
		"DELETE_ON_MISMATCH":   3,
		"REPLACE_ON_MISMATCH":  4,
	}
)

// Enum returns a freshly allocated pointer holding the value x.
func (x HeaderMatch_MismatchAction) Enum() *HeaderMatch_MismatchAction {
	p := new(HeaderMatch_MismatchAction)
	*p = x
	return p
}

func (x HeaderMatch_MismatchAction) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (HeaderMatch_MismatchAction) Descriptor() protoreflect.EnumDescriptor {
	return file_cilium_api_npds_proto_enumTypes[1].Descriptor()
}

func (HeaderMatch_MismatchAction) Type() protoreflect.EnumType {
	return &file_cilium_api_npds_proto_enumTypes[1]
}

func (x HeaderMatch_MismatchAction) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use HeaderMatch_MismatchAction.Descriptor instead.
func (HeaderMatch_MismatchAction) EnumDescriptor() ([]byte, []int) {
	return file_cilium_api_npds_proto_rawDescGZIP(), []int{5, 1}
}
// A network policy that is enforced by a filter on the network flows to/from
// associated hosts.
type NetworkPolicy struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// IPs of the endpoint to which this policy applies.
	// Required.
	EndpointIps []string `protobuf:"bytes,1,rep,name=endpoint_ips,json=endpointIps,proto3" json:"endpoint_ips,omitempty"`
	// The endpoint identifier associated with the network policy.
	// Required.
	EndpointId uint64 `protobuf:"varint,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"`
	// The part of the policy to be enforced at ingress by the filter, as a set
	// of per-port network policies, one per destination L4 port.
	// Every PortNetworkPolicy element in this set has a unique port / protocol
	// combination.
	// Optional. If empty, all flows in this direction are denied.
	IngressPerPortPolicies []*PortNetworkPolicy `protobuf:"bytes,3,rep,name=ingress_per_port_policies,json=ingressPerPortPolicies,proto3" json:"ingress_per_port_policies,omitempty"`
	// The part of the policy to be enforced at egress by the filter, as a set
	// of per-port network policies, one per destination L4 port.
	// Every PortNetworkPolicy element in this set has a unique port / protocol
	// combination.
	// Optional. If empty, all flows in this direction are denied.
	EgressPerPortPolicies []*PortNetworkPolicy `protobuf:"bytes,4,rep,name=egress_per_port_policies,json=egressPerPortPolicies,proto3" json:"egress_per_port_policies,omitempty"`
	// Name of the conntrack map to use with this policy.
	// The paths to various Cilium conntrack maps are derived using this name.
	// Optional. If empty, ipcache or hostmap lookup is used instead of conntrack
	// map.
	ConntrackMapName string `protobuf:"bytes,5,opt,name=conntrack_map_name,json=conntrackMapName,proto3" json:"conntrack_map_name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message metadata.
func (x *NetworkPolicy) Reset() {
	*x = NetworkPolicy{}
	mi := &file_cilium_api_npds_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NetworkPolicy) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NetworkPolicy) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use.
func (x *NetworkPolicy) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_npds_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NetworkPolicy.ProtoReflect.Descriptor instead.
func (*NetworkPolicy) Descriptor() ([]byte, []int) {
	return file_cilium_api_npds_proto_rawDescGZIP(), []int{0}
}

// Getters below are nil-safe: a nil receiver yields the field's zero value.

func (x *NetworkPolicy) GetEndpointIps() []string {
	if x != nil {
		return x.EndpointIps
	}
	return nil
}

func (x *NetworkPolicy) GetEndpointId() uint64 {
	if x != nil {
		return x.EndpointId
	}
	return 0
}

func (x *NetworkPolicy) GetIngressPerPortPolicies() []*PortNetworkPolicy {
	if x != nil {
		return x.IngressPerPortPolicies
	}
	return nil
}

func (x *NetworkPolicy) GetEgressPerPortPolicies() []*PortNetworkPolicy {
	if x != nil {
		return x.EgressPerPortPolicies
	}
	return nil
}

func (x *NetworkPolicy) GetConntrackMapName() string {
	if x != nil {
		return x.ConntrackMapName
	}
	return ""
}
// A network policy to whitelist flows to a specific destination L4 port,
// as a conjunction of predicates on L3/L4/L7 flows.
// If all the predicates of a policy match a flow, the flow is whitelisted.
type PortNetworkPolicy struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// The flows' destination L4 port number, as an unsigned 16-bit integer.
	// If 0, all destination L4 port numbers are matched by this predicate.
	Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
	// The end of the destination port range, if non-zero.
	EndPort uint32 `protobuf:"varint,4,opt,name=end_port,json=endPort,proto3" json:"end_port,omitempty"`
	// The flows' L4 transport protocol.
	// Required.
	Protocol v3.SocketAddress_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=envoy.config.core.v3.SocketAddress_Protocol" json:"protocol,omitempty"`
	// The network policy rules to be enforced on the flows to the port.
	// Optional. A flow is matched by this predicate if either the set of
	// rules is empty or any of the rules matches it.
	Rules []*PortNetworkPolicyRule `protobuf:"bytes,3,rep,name=rules,proto3" json:"rules,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message metadata.
func (x *PortNetworkPolicy) Reset() {
	*x = PortNetworkPolicy{}
	mi := &file_cilium_api_npds_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PortNetworkPolicy) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PortNetworkPolicy) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use.
func (x *PortNetworkPolicy) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_npds_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PortNetworkPolicy.ProtoReflect.Descriptor instead.
func (*PortNetworkPolicy) Descriptor() ([]byte, []int) {
	return file_cilium_api_npds_proto_rawDescGZIP(), []int{1}
}

// Getters below are nil-safe: a nil receiver yields the field's zero value.

func (x *PortNetworkPolicy) GetPort() uint32 {
	if x != nil {
		return x.Port
	}
	return 0
}

func (x *PortNetworkPolicy) GetEndPort() uint32 {
	if x != nil {
		return x.EndPort
	}
	return 0
}

func (x *PortNetworkPolicy) GetProtocol() v3.SocketAddress_Protocol {
	if x != nil {
		return x.Protocol
	}
	return v3.SocketAddress_Protocol(0)
}

func (x *PortNetworkPolicy) GetRules() []*PortNetworkPolicyRule {
	if x != nil {
		return x.Rules
	}
	return nil
}
// TLSContext carries TLS configuration for a policy rule, either inline
// (deprecated fields) or via SDS secret references.
type TLSContext struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// CA certificates. If present, the counterparty must provide a valid
	// certificate.
	// Deprecated, use 'validation_context_sds_secret' instead.
	TrustedCa string `protobuf:"bytes,1,opt,name=trusted_ca,json=trustedCa,proto3" json:"trusted_ca,omitempty"`
	// Certificate chain.
	// Deprecated, use 'tls_sds_secret' instead.
	CertificateChain string `protobuf:"bytes,2,opt,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"`
	// Private key
	// Deprecated, use 'tls_sds_secret' instead.
	PrivateKey string `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"`
	// Server Name Indicator. For downstream this helps choose the certificate to
	// present to the client. For upstream this will be used as the SNI on the
	// client connection.
	ServerNames []string `protobuf:"bytes,4,rep,name=server_names,json=serverNames,proto3" json:"server_names,omitempty"`
	// Name of an SDS secret for CA certificates. Secret is fetched from the same gRPC source as
	// this Network Policy. If present, the counterparty must provide a valid certificate.
	// May not be used at the same time with 'trusted_ca'.
	ValidationContextSdsSecret string `protobuf:"bytes,5,opt,name=validation_context_sds_secret,json=validationContextSdsSecret,proto3" json:"validation_context_sds_secret,omitempty"`
	// Name of an SDS secret for both TLS private key and certificate chain. Secret is fetched
	// from the same gRPC source as this Network Policy.
	// May not be used at the same time with 'certificate_chain' or 'private_key'.
	TlsSdsSecret string `protobuf:"bytes,6,opt,name=tls_sds_secret,json=tlsSdsSecret,proto3" json:"tls_sds_secret,omitempty"`
	// Set of ALPN protocols, e.g., [ "h2", "http/1.1" ] when both HTTP 1.1 and HTTP 2 are supported.
	AlpnProtocols []string `protobuf:"bytes,7,rep,name=alpn_protocols,json=alpnProtocols,proto3" json:"alpn_protocols,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message metadata.
func (x *TLSContext) Reset() {
	*x = TLSContext{}
	mi := &file_cilium_api_npds_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *TLSContext) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TLSContext) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use.
func (x *TLSContext) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_npds_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TLSContext.ProtoReflect.Descriptor instead.
func (*TLSContext) Descriptor() ([]byte, []int) {
	return file_cilium_api_npds_proto_rawDescGZIP(), []int{2}
}

// Getters below are nil-safe: a nil receiver yields the field's zero value.

func (x *TLSContext) GetTrustedCa() string {
	if x != nil {
		return x.TrustedCa
	}
	return ""
}

func (x *TLSContext) GetCertificateChain() string {
	if x != nil {
		return x.CertificateChain
	}
	return ""
}

func (x *TLSContext) GetPrivateKey() string {
	if x != nil {
		return x.PrivateKey
	}
	return ""
}

func (x *TLSContext) GetServerNames() []string {
	if x != nil {
		return x.ServerNames
	}
	return nil
}

func (x *TLSContext) GetValidationContextSdsSecret() string {
	if x != nil {
		return x.ValidationContextSdsSecret
	}
	return ""
}

func (x *TLSContext) GetTlsSdsSecret() string {
	if x != nil {
		return x.TlsSdsSecret
	}
	return ""
}

func (x *TLSContext) GetAlpnProtocols() []string {
	if x != nil {
		return x.AlpnProtocols
	}
	return nil
}
// A network policy rule, as a conjunction of predicates on L3/L7 flows.
// If all the predicates of a rule match a flow, the flow is matched by the
// rule.
type PortNetworkPolicyRule struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Precedence level for this rule. Rules with **higher** numeric values take
	// precedence, even over deny rules of lower precedence level.
	Precedence uint32 `protobuf:"varint,10,opt,name=precedence,proto3" json:"precedence,omitempty"`
	// Traffic on this port is denied for all `remote_policies` if true
	Deny bool `protobuf:"varint,8,opt,name=deny,proto3" json:"deny,omitempty"`
	// ProxyID is non-zero if the rule was an allow rule with an explicit listener reference.
	// The given value corresponds to the 'proxy_id' value in the BpfMetadata listener filter
	// configuration.
	// This rule should be ignored if not executing in the referred listener.
	ProxyId uint32 `protobuf:"varint,9,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
	// Optional name for the rule, can be used in logging and error messages.
	Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
	// The set of numeric remote security IDs explicitly allowed or denied.
	// A flow is matched by this predicate if the identifier of the policy
	// applied on the flow's remote host is contained in this set.
	// Optional. If not specified, any remote host is matched by this predicate.
	RemotePolicies []uint32 `protobuf:"varint,7,rep,packed,name=remote_policies,json=remotePolicies,proto3" json:"remote_policies,omitempty"`
	// Optional downstream TLS context. If present, the incoming connection must
	// be a TLS connection.
	DownstreamTlsContext *TLSContext `protobuf:"bytes,3,opt,name=downstream_tls_context,json=downstreamTlsContext,proto3" json:"downstream_tls_context,omitempty"`
	// Optional upstream TLS context. If present, the outgoing connection will use
	// TLS.
	UpstreamTlsContext *TLSContext `protobuf:"bytes,4,opt,name=upstream_tls_context,json=upstreamTlsContext,proto3" json:"upstream_tls_context,omitempty"`
	// Optional allowed SNIs in TLS handshake.
	ServerNames []string `protobuf:"bytes,6,rep,name=server_names,json=serverNames,proto3" json:"server_names,omitempty"`
	// Optional L7 protocol parser name. This is only used if the parser is not
	// one of the well knows ones. If specified, the l7 parser having this name
	// needs to be built in to libcilium.so.
	L7Proto string `protobuf:"bytes,2,opt,name=l7_proto,json=l7Proto,proto3" json:"l7_proto,omitempty"`
	// Optional. If not specified, any L7 request is matched by this predicate.
	// All rules on any given port must have the same type of L7 rules!
	//
	// Types that are valid to be assigned to L7:
	//
	//	*PortNetworkPolicyRule_HttpRules
	//	*PortNetworkPolicyRule_KafkaRules
	//	*PortNetworkPolicyRule_L7Rules
	L7 isPortNetworkPolicyRule_L7 `protobuf_oneof:"l7"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message metadata.
func (x *PortNetworkPolicyRule) Reset() {
	*x = PortNetworkPolicyRule{}
	mi := &file_cilium_api_npds_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PortNetworkPolicyRule) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PortNetworkPolicyRule) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use.
func (x *PortNetworkPolicyRule) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_npds_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PortNetworkPolicyRule.ProtoReflect.Descriptor instead.
func (*PortNetworkPolicyRule) Descriptor() ([]byte, []int) {
	return file_cilium_api_npds_proto_rawDescGZIP(), []int{3}
}

// Getters below are nil-safe: a nil receiver yields the field's zero value.

func (x *PortNetworkPolicyRule) GetPrecedence() uint32 {
	if x != nil {
		return x.Precedence
	}
	return 0
}

func (x *PortNetworkPolicyRule) GetDeny() bool {
	if x != nil {
		return x.Deny
	}
	return false
}

func (x *PortNetworkPolicyRule) GetProxyId() uint32 {
	if x != nil {
		return x.ProxyId
	}
	return 0
}

func (x *PortNetworkPolicyRule) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *PortNetworkPolicyRule) GetRemotePolicies() []uint32 {
	if x != nil {
		return x.RemotePolicies
	}
	return nil
}

func (x *PortNetworkPolicyRule) GetDownstreamTlsContext() *TLSContext {
	if x != nil {
		return x.DownstreamTlsContext
	}
	return nil
}

func (x *PortNetworkPolicyRule) GetUpstreamTlsContext() *TLSContext {
	if x != nil {
		return x.UpstreamTlsContext
	}
	return nil
}

func (x *PortNetworkPolicyRule) GetServerNames() []string {
	if x != nil {
		return x.ServerNames
	}
	return nil
}

func (x *PortNetworkPolicyRule) GetL7Proto() string {
	if x != nil {
		return x.L7Proto
	}
	return ""
}

// GetL7 returns the raw oneof wrapper; the typed getters below unwrap it.
func (x *PortNetworkPolicyRule) GetL7() isPortNetworkPolicyRule_L7 {
	if x != nil {
		return x.L7
	}
	return nil
}

// GetHttpRules returns the HTTP rules if that oneof case is set, else nil.
func (x *PortNetworkPolicyRule) GetHttpRules() *HttpNetworkPolicyRules {
	if x != nil {
		if x, ok := x.L7.(*PortNetworkPolicyRule_HttpRules); ok {
			return x.HttpRules
		}
	}
	return nil
}

// GetKafkaRules returns the Kafka rules if that oneof case is set, else nil.
func (x *PortNetworkPolicyRule) GetKafkaRules() *KafkaNetworkPolicyRules {
	if x != nil {
		if x, ok := x.L7.(*PortNetworkPolicyRule_KafkaRules); ok {
			return x.KafkaRules
		}
	}
	return nil
}

// GetL7Rules returns the generic L7 rules if that oneof case is set, else nil.
func (x *PortNetworkPolicyRule) GetL7Rules() *L7NetworkPolicyRules {
	if x != nil {
		if x, ok := x.L7.(*PortNetworkPolicyRule_L7Rules); ok {
			return x.L7Rules
		}
	}
	return nil
}

// isPortNetworkPolicyRule_L7 is the sealed interface implemented by the
// oneof wrapper types below.
type isPortNetworkPolicyRule_L7 interface {
	isPortNetworkPolicyRule_L7()
}

type PortNetworkPolicyRule_HttpRules struct {
	// The set of HTTP network policy rules.
	// An HTTP request is matched by this predicate if any of its rules matches
	// the request.
	HttpRules *HttpNetworkPolicyRules `protobuf:"bytes,100,opt,name=http_rules,json=httpRules,proto3,oneof"`
}

type PortNetworkPolicyRule_KafkaRules struct {
	// The set of Kafka network policy rules.
	// A Kafka request is matched by this predicate if any of its rules matches
	// the request.
	KafkaRules *KafkaNetworkPolicyRules `protobuf:"bytes,101,opt,name=kafka_rules,json=kafkaRules,proto3,oneof"`
}

type PortNetworkPolicyRule_L7Rules struct {
	// Set of Generic policy rules used when 'l7_proto' is defined.
	// Only to be used for l7 protocols for which a specific oneof
	// is not defined
	L7Rules *L7NetworkPolicyRules `protobuf:"bytes,102,opt,name=l7_rules,json=l7Rules,proto3,oneof"`
}

func (*PortNetworkPolicyRule_HttpRules) isPortNetworkPolicyRule_L7() {}

func (*PortNetworkPolicyRule_KafkaRules) isPortNetworkPolicyRule_L7() {}

func (*PortNetworkPolicyRule_L7Rules) isPortNetworkPolicyRule_L7() {}
// A set of network policy rules that match HTTP requests.
type HttpNetworkPolicyRules struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// The set of HTTP network policy rules.
	// An HTTP request is matched if any of its rules matches the request.
	// Required and may not be empty.
	HttpRules []*HttpNetworkPolicyRule `protobuf:"bytes,1,rep,name=http_rules,json=httpRules,proto3" json:"http_rules,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message metadata.
func (x *HttpNetworkPolicyRules) Reset() {
	*x = HttpNetworkPolicyRules{}
	mi := &file_cilium_api_npds_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *HttpNetworkPolicyRules) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HttpNetworkPolicyRules) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use.
func (x *HttpNetworkPolicyRules) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_npds_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HttpNetworkPolicyRules.ProtoReflect.Descriptor instead.
func (*HttpNetworkPolicyRules) Descriptor() ([]byte, []int) {
	return file_cilium_api_npds_proto_rawDescGZIP(), []int{4}
}

// GetHttpRules is nil-safe: a nil receiver yields nil.
func (x *HttpNetworkPolicyRules) GetHttpRules() []*HttpNetworkPolicyRule {
	if x != nil {
		return x.HttpRules
	}
	return nil
}
// HeaderMatch describes a single header predicate together with the actions
// to take on match (MatchAction) and on mismatch (MismatchAction).
type HeaderMatch struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Header name to match on.
	Name  string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` // empty for presence match. For secret data use 'value_sds_secret' instead.
	MatchAction    HeaderMatch_MatchAction    `protobuf:"varint,3,opt,name=match_action,json=matchAction,proto3,enum=cilium.HeaderMatch_MatchAction" json:"match_action,omitempty"`
	MismatchAction HeaderMatch_MismatchAction `protobuf:"varint,4,opt,name=mismatch_action,json=mismatchAction,proto3,enum=cilium.HeaderMatch_MismatchAction" json:"mismatch_action,omitempty"`
	// Generic secret name for fetching value via SDS. Secret is fetched from the same gRPC source as
	// this Network Policy.
	ValueSdsSecret string `protobuf:"bytes,5,opt,name=value_sds_secret,json=valueSdsSecret,proto3" json:"value_sds_secret,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message metadata.
func (x *HeaderMatch) Reset() {
	*x = HeaderMatch{}
	mi := &file_cilium_api_npds_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *HeaderMatch) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HeaderMatch) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use.
func (x *HeaderMatch) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_npds_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HeaderMatch.ProtoReflect.Descriptor instead.
func (*HeaderMatch) Descriptor() ([]byte, []int) {
	return file_cilium_api_npds_proto_rawDescGZIP(), []int{5}
}

// Getters below are nil-safe: a nil receiver yields the field's zero value.

func (x *HeaderMatch) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *HeaderMatch) GetValue() string {
	if x != nil {
		return x.Value
	}
	return ""
}

func (x *HeaderMatch) GetMatchAction() HeaderMatch_MatchAction {
	if x != nil {
		return x.MatchAction
	}
	return HeaderMatch_CONTINUE_ON_MATCH
}

func (x *HeaderMatch) GetMismatchAction() HeaderMatch_MismatchAction {
	if x != nil {
		return x.MismatchAction
	}
	return HeaderMatch_FAIL_ON_MISMATCH
}

func (x *HeaderMatch) GetValueSdsSecret() string {
	if x != nil {
		return x.ValueSdsSecret
	}
	return ""
}
// An HTTP network policy rule, as a conjunction of predicates on HTTP requests.
// If all the predicates of a rule match an HTTP request, the request is
// allowed. Otherwise, it is denied.
type HttpNetworkPolicyRule struct {
state protoimpl.MessageState `protogen:"open.v1"`
// A set of matchers on the HTTP request's headers' names and values.
// If all the matchers in this set match an HTTP request, the request is
// allowed by this rule. Otherwise, it is denied.
//
// Some special header names are:
//
// * *:uri*: The HTTP request's URI.
// * *:method*: The HTTP request's method.
// * *:authority*: Also maps to the HTTP 1.1 *Host* header.
//
// Optional. If empty, matches any HTTP request.
Headers []*v31.HeaderMatcher `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
// header_matches is a set of HTTP header name and value pairs that
// will be matched against the request headers, if all the other match
// requirements in 'headers' are met. Each HeaderAction determines what to do
// when there is a match or mismatch.
//
// Optional.
HeaderMatches []*HeaderMatch `protobuf:"bytes,2,rep,name=header_matches,json=headerMatches,proto3" json:"header_matches,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its generated
// message info so the instance can be reused.
func (x *HttpNetworkPolicyRule) Reset() {
*x = HttpNetworkPolicyRule{}
mi := &file_cilium_api_npds_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *HttpNetworkPolicyRule) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags the type as a protobuf message.
func (*HttpNetworkPolicyRule) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver yields a type-only view.
func (x *HttpNetworkPolicyRule) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_npds_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use HttpNetworkPolicyRule.ProtoReflect.Descriptor instead.
func (*HttpNetworkPolicyRule) Descriptor() ([]byte, []int) {
return file_cilium_api_npds_proto_rawDescGZIP(), []int{6}
}

// GetHeaders returns the headers field; nil on a nil receiver.
func (x *HttpNetworkPolicyRule) GetHeaders() []*v31.HeaderMatcher {
if x != nil {
return x.Headers
}
return nil
}

// GetHeaderMatches returns the header_matches field; nil on a nil receiver.
func (x *HttpNetworkPolicyRule) GetHeaderMatches() []*HeaderMatch {
if x != nil {
return x.HeaderMatches
}
return nil
}
// A set of network policy rules that match Kafka requests.
type KafkaNetworkPolicyRules struct {
state protoimpl.MessageState `protogen:"open.v1"`
// The set of Kafka network policy rules.
// A Kafka request is matched if any of its rules matches the request.
// Required and may not be empty.
KafkaRules []*KafkaNetworkPolicyRule `protobuf:"bytes,1,rep,name=kafka_rules,json=kafkaRules,proto3" json:"kafka_rules,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its generated
// message info so the instance can be reused.
func (x *KafkaNetworkPolicyRules) Reset() {
*x = KafkaNetworkPolicyRules{}
mi := &file_cilium_api_npds_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *KafkaNetworkPolicyRules) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags the type as a protobuf message.
func (*KafkaNetworkPolicyRules) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver yields a type-only view.
func (x *KafkaNetworkPolicyRules) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_npds_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use KafkaNetworkPolicyRules.ProtoReflect.Descriptor instead.
func (*KafkaNetworkPolicyRules) Descriptor() ([]byte, []int) {
return file_cilium_api_npds_proto_rawDescGZIP(), []int{7}
}

// GetKafkaRules returns the kafka_rules field; nil on a nil receiver.
func (x *KafkaNetworkPolicyRules) GetKafkaRules() []*KafkaNetworkPolicyRule {
if x != nil {
return x.KafkaRules
}
return nil
}
// A Kafka network policy rule, as a conjunction of predicates on Kafka
// requests. If all the predicates of a rule match a Kafka request, the request
// is allowed. Otherwise, it is denied.
type KafkaNetworkPolicyRule struct {
state protoimpl.MessageState `protogen:"open.v1"`
// The Kafka request's API version.
// If < 0, all Kafka requests are matched by this predicate.
ApiVersion int32 `protobuf:"varint,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
// Set of allowed API keys in the Kafka request.
// If none, all Kafka requests are matched by this predicate.
ApiKeys []int32 `protobuf:"varint,2,rep,packed,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
// The Kafka request's client ID.
// Optional. If not specified, all Kafka requests are matched by this
// predicate. If specified, this predicates only matches requests that contain
// this client ID, and never matches requests that don't contain any client
// ID.
ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
// The Kafka request's topic.
// Optional. If not specified, this rule will not consider the Kafka request's
// topics. If specified, this predicates only matches requests that contain
// this topic, and never matches requests that don't contain any topic.
// However, messages that can not contain a topic will also be matched.
Topic string `protobuf:"bytes,4,opt,name=topic,proto3" json:"topic,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its generated
// message info so the instance can be reused.
func (x *KafkaNetworkPolicyRule) Reset() {
*x = KafkaNetworkPolicyRule{}
mi := &file_cilium_api_npds_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *KafkaNetworkPolicyRule) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags the type as a protobuf message.
func (*KafkaNetworkPolicyRule) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver yields a type-only view.
func (x *KafkaNetworkPolicyRule) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_npds_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use KafkaNetworkPolicyRule.ProtoReflect.Descriptor instead.
func (*KafkaNetworkPolicyRule) Descriptor() ([]byte, []int) {
return file_cilium_api_npds_proto_rawDescGZIP(), []int{8}
}

// GetApiVersion returns the api_version field; 0 on a nil receiver.
func (x *KafkaNetworkPolicyRule) GetApiVersion() int32 {
if x != nil {
return x.ApiVersion
}
return 0
}

// GetApiKeys returns the api_keys field; nil on a nil receiver.
func (x *KafkaNetworkPolicyRule) GetApiKeys() []int32 {
if x != nil {
return x.ApiKeys
}
return nil
}

// GetClientId returns the client_id field; "" on a nil receiver.
func (x *KafkaNetworkPolicyRule) GetClientId() string {
if x != nil {
return x.ClientId
}
return ""
}

// GetTopic returns the topic field; "" on a nil receiver.
func (x *KafkaNetworkPolicyRule) GetTopic() string {
if x != nil {
return x.Topic
}
return ""
}
// A set of network policy rules that match generic L7 requests.
type L7NetworkPolicyRules struct {
state protoimpl.MessageState `protogen:"open.v1"`
// The set of allowing l7 policy rules.
// A request is allowed if any of these rules matches the request,
// and the request does not match any of the deny rules.
// Optional. If missing or empty then all requests are allowed, unless
// denied by a deny rule.
L7AllowRules []*L7NetworkPolicyRule `protobuf:"bytes,1,rep,name=l7_allow_rules,json=l7AllowRules,proto3" json:"l7_allow_rules,omitempty"`
// The set of denying l7 policy rules.
// A request is denied if any of these rules matches the request.
// A request that is not denied may be allowed by 'l7_allow_rules'.
// Optional.
L7DenyRules []*L7NetworkPolicyRule `protobuf:"bytes,2,rep,name=l7_deny_rules,json=l7DenyRules,proto3" json:"l7_deny_rules,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its generated
// message info so the instance can be reused.
func (x *L7NetworkPolicyRules) Reset() {
*x = L7NetworkPolicyRules{}
mi := &file_cilium_api_npds_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *L7NetworkPolicyRules) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags the type as a protobuf message.
func (*L7NetworkPolicyRules) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver yields a type-only view.
func (x *L7NetworkPolicyRules) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_npds_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use L7NetworkPolicyRules.ProtoReflect.Descriptor instead.
func (*L7NetworkPolicyRules) Descriptor() ([]byte, []int) {
return file_cilium_api_npds_proto_rawDescGZIP(), []int{9}
}

// GetL7AllowRules returns the l7_allow_rules field; nil on a nil receiver.
func (x *L7NetworkPolicyRules) GetL7AllowRules() []*L7NetworkPolicyRule {
if x != nil {
return x.L7AllowRules
}
return nil
}

// GetL7DenyRules returns the l7_deny_rules field; nil on a nil receiver.
func (x *L7NetworkPolicyRules) GetL7DenyRules() []*L7NetworkPolicyRule {
if x != nil {
return x.L7DenyRules
}
return nil
}
// A generic L7 policy rule, as a conjunction of predicates on l7 requests.
// If all the predicates of a rule match a request, the request is allowed.
// Otherwise, it is denied.
type L7NetworkPolicyRule struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Optional rule name, can be used in logging and error messages.
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
// Generic rule for Go extensions.
// Optional. If empty, matches any request. Not allowed if 'metadata_rule' is
// present.
Rule map[string]string `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Generic rule for Envoy metadata enforcement. All matchers must match for
// the rule to allow the request/connection. Optional. If empty, matches any
// request. Not allowed if 'rule' is present.
MetadataRule []*v32.MetadataMatcher `protobuf:"bytes,2,rep,name=metadata_rule,json=metadataRule,proto3" json:"metadata_rule,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its generated
// message info so the instance can be reused.
func (x *L7NetworkPolicyRule) Reset() {
*x = L7NetworkPolicyRule{}
mi := &file_cilium_api_npds_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *L7NetworkPolicyRule) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags the type as a protobuf message.
func (*L7NetworkPolicyRule) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver yields a type-only view.
func (x *L7NetworkPolicyRule) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_npds_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use L7NetworkPolicyRule.ProtoReflect.Descriptor instead.
func (*L7NetworkPolicyRule) Descriptor() ([]byte, []int) {
return file_cilium_api_npds_proto_rawDescGZIP(), []int{10}
}

// GetName returns the name field; "" on a nil receiver.
func (x *L7NetworkPolicyRule) GetName() string {
if x != nil {
return x.Name
}
return ""
}

// GetRule returns the rule map; nil on a nil receiver.
func (x *L7NetworkPolicyRule) GetRule() map[string]string {
if x != nil {
return x.Rule
}
return nil
}

// GetMetadataRule returns the metadata_rule field; nil on a nil receiver.
func (x *L7NetworkPolicyRule) GetMetadataRule() []*v32.MetadataMatcher {
if x != nil {
return x.MetadataRule
}
return nil
}
// Cilium's network policy manager fills this message with all currently known network policies.
type NetworkPoliciesConfigDump struct {
state protoimpl.MessageState `protogen:"open.v1"`
// The loaded networkpolicy configs.
Networkpolicies []*NetworkPolicy `protobuf:"bytes,1,rep,name=networkpolicies,proto3" json:"networkpolicies,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its generated
// message info so the instance can be reused.
func (x *NetworkPoliciesConfigDump) Reset() {
*x = NetworkPoliciesConfigDump{}
mi := &file_cilium_api_npds_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *NetworkPoliciesConfigDump) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags the type as a protobuf message.
func (*NetworkPoliciesConfigDump) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver yields a type-only view.
func (x *NetworkPoliciesConfigDump) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_npds_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use NetworkPoliciesConfigDump.ProtoReflect.Descriptor instead.
func (*NetworkPoliciesConfigDump) Descriptor() ([]byte, []int) {
return file_cilium_api_npds_proto_rawDescGZIP(), []int{11}
}

// GetNetworkpolicies returns the networkpolicies field; nil on a nil receiver.
func (x *NetworkPoliciesConfigDump) GetNetworkpolicies() []*NetworkPolicy {
if x != nil {
return x.Networkpolicies
}
return nil
}
// File_cilium_api_npds_proto is the runtime file descriptor for
// cilium/api/npds.proto; it is populated by file_cilium_api_npds_proto_init.
var File_cilium_api_npds_proto protoreflect.FileDescriptor

// file_cilium_api_npds_proto_rawDesc is the serialized FileDescriptorProto
// for cilium/api/npds.proto, embedded as a compile-time string constant.
// Generated data — do not edit by hand.
const file_cilium_api_npds_proto_rawDesc = "" +
"\n" +
"\x15cilium/api/npds.proto\x12\x06cilium\x1a\"envoy/config/core/v3/address.proto\x1a,envoy/config/route/v3/route_components.proto\x1a*envoy/service/discovery/v3/discovery.proto\x1a$envoy/type/matcher/v3/metadata.proto\x1a\x1cgoogle/api/annotations.proto\x1a envoy/annotations/resource.proto\x1a\x17validate/validate.proto\"\xbd\x02\n" +
"\rNetworkPolicy\x123\n" +
"\fendpoint_ips\x18\x01 \x03(\tB\x10\xfaB\r\x92\x01\n" +
"\b\x01\x10\x02\"\x04r\x02\x10\x01R\vendpointIps\x12\x1f\n" +
"\vendpoint_id\x18\x02 \x01(\x04R\n" +
"endpointId\x12T\n" +
"\x19ingress_per_port_policies\x18\x03 \x03(\v2\x19.cilium.PortNetworkPolicyR\x16ingressPerPortPolicies\x12R\n" +
"\x18egress_per_port_policies\x18\x04 \x03(\v2\x19.cilium.PortNetworkPolicyR\x15egressPerPortPolicies\x12,\n" +
"\x12conntrack_map_name\x18\x05 \x01(\tR\x10conntrackMapName\"\xd7\x01\n" +
"\x11PortNetworkPolicy\x12\x1d\n" +
"\x04port\x18\x01 \x01(\rB\t\xfaB\x06*\x04\x18\xff\xff\x03R\x04port\x12$\n" +
"\bend_port\x18\x04 \x01(\rB\t\xfaB\x06*\x04\x18\xff\xff\x03R\aendPort\x12H\n" +
"\bprotocol\x18\x02 \x01(\x0e2,.envoy.config.core.v3.SocketAddress.ProtocolR\bprotocol\x123\n" +
"\x05rules\x18\x03 \x03(\v2\x1d.cilium.PortNetworkPolicyRuleR\x05rules\"\xac\x02\n" +
"\n" +
"TLSContext\x12\x1d\n" +
"\n" +
"trusted_ca\x18\x01 \x01(\tR\ttrustedCa\x12+\n" +
"\x11certificate_chain\x18\x02 \x01(\tR\x10certificateChain\x12\x1f\n" +
"\vprivate_key\x18\x03 \x01(\tR\n" +
"privateKey\x12!\n" +
"\fserver_names\x18\x04 \x03(\tR\vserverNames\x12A\n" +
"\x1dvalidation_context_sds_secret\x18\x05 \x01(\tR\x1avalidationContextSdsSecret\x12$\n" +
"\x0etls_sds_secret\x18\x06 \x01(\tR\ftlsSdsSecret\x12%\n" +
"\x0ealpn_protocols\x18\a \x03(\tR\ralpnProtocols\"\xbd\x04\n" +
"\x15PortNetworkPolicyRule\x12\x1e\n" +
"\n" +
"precedence\x18\n" +
" \x01(\rR\n" +
"precedence\x12\x12\n" +
"\x04deny\x18\b \x01(\bR\x04deny\x12\x19\n" +
"\bproxy_id\x18\t \x01(\rR\aproxyId\x12\x12\n" +
"\x04name\x18\x05 \x01(\tR\x04name\x12'\n" +
"\x0fremote_policies\x18\a \x03(\rR\x0eremotePolicies\x12H\n" +
"\x16downstream_tls_context\x18\x03 \x01(\v2\x12.cilium.TLSContextR\x14downstreamTlsContext\x12D\n" +
"\x14upstream_tls_context\x18\x04 \x01(\v2\x12.cilium.TLSContextR\x12upstreamTlsContext\x12!\n" +
"\fserver_names\x18\x06 \x03(\tR\vserverNames\x12\x19\n" +
"\bl7_proto\x18\x02 \x01(\tR\al7Proto\x12?\n" +
"\n" +
"http_rules\x18d \x01(\v2\x1e.cilium.HttpNetworkPolicyRulesH\x00R\thttpRules\x12B\n" +
"\vkafka_rules\x18e \x01(\v2\x1f.cilium.KafkaNetworkPolicyRulesH\x00R\n" +
"kafkaRules\x129\n" +
"\bl7_rules\x18f \x01(\v2\x1c.cilium.L7NetworkPolicyRulesH\x00R\al7RulesB\x04\n" +
"\x02l7J\x04\b\x01\x10\x02\"`\n" +
"\x16HttpNetworkPolicyRules\x12F\n" +
"\n" +
"http_rules\x18\x01 \x03(\v2\x1d.cilium.HttpNetworkPolicyRuleB\b\xfaB\x05\x92\x01\x02\b\x01R\thttpRules\"\xd2\x03\n" +
"\vHeaderMatch\x12\x1b\n" +
"\x04name\x18\x01 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\x04name\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value\x12B\n" +
"\fmatch_action\x18\x03 \x01(\x0e2\x1f.cilium.HeaderMatch.MatchActionR\vmatchAction\x12K\n" +
"\x0fmismatch_action\x18\x04 \x01(\x0e2\".cilium.HeaderMatch.MismatchActionR\x0emismatchAction\x12(\n" +
"\x10value_sds_secret\x18\x05 \x01(\tR\x0evalueSdsSecret\"L\n" +
"\vMatchAction\x12\x15\n" +
"\x11CONTINUE_ON_MATCH\x10\x00\x12\x11\n" +
"\rFAIL_ON_MATCH\x10\x01\x12\x13\n" +
"\x0fDELETE_ON_MATCH\x10\x02\"\x86\x01\n" +
"\x0eMismatchAction\x12\x14\n" +
"\x10FAIL_ON_MISMATCH\x10\x00\x12\x18\n" +
"\x14CONTINUE_ON_MISMATCH\x10\x01\x12\x13\n" +
"\x0fADD_ON_MISMATCH\x10\x02\x12\x16\n" +
"\x12DELETE_ON_MISMATCH\x10\x03\x12\x17\n" +
"\x13REPLACE_ON_MISMATCH\x10\x04\"\x93\x01\n" +
"\x15HttpNetworkPolicyRule\x12>\n" +
"\aheaders\x18\x01 \x03(\v2$.envoy.config.route.v3.HeaderMatcherR\aheaders\x12:\n" +
"\x0eheader_matches\x18\x02 \x03(\v2\x13.cilium.HeaderMatchR\rheaderMatches\"d\n" +
"\x17KafkaNetworkPolicyRules\x12I\n" +
"\vkafka_rules\x18\x01 \x03(\v2\x1e.cilium.KafkaNetworkPolicyRuleB\b\xfaB\x05\x92\x01\x02\b\x01R\n" +
"kafkaRules\"\xbe\x01\n" +
"\x16KafkaNetworkPolicyRule\x12\x1f\n" +
"\vapi_version\x18\x01 \x01(\x05R\n" +
"apiVersion\x12\x19\n" +
"\bapi_keys\x18\x02 \x03(\x05R\aapiKeys\x125\n" +
"\tclient_id\x18\x03 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9._-]*$R\bclientId\x121\n" +
"\x05topic\x18\x04 \x01(\tB\x1b\xfaB\x18r\x16\x18\xff\x012\x11^[a-zA-Z0-9._-]*$R\x05topic\"\x9a\x01\n" +
"\x14L7NetworkPolicyRules\x12A\n" +
"\x0el7_allow_rules\x18\x01 \x03(\v2\x1b.cilium.L7NetworkPolicyRuleR\fl7AllowRules\x12?\n" +
"\rl7_deny_rules\x18\x02 \x03(\v2\x1b.cilium.L7NetworkPolicyRuleR\vl7DenyRules\"\xea\x01\n" +
"\x13L7NetworkPolicyRule\x12\x12\n" +
"\x04name\x18\x03 \x01(\tR\x04name\x129\n" +
"\x04rule\x18\x01 \x03(\v2%.cilium.L7NetworkPolicyRule.RuleEntryR\x04rule\x12K\n" +
"\rmetadata_rule\x18\x02 \x03(\v2&.envoy.type.matcher.v3.MetadataMatcherR\fmetadataRule\x1a7\n" +
"\tRuleEntry\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\\\n" +
"\x19NetworkPoliciesConfigDump\x12?\n" +
"\x0fnetworkpolicies\x18\x01 \x03(\v2\x15.cilium.NetworkPolicyR\x0fnetworkpolicies2\xda\x02\n" +
"\x1dNetworkPolicyDiscoveryService\x12z\n" +
"\x15StreamNetworkPolicies\x12,.envoy.service.discovery.v3.DiscoveryRequest\x1a-.envoy.service.discovery.v3.DiscoveryResponse\"\x00(\x010\x01\x12\x9e\x01\n" +
"\x14FetchNetworkPolicies\x12,.envoy.service.discovery.v3.DiscoveryRequest\x1a-.envoy.service.discovery.v3.DiscoveryResponse\")\x82\xd3\xe4\x93\x02#:\x01*\"\x1e/v3/discovery:network_policies\x1a\x1c\x8a\xa4\x96\xf3\a\x16\n" +
"\x14cilium.NetworkPolicyB.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"
// Lazily-computed gzip-compressed copy of the raw descriptor, shared by the
// deprecated Descriptor() methods above.
var (
file_cilium_api_npds_proto_rawDescOnce sync.Once
file_cilium_api_npds_proto_rawDescData []byte
)

// file_cilium_api_npds_proto_rawDescGZIP compresses the raw descriptor once
// (guarded by a sync.Once) and returns the cached result on later calls.
func file_cilium_api_npds_proto_rawDescGZIP() []byte {
file_cilium_api_npds_proto_rawDescOnce.Do(func() {
file_cilium_api_npds_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_npds_proto_rawDesc), len(file_cilium_api_npds_proto_rawDesc)))
})
return file_cilium_api_npds_proto_rawDescData
}
// Per-file type-info tables filled in by the TypeBuilder during init.
var file_cilium_api_npds_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_cilium_api_npds_proto_msgTypes = make([]protoimpl.MessageInfo, 13)

// file_cilium_api_npds_proto_goTypes maps descriptor type indices to the Go
// types generated for this file (enums first, then messages, then imports).
var file_cilium_api_npds_proto_goTypes = []any{
(HeaderMatch_MatchAction)(0), // 0: cilium.HeaderMatch.MatchAction
(HeaderMatch_MismatchAction)(0), // 1: cilium.HeaderMatch.MismatchAction
(*NetworkPolicy)(nil), // 2: cilium.NetworkPolicy
(*PortNetworkPolicy)(nil), // 3: cilium.PortNetworkPolicy
(*TLSContext)(nil), // 4: cilium.TLSContext
(*PortNetworkPolicyRule)(nil), // 5: cilium.PortNetworkPolicyRule
(*HttpNetworkPolicyRules)(nil), // 6: cilium.HttpNetworkPolicyRules
(*HeaderMatch)(nil), // 7: cilium.HeaderMatch
(*HttpNetworkPolicyRule)(nil), // 8: cilium.HttpNetworkPolicyRule
(*KafkaNetworkPolicyRules)(nil), // 9: cilium.KafkaNetworkPolicyRules
(*KafkaNetworkPolicyRule)(nil), // 10: cilium.KafkaNetworkPolicyRule
(*L7NetworkPolicyRules)(nil), // 11: cilium.L7NetworkPolicyRules
(*L7NetworkPolicyRule)(nil), // 12: cilium.L7NetworkPolicyRule
(*NetworkPoliciesConfigDump)(nil), // 13: cilium.NetworkPoliciesConfigDump
nil, // 14: cilium.L7NetworkPolicyRule.RuleEntry
(v3.SocketAddress_Protocol)(0), // 15: envoy.config.core.v3.SocketAddress.Protocol
(*v31.HeaderMatcher)(nil), // 16: envoy.config.route.v3.HeaderMatcher
(*v32.MetadataMatcher)(nil), // 17: envoy.type.matcher.v3.MetadataMatcher
(*v33.DiscoveryRequest)(nil), // 18: envoy.service.discovery.v3.DiscoveryRequest
(*v33.DiscoveryResponse)(nil), // 19: envoy.service.discovery.v3.DiscoveryResponse
}

// file_cilium_api_npds_proto_depIdxs lists, for each message field and RPC
// method, the index into goTypes of the referenced type; the trailing
// entries delimit the field/input/output sub-lists.
var file_cilium_api_npds_proto_depIdxs = []int32{
3, // 0: cilium.NetworkPolicy.ingress_per_port_policies:type_name -> cilium.PortNetworkPolicy
3, // 1: cilium.NetworkPolicy.egress_per_port_policies:type_name -> cilium.PortNetworkPolicy
15, // 2: cilium.PortNetworkPolicy.protocol:type_name -> envoy.config.core.v3.SocketAddress.Protocol
5, // 3: cilium.PortNetworkPolicy.rules:type_name -> cilium.PortNetworkPolicyRule
4, // 4: cilium.PortNetworkPolicyRule.downstream_tls_context:type_name -> cilium.TLSContext
4, // 5: cilium.PortNetworkPolicyRule.upstream_tls_context:type_name -> cilium.TLSContext
6, // 6: cilium.PortNetworkPolicyRule.http_rules:type_name -> cilium.HttpNetworkPolicyRules
9, // 7: cilium.PortNetworkPolicyRule.kafka_rules:type_name -> cilium.KafkaNetworkPolicyRules
11, // 8: cilium.PortNetworkPolicyRule.l7_rules:type_name -> cilium.L7NetworkPolicyRules
8, // 9: cilium.HttpNetworkPolicyRules.http_rules:type_name -> cilium.HttpNetworkPolicyRule
0, // 10: cilium.HeaderMatch.match_action:type_name -> cilium.HeaderMatch.MatchAction
1, // 11: cilium.HeaderMatch.mismatch_action:type_name -> cilium.HeaderMatch.MismatchAction
16, // 12: cilium.HttpNetworkPolicyRule.headers:type_name -> envoy.config.route.v3.HeaderMatcher
7, // 13: cilium.HttpNetworkPolicyRule.header_matches:type_name -> cilium.HeaderMatch
10, // 14: cilium.KafkaNetworkPolicyRules.kafka_rules:type_name -> cilium.KafkaNetworkPolicyRule
12, // 15: cilium.L7NetworkPolicyRules.l7_allow_rules:type_name -> cilium.L7NetworkPolicyRule
12, // 16: cilium.L7NetworkPolicyRules.l7_deny_rules:type_name -> cilium.L7NetworkPolicyRule
14, // 17: cilium.L7NetworkPolicyRule.rule:type_name -> cilium.L7NetworkPolicyRule.RuleEntry
17, // 18: cilium.L7NetworkPolicyRule.metadata_rule:type_name -> envoy.type.matcher.v3.MetadataMatcher
2, // 19: cilium.NetworkPoliciesConfigDump.networkpolicies:type_name -> cilium.NetworkPolicy
18, // 20: cilium.NetworkPolicyDiscoveryService.StreamNetworkPolicies:input_type -> envoy.service.discovery.v3.DiscoveryRequest
18, // 21: cilium.NetworkPolicyDiscoveryService.FetchNetworkPolicies:input_type -> envoy.service.discovery.v3.DiscoveryRequest
19, // 22: cilium.NetworkPolicyDiscoveryService.StreamNetworkPolicies:output_type -> envoy.service.discovery.v3.DiscoveryResponse
19, // 23: cilium.NetworkPolicyDiscoveryService.FetchNetworkPolicies:output_type -> envoy.service.discovery.v3.DiscoveryResponse
22, // [22:24] is the sub-list for method output_type
20, // [20:22] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
}
// Register the file's types with the protobuf runtime at package load time.
func init() { file_cilium_api_npds_proto_init() }

// file_cilium_api_npds_proto_init builds and registers the file descriptor
// and all generated type info. It is idempotent: a second call returns
// immediately once File_cilium_api_npds_proto is set.
func file_cilium_api_npds_proto_init() {
if File_cilium_api_npds_proto != nil {
return
}
// msgTypes[3] is PortNetworkPolicyRule; declare its 'l7' oneof wrappers.
file_cilium_api_npds_proto_msgTypes[3].OneofWrappers = []any{
(*PortNetworkPolicyRule_HttpRules)(nil),
(*PortNetworkPolicyRule_KafkaRules)(nil),
(*PortNetworkPolicyRule_L7Rules)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_npds_proto_rawDesc), len(file_cilium_api_npds_proto_rawDesc)),
NumEnums: 2,
NumMessages: 13,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_cilium_api_npds_proto_goTypes,
DependencyIndexes: file_cilium_api_npds_proto_depIdxs,
EnumInfos: file_cilium_api_npds_proto_enumTypes,
MessageInfos: file_cilium_api_npds_proto_msgTypes,
}.Build()
File_cilium_api_npds_proto = out.File
// Release the construction-only tables so they can be collected.
file_cilium_api_npds_proto_goTypes = nil
file_cilium_api_npds_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6

// NetworkPolicyDiscoveryServiceClient is the client API for NetworkPolicyDiscoveryService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type NetworkPolicyDiscoveryServiceClient interface {
StreamNetworkPolicies(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient, error)
FetchNetworkPolicies(ctx context.Context, in *v33.DiscoveryRequest, opts ...grpc.CallOption) (*v33.DiscoveryResponse, error)
}

// networkPolicyDiscoveryServiceClient implements the client interface over a
// gRPC connection.
type networkPolicyDiscoveryServiceClient struct {
cc grpc.ClientConnInterface
}

// NewNetworkPolicyDiscoveryServiceClient wraps an existing gRPC connection in
// a typed client for this service.
func NewNetworkPolicyDiscoveryServiceClient(cc grpc.ClientConnInterface) NetworkPolicyDiscoveryServiceClient {
return &networkPolicyDiscoveryServiceClient{cc}
}

// StreamNetworkPolicies opens the bidirectional policy discovery stream.
func (c *networkPolicyDiscoveryServiceClient) StreamNetworkPolicies(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient, error) {
stream, err := c.cc.NewStream(ctx, &_NetworkPolicyDiscoveryService_serviceDesc.Streams[0], "/cilium.NetworkPolicyDiscoveryService/StreamNetworkPolicies", opts...)
if err != nil {
return nil, err
}
x := &networkPolicyDiscoveryServiceStreamNetworkPoliciesClient{stream}
return x, nil
}

// NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient is the client-side
// view of the StreamNetworkPolicies stream.
type NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient interface {
Send(*v33.DiscoveryRequest) error
Recv() (*v33.DiscoveryResponse, error)
grpc.ClientStream
}

type networkPolicyDiscoveryServiceStreamNetworkPoliciesClient struct {
grpc.ClientStream
}

// Send writes one DiscoveryRequest to the stream.
func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesClient) Send(m *v33.DiscoveryRequest) error {
return x.ClientStream.SendMsg(m)
}

// Recv blocks until the next DiscoveryResponse arrives on the stream.
func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesClient) Recv() (*v33.DiscoveryResponse, error) {
m := new(v33.DiscoveryResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

// FetchNetworkPolicies performs the unary fetch RPC.
func (c *networkPolicyDiscoveryServiceClient) FetchNetworkPolicies(ctx context.Context, in *v33.DiscoveryRequest, opts ...grpc.CallOption) (*v33.DiscoveryResponse, error) {
out := new(v33.DiscoveryResponse)
err := c.cc.Invoke(ctx, "/cilium.NetworkPolicyDiscoveryService/FetchNetworkPolicies", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// NetworkPolicyDiscoveryServiceServer is the server API for NetworkPolicyDiscoveryService service.
type NetworkPolicyDiscoveryServiceServer interface {
StreamNetworkPolicies(NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer) error
FetchNetworkPolicies(context.Context, *v33.DiscoveryRequest) (*v33.DiscoveryResponse, error)
}

// UnimplementedNetworkPolicyDiscoveryServiceServer can be embedded to have forward compatible implementations.
type UnimplementedNetworkPolicyDiscoveryServiceServer struct {
}

// StreamNetworkPolicies returns codes.Unimplemented; override in embedders.
func (*UnimplementedNetworkPolicyDiscoveryServiceServer) StreamNetworkPolicies(NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer) error {
return status.Errorf(codes.Unimplemented, "method StreamNetworkPolicies not implemented")
}

// FetchNetworkPolicies returns codes.Unimplemented; override in embedders.
func (*UnimplementedNetworkPolicyDiscoveryServiceServer) FetchNetworkPolicies(context.Context, *v33.DiscoveryRequest) (*v33.DiscoveryResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FetchNetworkPolicies not implemented")
}

// RegisterNetworkPolicyDiscoveryServiceServer registers srv's handlers with
// the given gRPC server under this service's descriptor.
func RegisterNetworkPolicyDiscoveryServiceServer(s *grpc.Server, srv NetworkPolicyDiscoveryServiceServer) {
s.RegisterService(&_NetworkPolicyDiscoveryService_serviceDesc, srv)
}

// _NetworkPolicyDiscoveryService_StreamNetworkPolicies_Handler adapts the raw
// server stream to the typed StreamNetworkPolicies server interface.
func _NetworkPolicyDiscoveryService_StreamNetworkPolicies_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(NetworkPolicyDiscoveryServiceServer).StreamNetworkPolicies(&networkPolicyDiscoveryServiceStreamNetworkPoliciesServer{stream})
}

// NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer is the server-side
// view of the StreamNetworkPolicies stream.
type NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer interface {
Send(*v33.DiscoveryResponse) error
Recv() (*v33.DiscoveryRequest, error)
grpc.ServerStream
}

type networkPolicyDiscoveryServiceStreamNetworkPoliciesServer struct {
grpc.ServerStream
}

// Send writes one DiscoveryResponse to the stream.
func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesServer) Send(m *v33.DiscoveryResponse) error {
return x.ServerStream.SendMsg(m)
}

// Recv blocks until the next DiscoveryRequest arrives on the stream.
func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesServer) Recv() (*v33.DiscoveryRequest, error) {
m := new(v33.DiscoveryRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

// _NetworkPolicyDiscoveryService_FetchNetworkPolicies_Handler decodes the
// request, then invokes the service method directly or through the
// configured unary interceptor.
func _NetworkPolicyDiscoveryService_FetchNetworkPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(v33.DiscoveryRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NetworkPolicyDiscoveryServiceServer).FetchNetworkPolicies(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/cilium.NetworkPolicyDiscoveryService/FetchNetworkPolicies",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NetworkPolicyDiscoveryServiceServer).FetchNetworkPolicies(ctx, req.(*v33.DiscoveryRequest))
}
return interceptor(ctx, in, info, handler)
}
// _NetworkPolicyDiscoveryService_serviceDesc wires the service's method and
// stream names to their generated handlers for grpc.Server registration.
var _NetworkPolicyDiscoveryService_serviceDesc = grpc.ServiceDesc{
ServiceName: "cilium.NetworkPolicyDiscoveryService",
HandlerType: (*NetworkPolicyDiscoveryServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "FetchNetworkPolicies",
Handler: _NetworkPolicyDiscoveryService_FetchNetworkPolicies_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "StreamNetworkPolicies",
Handler: _NetworkPolicyDiscoveryService_StreamNetworkPolicies_Handler,
// Bidirectional: both sides stream.
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "cilium/api/npds.proto",
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/npds.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)
// ensure the imports are used
// (blank-identifier references keep the generated import set compiling even
// when a given helper package is unused by the emitted validators)
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
_ = sort.Sort
_ = v3.SocketAddress_Protocol(0)
)
// Validate checks the field values on NetworkPolicy with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
// A nil receiver is treated as valid.
func (m *NetworkPolicy) Validate() error {
return m.validate(false)
}

// ValidateAll checks the field values on NetworkPolicy with the rules defined
// in the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in NetworkPolicyMultiError, or
// nil if none found.
// A nil receiver is treated as valid.
func (m *NetworkPolicy) ValidateAll() error {
return m.validate(true)
}
func (m *NetworkPolicy) validate(all bool) error {
if m == nil {
return nil
}
var errors []error
if l := len(m.GetEndpointIps()); l < 1 || l > 2 {
err := NetworkPolicyValidationError{
field: "EndpointIps",
reason: "value must contain between 1 and 2 items, inclusive",
}
if !all {
return err
}
errors = append(errors, err)
}
for idx, item := range m.GetEndpointIps() {
_, _ = idx, item
if utf8.RuneCountInString(item) < 1 {
err := NetworkPolicyValidationError{
field: fmt.Sprintf("EndpointIps[%v]", idx),
reason: "value length must be at least 1 runes",
}
if !all {
return err
}
errors = append(errors, err)
}
}
// no validation rules for EndpointId
for idx, item := range m.GetIngressPerPortPolicies() {
_, _ = idx, item
if all {
switch v := interface{}(item).(type) {
case interface{ ValidateAll() error }:
if err := v.ValidateAll(); err != nil {
errors = append(errors, NetworkPolicyValidationError{
field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx),
reason: "embedded message failed validation",
cause: err,
})
}
case interface{ Validate() error }:
if err := v.Validate(); err != nil {
errors = append(errors, NetworkPolicyValidationError{
field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx),
reason: "embedded message failed validation",
cause: err,
})
}
}
} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return NetworkPolicyValidationError{
field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx),
reason: "embedded message failed validation",
cause: err,
}
}
}
}
for idx, item := range m.GetEgressPerPortPolicies() {
_, _ = idx, item
if all {
switch v := interface{}(item).(type) {
case interface{ ValidateAll() error }:
if err := v.ValidateAll(); err != nil {
errors = append(errors, NetworkPolicyValidationError{
field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx),
reason: "embedded message failed validation",
cause: err,
})
}
case interface{ Validate() error }:
if err := v.Validate(); err != nil {
errors = append(errors, NetworkPolicyValidationError{
field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx),
reason: "embedded message failed validation",
cause: err,
})
}
}
} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return NetworkPolicyValidationError{
field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx),
reason: "embedded message failed validation",
cause: err,
}
}
}
}
// no validation rules for ConntrackMapName
if len(errors) > 0 {
return NetworkPolicyMultiError(errors)
}
return nil
}
// NetworkPolicyMultiError is an error wrapping multiple validation errors
// returned by NetworkPolicy.ValidateAll() if the designated constraints
// aren't met.
type NetworkPolicyMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m NetworkPolicyMultiError) Error() string {
msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m NetworkPolicyMultiError) AllErrors() []error { return m }
// NetworkPolicyValidationError is the validation error returned by
// NetworkPolicy.Validate if the designated constraints aren't met.
type NetworkPolicyValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e NetworkPolicyValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e NetworkPolicyValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e NetworkPolicyValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e NetworkPolicyValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e NetworkPolicyValidationError) ErrorName() string { return "NetworkPolicyValidationError" }
// Error satisfies the builtin error interface
func (e NetworkPolicyValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sNetworkPolicy.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = NetworkPolicyValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = NetworkPolicyValidationError{}
// Validate checks the field values on PortNetworkPolicy with the rules defined
// in the proto definition for this message. If any rules are violated, the
// first error encountered is returned, or nil if there are no violations.
func (m *PortNetworkPolicy) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on PortNetworkPolicy with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// PortNetworkPolicyMultiError, or nil if none found.
func (m *PortNetworkPolicy) ValidateAll() error {
	return m.validate(true)
}

// validate implements both entry points: all=false fails fast on the first
// violation, all=true collects every violation into a
// PortNetworkPolicyMultiError.
func (m *PortNetworkPolicy) validate(all bool) error {
	// A nil message is treated as valid.
	if m == nil {
		return nil
	}

	// Shadows the "errors" package intentionally (generated accumulator).
	var errors []error

	// Port must fit in 16 bits.
	if m.GetPort() > 65535 {
		err := PortNetworkPolicyValidationError{
			field:  "Port",
			reason: "value must be less than or equal to 65535",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	// EndPort must also fit in 16 bits.
	if m.GetEndPort() > 65535 {
		err := PortNetworkPolicyValidationError{
			field:  "EndPort",
			reason: "value must be less than or equal to 65535",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	// no validation rules for Protocol

	// Recurse into each rule; in all=true mode prefer ValidateAll so nested
	// violations are collected as well.
	for idx, item := range m.GetRules() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, PortNetworkPolicyValidationError{
						field:  fmt.Sprintf("Rules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, PortNetworkPolicyValidationError{
						field:  fmt.Sprintf("Rules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return PortNetworkPolicyValidationError{
					field:  fmt.Sprintf("Rules[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	if len(errors) > 0 {
		return PortNetworkPolicyMultiError(errors)
	}

	return nil
}
// PortNetworkPolicyMultiError is an error wrapping multiple validation errors
// returned by PortNetworkPolicy.ValidateAll() if the designated constraints
// aren't met.
type PortNetworkPolicyMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PortNetworkPolicyMultiError) Error() string {
msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m PortNetworkPolicyMultiError) AllErrors() []error { return m }
// PortNetworkPolicyValidationError is the validation error returned by
// PortNetworkPolicy.Validate if the designated constraints aren't met.
type PortNetworkPolicyValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e PortNetworkPolicyValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e PortNetworkPolicyValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e PortNetworkPolicyValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e PortNetworkPolicyValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e PortNetworkPolicyValidationError) ErrorName() string {
return "PortNetworkPolicyValidationError"
}
// Error satisfies the builtin error interface
func (e PortNetworkPolicyValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sPortNetworkPolicy.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = PortNetworkPolicyValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = PortNetworkPolicyValidationError{}
// Validate checks the field values on TLSContext with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *TLSContext) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on TLSContext with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in TLSContextMultiError, or
// nil if none found.
func (m *TLSContext) ValidateAll() error {
	return m.validate(true)
}

// validate implements both entry points. TLSContext declares no field
// constraints in the proto, so this always returns nil; the scaffolding is
// kept so the generated API stays uniform across messages.
func (m *TLSContext) validate(all bool) error {
	// A nil message is treated as valid.
	if m == nil {
		return nil
	}

	// Shadows the "errors" package intentionally (generated accumulator);
	// never appended to for this message.
	var errors []error

	// no validation rules for TrustedCa

	// no validation rules for CertificateChain

	// no validation rules for PrivateKey

	// no validation rules for ValidationContextSdsSecret

	// no validation rules for TlsSdsSecret

	if len(errors) > 0 {
		return TLSContextMultiError(errors)
	}

	return nil
}
// TLSContextMultiError is an error wrapping multiple validation errors
// returned by TLSContext.ValidateAll() if the designated constraints aren't met.
type TLSContextMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m TLSContextMultiError) Error() string {
msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m TLSContextMultiError) AllErrors() []error { return m }
// TLSContextValidationError is the validation error returned by
// TLSContext.Validate if the designated constraints aren't met.
type TLSContextValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e TLSContextValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e TLSContextValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e TLSContextValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e TLSContextValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e TLSContextValidationError) ErrorName() string { return "TLSContextValidationError" }
// Error satisfies the builtin error interface
func (e TLSContextValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sTLSContext.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = TLSContextValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = TLSContextValidationError{}
// Validate checks the field values on PortNetworkPolicyRule with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *PortNetworkPolicyRule) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on PortNetworkPolicyRule with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// PortNetworkPolicyRuleMultiError, or nil if none found.
func (m *PortNetworkPolicyRule) ValidateAll() error {
	return m.validate(true)
}

// validate implements both entry points: all=false fails fast on the first
// violation, all=true collects every violation into a
// PortNetworkPolicyRuleMultiError. Scalar fields carry no constraints here;
// the work is recursing into the TLS contexts and the L7 oneof.
func (m *PortNetworkPolicyRule) validate(all bool) error {
	// A nil message is treated as valid.
	if m == nil {
		return nil
	}

	// Shadows the "errors" package intentionally (generated accumulator).
	var errors []error

	// no validation rules for Precedence

	// no validation rules for Deny

	// no validation rules for ProxyId

	// no validation rules for Name

	// Recurse into the downstream TLS context, if set; in all=true mode
	// prefer ValidateAll so nested violations are collected too.
	if all {
		switch v := interface{}(m.GetDownstreamTlsContext()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, PortNetworkPolicyRuleValidationError{
					field:  "DownstreamTlsContext",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, PortNetworkPolicyRuleValidationError{
					field:  "DownstreamTlsContext",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetDownstreamTlsContext()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return PortNetworkPolicyRuleValidationError{
				field:  "DownstreamTlsContext",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// Recurse into the upstream TLS context (same pattern as downstream).
	if all {
		switch v := interface{}(m.GetUpstreamTlsContext()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, PortNetworkPolicyRuleValidationError{
					field:  "UpstreamTlsContext",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, PortNetworkPolicyRuleValidationError{
					field:  "UpstreamTlsContext",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetUpstreamTlsContext()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return PortNetworkPolicyRuleValidationError{
				field:  "UpstreamTlsContext",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// no validation rules for L7Proto

	// Validate whichever member of the L7 oneof is populated. Each case first
	// rejects a typed-nil wrapper, then recurses into the wrapped message.
	switch v := m.L7.(type) {
	case *PortNetworkPolicyRule_HttpRules:
		if v == nil {
			err := PortNetworkPolicyRuleValidationError{
				field:  "L7",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}

		if all {
			switch v := interface{}(m.GetHttpRules()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, PortNetworkPolicyRuleValidationError{
						field:  "HttpRules",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, PortNetworkPolicyRuleValidationError{
						field:  "HttpRules",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetHttpRules()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return PortNetworkPolicyRuleValidationError{
					field:  "HttpRules",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	case *PortNetworkPolicyRule_KafkaRules:
		if v == nil {
			err := PortNetworkPolicyRuleValidationError{
				field:  "L7",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}

		if all {
			switch v := interface{}(m.GetKafkaRules()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, PortNetworkPolicyRuleValidationError{
						field:  "KafkaRules",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, PortNetworkPolicyRuleValidationError{
						field:  "KafkaRules",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetKafkaRules()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return PortNetworkPolicyRuleValidationError{
					field:  "KafkaRules",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	case *PortNetworkPolicyRule_L7Rules:
		if v == nil {
			err := PortNetworkPolicyRuleValidationError{
				field:  "L7",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}

		if all {
			switch v := interface{}(m.GetL7Rules()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, PortNetworkPolicyRuleValidationError{
						field:  "L7Rules",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, PortNetworkPolicyRuleValidationError{
						field:  "L7Rules",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetL7Rules()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return PortNetworkPolicyRuleValidationError{
					field:  "L7Rules",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	default:
		_ = v // ensures v is used
	}

	if len(errors) > 0 {
		return PortNetworkPolicyRuleMultiError(errors)
	}

	return nil
}
// PortNetworkPolicyRuleMultiError is an error wrapping multiple validation
// errors returned by PortNetworkPolicyRule.ValidateAll() if the designated
// constraints aren't met.
type PortNetworkPolicyRuleMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PortNetworkPolicyRuleMultiError) Error() string {
msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m PortNetworkPolicyRuleMultiError) AllErrors() []error { return m }
// PortNetworkPolicyRuleValidationError is the validation error returned by
// PortNetworkPolicyRule.Validate if the designated constraints aren't met.
type PortNetworkPolicyRuleValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e PortNetworkPolicyRuleValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e PortNetworkPolicyRuleValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e PortNetworkPolicyRuleValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e PortNetworkPolicyRuleValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e PortNetworkPolicyRuleValidationError) ErrorName() string {
return "PortNetworkPolicyRuleValidationError"
}
// Error satisfies the builtin error interface
func (e PortNetworkPolicyRuleValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sPortNetworkPolicyRule.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = PortNetworkPolicyRuleValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = PortNetworkPolicyRuleValidationError{}
// Validate checks the field values on HttpNetworkPolicyRules with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *HttpNetworkPolicyRules) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on HttpNetworkPolicyRules with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// HttpNetworkPolicyRulesMultiError, or nil if none found.
func (m *HttpNetworkPolicyRules) ValidateAll() error {
	return m.validate(true)
}

// validate implements both entry points: all=false fails fast, all=true
// collects every violation into an HttpNetworkPolicyRulesMultiError.
func (m *HttpNetworkPolicyRules) validate(all bool) error {
	// A nil message is treated as valid.
	if m == nil {
		return nil
	}

	// Shadows the "errors" package intentionally (generated accumulator).
	var errors []error

	// repeated http_rules must not be empty.
	if len(m.GetHttpRules()) < 1 {
		err := HttpNetworkPolicyRulesValidationError{
			field:  "HttpRules",
			reason: "value must contain at least 1 item(s)",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	// Recurse into each HTTP rule; in all=true mode prefer ValidateAll so
	// nested violations are collected too.
	for idx, item := range m.GetHttpRules() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, HttpNetworkPolicyRulesValidationError{
						field:  fmt.Sprintf("HttpRules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, HttpNetworkPolicyRulesValidationError{
						field:  fmt.Sprintf("HttpRules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return HttpNetworkPolicyRulesValidationError{
					field:  fmt.Sprintf("HttpRules[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	if len(errors) > 0 {
		return HttpNetworkPolicyRulesMultiError(errors)
	}

	return nil
}
// HttpNetworkPolicyRulesMultiError is an error wrapping multiple validation
// errors returned by HttpNetworkPolicyRules.ValidateAll() if the designated
// constraints aren't met.
type HttpNetworkPolicyRulesMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HttpNetworkPolicyRulesMultiError) Error() string {
msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m HttpNetworkPolicyRulesMultiError) AllErrors() []error { return m }
// HttpNetworkPolicyRulesValidationError is the validation error returned by
// HttpNetworkPolicyRules.Validate if the designated constraints aren't met.
type HttpNetworkPolicyRulesValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e HttpNetworkPolicyRulesValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e HttpNetworkPolicyRulesValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e HttpNetworkPolicyRulesValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e HttpNetworkPolicyRulesValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e HttpNetworkPolicyRulesValidationError) ErrorName() string {
return "HttpNetworkPolicyRulesValidationError"
}
// Error satisfies the builtin error interface
func (e HttpNetworkPolicyRulesValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHttpNetworkPolicyRules.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = HttpNetworkPolicyRulesValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = HttpNetworkPolicyRulesValidationError{}
// Validate checks the field values on HeaderMatch with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *HeaderMatch) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on HeaderMatch with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in HeaderMatchMultiError, or
// nil if none found.
func (m *HeaderMatch) ValidateAll() error {
	return m.validate(true)
}

// validate implements both entry points: all=false fails fast, all=true
// collects every violation into a HeaderMatchMultiError. The only declared
// constraint is that Name is non-empty.
func (m *HeaderMatch) validate(all bool) error {
	// A nil message is treated as valid.
	if m == nil {
		return nil
	}

	// Shadows the "errors" package intentionally (generated accumulator).
	var errors []error

	// The header name must be at least 1 rune long.
	if utf8.RuneCountInString(m.GetName()) < 1 {
		err := HeaderMatchValidationError{
			field:  "Name",
			reason: "value length must be at least 1 runes",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	// no validation rules for Value

	// no validation rules for MatchAction

	// no validation rules for MismatchAction

	// no validation rules for ValueSdsSecret

	if len(errors) > 0 {
		return HeaderMatchMultiError(errors)
	}

	return nil
}
// HeaderMatchMultiError is an error wrapping multiple validation errors
// returned by HeaderMatch.ValidateAll() if the designated constraints aren't met.
type HeaderMatchMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HeaderMatchMultiError) Error() string {
msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m HeaderMatchMultiError) AllErrors() []error { return m }
// HeaderMatchValidationError is the validation error returned by
// HeaderMatch.Validate if the designated constraints aren't met.
type HeaderMatchValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e HeaderMatchValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e HeaderMatchValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e HeaderMatchValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e HeaderMatchValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e HeaderMatchValidationError) ErrorName() string { return "HeaderMatchValidationError" }
// Error satisfies the builtin error interface
func (e HeaderMatchValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHeaderMatch.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = HeaderMatchValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = HeaderMatchValidationError{}
// Validate checks the field values on HttpNetworkPolicyRule with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *HttpNetworkPolicyRule) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on HttpNetworkPolicyRule with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// HttpNetworkPolicyRuleMultiError, or nil if none found.
func (m *HttpNetworkPolicyRule) ValidateAll() error {
	return m.validate(true)
}

// validate implements both entry points: all=false fails fast, all=true
// collects every violation into an HttpNetworkPolicyRuleMultiError. There are
// no scalar constraints; it only recurses into the repeated message fields.
func (m *HttpNetworkPolicyRule) validate(all bool) error {
	// A nil message is treated as valid.
	if m == nil {
		return nil
	}

	// Shadows the "errors" package intentionally (generated accumulator).
	var errors []error

	// Recurse into each header matcher; in all=true mode prefer ValidateAll
	// so nested violations are collected too.
	for idx, item := range m.GetHeaders() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, HttpNetworkPolicyRuleValidationError{
						field:  fmt.Sprintf("Headers[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, HttpNetworkPolicyRuleValidationError{
						field:  fmt.Sprintf("Headers[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return HttpNetworkPolicyRuleValidationError{
					field:  fmt.Sprintf("Headers[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	// Recurse into each HeaderMatch entry (same pattern as Headers).
	for idx, item := range m.GetHeaderMatches() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, HttpNetworkPolicyRuleValidationError{
						field:  fmt.Sprintf("HeaderMatches[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, HttpNetworkPolicyRuleValidationError{
						field:  fmt.Sprintf("HeaderMatches[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return HttpNetworkPolicyRuleValidationError{
					field:  fmt.Sprintf("HeaderMatches[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	if len(errors) > 0 {
		return HttpNetworkPolicyRuleMultiError(errors)
	}

	return nil
}
// HttpNetworkPolicyRuleMultiError is an error wrapping multiple validation
// errors returned by HttpNetworkPolicyRule.ValidateAll() if the designated
// constraints aren't met.
type HttpNetworkPolicyRuleMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HttpNetworkPolicyRuleMultiError) Error() string {
msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m HttpNetworkPolicyRuleMultiError) AllErrors() []error { return m }
// HttpNetworkPolicyRuleValidationError is the validation error returned by
// HttpNetworkPolicyRule.Validate if the designated constraints aren't met.
type HttpNetworkPolicyRuleValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e HttpNetworkPolicyRuleValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e HttpNetworkPolicyRuleValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e HttpNetworkPolicyRuleValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e HttpNetworkPolicyRuleValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e HttpNetworkPolicyRuleValidationError) ErrorName() string {
return "HttpNetworkPolicyRuleValidationError"
}
// Error satisfies the builtin error interface
func (e HttpNetworkPolicyRuleValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHttpNetworkPolicyRule.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = HttpNetworkPolicyRuleValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = HttpNetworkPolicyRuleValidationError{}
// Validate checks the field values on KafkaNetworkPolicyRules with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *KafkaNetworkPolicyRules) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on KafkaNetworkPolicyRules with the
// rules defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// KafkaNetworkPolicyRulesMultiError, or nil if none found.
func (m *KafkaNetworkPolicyRules) ValidateAll() error {
	return m.validate(true)
}

// validate implements both entry points: all=false fails fast, all=true
// collects every violation into a KafkaNetworkPolicyRulesMultiError.
func (m *KafkaNetworkPolicyRules) validate(all bool) error {
	// A nil message is treated as valid.
	if m == nil {
		return nil
	}

	// Shadows the "errors" package intentionally (generated accumulator).
	var errors []error

	// repeated kafka_rules must not be empty.
	if len(m.GetKafkaRules()) < 1 {
		err := KafkaNetworkPolicyRulesValidationError{
			field:  "KafkaRules",
			reason: "value must contain at least 1 item(s)",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	// Recurse into each Kafka rule; in all=true mode prefer ValidateAll so
	// nested violations are collected too.
	for idx, item := range m.GetKafkaRules() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, KafkaNetworkPolicyRulesValidationError{
						field:  fmt.Sprintf("KafkaRules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, KafkaNetworkPolicyRulesValidationError{
						field:  fmt.Sprintf("KafkaRules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return KafkaNetworkPolicyRulesValidationError{
					field:  fmt.Sprintf("KafkaRules[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	if len(errors) > 0 {
		return KafkaNetworkPolicyRulesMultiError(errors)
	}

	return nil
}
// KafkaNetworkPolicyRulesMultiError is an error wrapping multiple validation
// errors returned by KafkaNetworkPolicyRules.ValidateAll() if the designated
// constraints aren't met.
type KafkaNetworkPolicyRulesMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m KafkaNetworkPolicyRulesMultiError) Error() string {
msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m KafkaNetworkPolicyRulesMultiError) AllErrors() []error { return m }
// KafkaNetworkPolicyRulesValidationError is the validation error returned by
// KafkaNetworkPolicyRules.Validate if the designated constraints aren't met.
type KafkaNetworkPolicyRulesValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e KafkaNetworkPolicyRulesValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e KafkaNetworkPolicyRulesValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e KafkaNetworkPolicyRulesValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e KafkaNetworkPolicyRulesValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e KafkaNetworkPolicyRulesValidationError) ErrorName() string {
return "KafkaNetworkPolicyRulesValidationError"
}
// Error satisfies the builtin error interface
func (e KafkaNetworkPolicyRulesValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sKafkaNetworkPolicyRules.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = KafkaNetworkPolicyRulesValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = KafkaNetworkPolicyRulesValidationError{}
// Validate checks the field values on KafkaNetworkPolicyRule with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *KafkaNetworkPolicyRule) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on KafkaNetworkPolicyRule with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// KafkaNetworkPolicyRuleMultiError, or nil if none found.
func (m *KafkaNetworkPolicyRule) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// When all is false it returns on the first violation; when all is true it
// accumulates every violation.
func (m *KafkaNetworkPolicyRule) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for ApiVersion
	// proto constraint: client_id must match the pattern below.
	if !_KafkaNetworkPolicyRule_ClientId_Pattern.MatchString(m.GetClientId()) {
		err := KafkaNetworkPolicyRuleValidationError{
			field:  "ClientId",
			reason: "value does not match regex pattern \"^[a-zA-Z0-9._-]*$\"",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}
	// proto constraint: topic is limited to 255 runes (not bytes).
	if utf8.RuneCountInString(m.GetTopic()) > 255 {
		err := KafkaNetworkPolicyRuleValidationError{
			field:  "Topic",
			reason: "value length must be at most 255 runes",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}
	if !_KafkaNetworkPolicyRule_Topic_Pattern.MatchString(m.GetTopic()) {
		err := KafkaNetworkPolicyRuleValidationError{
			field:  "Topic",
			reason: "value does not match regex pattern \"^[a-zA-Z0-9._-]*$\"",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}
	if len(errors) > 0 {
		return KafkaNetworkPolicyRuleMultiError(errors)
	}
	return nil
}

// KafkaNetworkPolicyRuleMultiError is an error wrapping multiple validation
// errors returned by KafkaNetworkPolicyRule.ValidateAll() if the designated
// constraints aren't met.
type KafkaNetworkPolicyRuleMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m KafkaNetworkPolicyRuleMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m KafkaNetworkPolicyRuleMultiError) AllErrors() []error { return m }

// KafkaNetworkPolicyRuleValidationError is the validation error returned by
// KafkaNetworkPolicyRule.Validate if the designated constraints aren't met.
type KafkaNetworkPolicyRuleValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e KafkaNetworkPolicyRuleValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e KafkaNetworkPolicyRuleValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e KafkaNetworkPolicyRuleValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e KafkaNetworkPolicyRuleValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e KafkaNetworkPolicyRuleValidationError) ErrorName() string {
	return "KafkaNetworkPolicyRuleValidationError"
}

// Error satisfies the builtin error interface
func (e KafkaNetworkPolicyRuleValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sKafkaNetworkPolicyRule.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time interface-satisfaction assertions.
var _ error = KafkaNetworkPolicyRuleValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = KafkaNetworkPolicyRuleValidationError{}

// Patterns are compiled once at package init (MustCompile panics on an
// invalid pattern, which is acceptable for generated constants).
var _KafkaNetworkPolicyRule_ClientId_Pattern = regexp.MustCompile("^[a-zA-Z0-9._-]*$")

var _KafkaNetworkPolicyRule_Topic_Pattern = regexp.MustCompile("^[a-zA-Z0-9._-]*$")
// Validate checks the field values on L7NetworkPolicyRules with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *L7NetworkPolicyRules) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on L7NetworkPolicyRules with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// L7NetworkPolicyRulesMultiError, or nil if none found.
func (m *L7NetworkPolicyRules) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// It recursively validates the embedded allow and deny rule messages;
// neither list itself carries a constraint (both may be empty).
func (m *L7NetworkPolicyRules) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	for idx, item := range m.GetL7AllowRules() {
		_, _ = idx, item
		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, L7NetworkPolicyRulesValidationError{
						field:  fmt.Sprintf("L7AllowRules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, L7NetworkPolicyRulesValidationError{
						field:  fmt.Sprintf("L7AllowRules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return L7NetworkPolicyRulesValidationError{
					field:  fmt.Sprintf("L7AllowRules[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	}
	for idx, item := range m.GetL7DenyRules() {
		_, _ = idx, item
		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, L7NetworkPolicyRulesValidationError{
						field:  fmt.Sprintf("L7DenyRules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, L7NetworkPolicyRulesValidationError{
						field:  fmt.Sprintf("L7DenyRules[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return L7NetworkPolicyRulesValidationError{
					field:  fmt.Sprintf("L7DenyRules[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	}
	if len(errors) > 0 {
		return L7NetworkPolicyRulesMultiError(errors)
	}
	return nil
}

// L7NetworkPolicyRulesMultiError is an error wrapping multiple validation
// errors returned by L7NetworkPolicyRules.ValidateAll() if the designated
// constraints aren't met.
type L7NetworkPolicyRulesMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m L7NetworkPolicyRulesMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m L7NetworkPolicyRulesMultiError) AllErrors() []error { return m }

// L7NetworkPolicyRulesValidationError is the validation error returned by
// L7NetworkPolicyRules.Validate if the designated constraints aren't met.
type L7NetworkPolicyRulesValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e L7NetworkPolicyRulesValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e L7NetworkPolicyRulesValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e L7NetworkPolicyRulesValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e L7NetworkPolicyRulesValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e L7NetworkPolicyRulesValidationError) ErrorName() string {
	return "L7NetworkPolicyRulesValidationError"
}

// Error satisfies the builtin error interface
func (e L7NetworkPolicyRulesValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sL7NetworkPolicyRules.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time interface-satisfaction assertions.
var _ error = L7NetworkPolicyRulesValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = L7NetworkPolicyRulesValidationError{}
// Validate checks the field values on L7NetworkPolicyRule with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *L7NetworkPolicyRule) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on L7NetworkPolicyRule with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// L7NetworkPolicyRuleMultiError, or nil if none found.
func (m *L7NetworkPolicyRule) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// Only the embedded MetadataRule messages are validated recursively.
func (m *L7NetworkPolicyRule) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	// no validation rules for Name
	// no validation rules for Rule
	for idx, item := range m.GetMetadataRule() {
		_, _ = idx, item
		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, L7NetworkPolicyRuleValidationError{
						field:  fmt.Sprintf("MetadataRule[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, L7NetworkPolicyRuleValidationError{
						field:  fmt.Sprintf("MetadataRule[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return L7NetworkPolicyRuleValidationError{
					field:  fmt.Sprintf("MetadataRule[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	}
	if len(errors) > 0 {
		return L7NetworkPolicyRuleMultiError(errors)
	}
	return nil
}

// L7NetworkPolicyRuleMultiError is an error wrapping multiple validation
// errors returned by L7NetworkPolicyRule.ValidateAll() if the designated
// constraints aren't met.
type L7NetworkPolicyRuleMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m L7NetworkPolicyRuleMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m L7NetworkPolicyRuleMultiError) AllErrors() []error { return m }

// L7NetworkPolicyRuleValidationError is the validation error returned by
// L7NetworkPolicyRule.Validate if the designated constraints aren't met.
type L7NetworkPolicyRuleValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e L7NetworkPolicyRuleValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e L7NetworkPolicyRuleValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e L7NetworkPolicyRuleValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e L7NetworkPolicyRuleValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e L7NetworkPolicyRuleValidationError) ErrorName() string {
	return "L7NetworkPolicyRuleValidationError"
}

// Error satisfies the builtin error interface
func (e L7NetworkPolicyRuleValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sL7NetworkPolicyRule.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time interface-satisfaction assertions.
var _ error = L7NetworkPolicyRuleValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = L7NetworkPolicyRuleValidationError{}
// Validate checks the field values on NetworkPoliciesConfigDump with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *NetworkPoliciesConfigDump) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on NetworkPoliciesConfigDump with the
// rules defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// NetworkPoliciesConfigDumpMultiError, or nil if none found.
func (m *NetworkPoliciesConfigDump) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// It recursively validates each embedded network policy message.
func (m *NetworkPoliciesConfigDump) validate(all bool) error {
	if m == nil {
		return nil
	}
	var errors []error
	for idx, item := range m.GetNetworkpolicies() {
		_, _ = idx, item
		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, NetworkPoliciesConfigDumpValidationError{
						field:  fmt.Sprintf("Networkpolicies[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, NetworkPoliciesConfigDumpValidationError{
						field:  fmt.Sprintf("Networkpolicies[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return NetworkPoliciesConfigDumpValidationError{
					field:  fmt.Sprintf("Networkpolicies[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}
	}
	if len(errors) > 0 {
		return NetworkPoliciesConfigDumpMultiError(errors)
	}
	return nil
}

// NetworkPoliciesConfigDumpMultiError is an error wrapping multiple validation
// errors returned by NetworkPoliciesConfigDump.ValidateAll() if the
// designated constraints aren't met.
type NetworkPoliciesConfigDumpMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m NetworkPoliciesConfigDumpMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m NetworkPoliciesConfigDumpMultiError) AllErrors() []error { return m }

// NetworkPoliciesConfigDumpValidationError is the validation error returned by
// NetworkPoliciesConfigDump.Validate if the designated constraints aren't met.
type NetworkPoliciesConfigDumpValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e NetworkPoliciesConfigDumpValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e NetworkPoliciesConfigDumpValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e NetworkPoliciesConfigDumpValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e NetworkPoliciesConfigDumpValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e NetworkPoliciesConfigDumpValidationError) ErrorName() string {
	return "NetworkPoliciesConfigDumpValidationError"
}

// Error satisfies the builtin error interface
func (e NetworkPoliciesConfigDumpValidationError) Error() string {
	cause := ""
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	key := ""
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sNetworkPoliciesConfigDump.%s: %s%s",
		key,
		e.field,
		e.reason,
		cause)
}

// Compile-time interface-satisfaction assertions.
var _ error = NetworkPoliciesConfigDumpValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = NetworkPoliciesConfigDumpValidationError{}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/nphds.proto
package cilium
import (
context "context"
_ "github.com/envoyproxy/go-control-plane/envoy/annotations"
v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	// Both expressions fail to compile if the runtime's supported version
	// window does not include generation version 20.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// The mapping of a network policy identifier to the IP addresses of all the
// hosts on which the network policy is enforced.
// A host may be associated only with one network policy.
type NetworkPolicyHosts struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// The unique identifier of the network policy enforced on the hosts.
	Policy uint64 `protobuf:"varint,1,opt,name=policy,proto3" json:"policy,omitempty"`
	// The set of IP addresses of the hosts on which the network policy is
	// enforced. Optional. May be empty.
	HostAddresses []string `protobuf:"bytes,2,rep,name=host_addresses,json=hostAddresses,proto3" json:"host_addresses,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-attaches the
// generated message info so the protobuf runtime can manage it.
func (x *NetworkPolicyHosts) Reset() {
	*x = NetworkPolicyHosts{}
	mi := &file_cilium_api_nphds_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text representation of the message.
func (x *NetworkPolicyHosts) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NetworkPolicyHosts) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching
// the message info on first use.
func (x *NetworkPolicyHosts) ProtoReflect() protoreflect.Message {
	mi := &file_cilium_api_nphds_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NetworkPolicyHosts.ProtoReflect.Descriptor instead.
func (*NetworkPolicyHosts) Descriptor() ([]byte, []int) {
	return file_cilium_api_nphds_proto_rawDescGZIP(), []int{0}
}

// GetPolicy returns the Policy field; it is nil-receiver safe and returns
// the zero value when the message is nil.
func (x *NetworkPolicyHosts) GetPolicy() uint64 {
	if x != nil {
		return x.Policy
	}
	return 0
}

// GetHostAddresses returns the HostAddresses field; it is nil-receiver safe.
func (x *NetworkPolicyHosts) GetHostAddresses() []string {
	if x != nil {
		return x.HostAddresses
	}
	return nil
}
var File_cilium_api_nphds_proto protoreflect.FileDescriptor

// file_cilium_api_nphds_proto_rawDesc is the serialized FileDescriptorProto
// for cilium/api/nphds.proto. The bytes are generated; do not edit.
const file_cilium_api_nphds_proto_rawDesc = "" +
	"\n" +
	"\x16cilium/api/nphds.proto\x12\x06cilium\x1a*envoy/service/discovery/v3/discovery.proto\x1a\x1cgoogle/api/annotations.proto\x1a envoy/annotations/resource.proto\x1a\x17validate/validate.proto\"c\n" +
	"\x12NetworkPolicyHosts\x12\x16\n" +
	"\x06policy\x18\x01 \x01(\x04R\x06policy\x125\n" +
	"\x0ehost_addresses\x18\x02 \x03(\tB\x0e\xfaB\v\x92\x01\b\x18\x01\"\x04r\x02\x10\x01R\rhostAddresses2\xee\x02\n" +
	"\"NetworkPolicyHostsDiscoveryService\x12}\n" +
	"\x18StreamNetworkPolicyHosts\x12,.envoy.service.discovery.v3.DiscoveryRequest\x1a-.envoy.service.discovery.v3.DiscoveryResponse\"\x00(\x010\x01\x12\xa5\x01\n" +
	"\x17FetchNetworkPolicyHosts\x12,.envoy.service.discovery.v3.DiscoveryRequest\x1a-.envoy.service.discovery.v3.DiscoveryResponse\"-\x82\xd3\xe4\x93\x02':\x01*\"\"/v2/discovery:network_policy_hosts\x1a!\x8a\xa4\x96\xf3\a\x1b\n" +
	"\x19cilium.NetworkPolicyHostsB.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"

var (
	file_cilium_api_nphds_proto_rawDescOnce sync.Once
	file_cilium_api_nphds_proto_rawDescData []byte
)

// file_cilium_api_nphds_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor exactly once and returns the cached bytes (used by the
// deprecated Descriptor accessors).
func file_cilium_api_nphds_proto_rawDescGZIP() []byte {
	file_cilium_api_nphds_proto_rawDescOnce.Do(func() {
		file_cilium_api_nphds_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_nphds_proto_rawDesc), len(file_cilium_api_nphds_proto_rawDesc)))
	})
	return file_cilium_api_nphds_proto_rawDescData
}
var file_cilium_api_nphds_proto_msgTypes = make([]protoimpl.MessageInfo, 1)

// file_cilium_api_nphds_proto_goTypes maps descriptor type indices to the
// Go types generated for (or imported by) this file.
var file_cilium_api_nphds_proto_goTypes = []any{
	(*NetworkPolicyHosts)(nil),   // 0: cilium.NetworkPolicyHosts
	(*v3.DiscoveryRequest)(nil),  // 1: envoy.service.discovery.v3.DiscoveryRequest
	(*v3.DiscoveryResponse)(nil), // 2: envoy.service.discovery.v3.DiscoveryResponse
}

// file_cilium_api_nphds_proto_depIdxs encodes type dependencies as indices
// into goTypes; the trailing entries delimit the sub-lists, as annotated.
var file_cilium_api_nphds_proto_depIdxs = []int32{
	1, // 0: cilium.NetworkPolicyHostsDiscoveryService.StreamNetworkPolicyHosts:input_type -> envoy.service.discovery.v3.DiscoveryRequest
	1, // 1: cilium.NetworkPolicyHostsDiscoveryService.FetchNetworkPolicyHosts:input_type -> envoy.service.discovery.v3.DiscoveryRequest
	2, // 2: cilium.NetworkPolicyHostsDiscoveryService.StreamNetworkPolicyHosts:output_type -> envoy.service.discovery.v3.DiscoveryResponse
	2, // 3: cilium.NetworkPolicyHostsDiscoveryService.FetchNetworkPolicyHosts:output_type -> envoy.service.discovery.v3.DiscoveryResponse
	2, // [2:4] is the sub-list for method output_type
	0, // [0:2] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}
func init() { file_cilium_api_nphds_proto_init() }

// file_cilium_api_nphds_proto_init registers the file's types with the
// protobuf runtime. It is idempotent: a second call returns immediately.
func file_cilium_api_nphds_proto_init() {
	if File_cilium_api_nphds_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_nphds_proto_rawDesc), len(file_cilium_api_nphds_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_cilium_api_nphds_proto_goTypes,
		DependencyIndexes: file_cilium_api_nphds_proto_depIdxs,
		MessageInfos:      file_cilium_api_nphds_proto_msgTypes,
	}.Build()
	File_cilium_api_nphds_proto = out.File
	// Release the registration tables; they are no longer needed after Build.
	file_cilium_api_nphds_proto_goTypes = nil
	file_cilium_api_nphds_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// NetworkPolicyHostsDiscoveryServiceClient is the client API for NetworkPolicyHostsDiscoveryService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type NetworkPolicyHostsDiscoveryServiceClient interface {
	StreamNetworkPolicyHosts(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient, error)
	FetchNetworkPolicyHosts(ctx context.Context, in *v3.DiscoveryRequest, opts ...grpc.CallOption) (*v3.DiscoveryResponse, error)
}

// networkPolicyHostsDiscoveryServiceClient is the generated concrete client;
// it dispatches each RPC over the wrapped connection.
type networkPolicyHostsDiscoveryServiceClient struct {
	cc grpc.ClientConnInterface
}

// NewNetworkPolicyHostsDiscoveryServiceClient wraps a client connection in
// the generated NPHDS client.
func NewNetworkPolicyHostsDiscoveryServiceClient(cc grpc.ClientConnInterface) NetworkPolicyHostsDiscoveryServiceClient {
	return &networkPolicyHostsDiscoveryServiceClient{cc}
}

// StreamNetworkPolicyHosts opens the bidirectional streaming RPC and returns
// a typed stream wrapper around the raw grpc.ClientStream.
func (c *networkPolicyHostsDiscoveryServiceClient) StreamNetworkPolicyHosts(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient, error) {
	stream, err := c.cc.NewStream(ctx, &_NetworkPolicyHostsDiscoveryService_serviceDesc.Streams[0], "/cilium.NetworkPolicyHostsDiscoveryService/StreamNetworkPolicyHosts", opts...)
	if err != nil {
		return nil, err
	}
	x := &networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient{stream}
	return x, nil
}

type NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient interface {
	Send(*v3.DiscoveryRequest) error
	Recv() (*v3.DiscoveryResponse, error)
	grpc.ClientStream
}

type networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient struct {
	grpc.ClientStream
}

// Send marshals a DiscoveryRequest onto the stream.
func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient) Send(m *v3.DiscoveryRequest) error {
	return x.ClientStream.SendMsg(m)
}

// Recv blocks until a DiscoveryResponse arrives and returns it.
func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient) Recv() (*v3.DiscoveryResponse, error) {
	m := new(v3.DiscoveryResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

// FetchNetworkPolicyHosts performs the unary RPC variant.
func (c *networkPolicyHostsDiscoveryServiceClient) FetchNetworkPolicyHosts(ctx context.Context, in *v3.DiscoveryRequest, opts ...grpc.CallOption) (*v3.DiscoveryResponse, error) {
	out := new(v3.DiscoveryResponse)
	err := c.cc.Invoke(ctx, "/cilium.NetworkPolicyHostsDiscoveryService/FetchNetworkPolicyHosts", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// NetworkPolicyHostsDiscoveryServiceServer is the server API for NetworkPolicyHostsDiscoveryService service.
type NetworkPolicyHostsDiscoveryServiceServer interface {
	StreamNetworkPolicyHosts(NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer) error
	FetchNetworkPolicyHosts(context.Context, *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error)
}

// UnimplementedNetworkPolicyHostsDiscoveryServiceServer can be embedded to have forward compatible implementations.
type UnimplementedNetworkPolicyHostsDiscoveryServiceServer struct {
}

// StreamNetworkPolicyHosts returns codes.Unimplemented by default.
func (*UnimplementedNetworkPolicyHostsDiscoveryServiceServer) StreamNetworkPolicyHosts(NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer) error {
	return status.Errorf(codes.Unimplemented, "method StreamNetworkPolicyHosts not implemented")
}

// FetchNetworkPolicyHosts returns codes.Unimplemented by default.
func (*UnimplementedNetworkPolicyHostsDiscoveryServiceServer) FetchNetworkPolicyHosts(context.Context, *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method FetchNetworkPolicyHosts not implemented")
}

// RegisterNetworkPolicyHostsDiscoveryServiceServer registers srv with the
// grpc server under the generated service descriptor.
func RegisterNetworkPolicyHostsDiscoveryServiceServer(s *grpc.Server, srv NetworkPolicyHostsDiscoveryServiceServer) {
	s.RegisterService(&_NetworkPolicyHostsDiscoveryService_serviceDesc, srv)
}

// _NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHosts_Handler adapts
// the raw server stream to the typed stream interface and invokes srv.
func _NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHosts_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(NetworkPolicyHostsDiscoveryServiceServer).StreamNetworkPolicyHosts(&networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer{stream})
}

type NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer interface {
	Send(*v3.DiscoveryResponse) error
	Recv() (*v3.DiscoveryRequest, error)
	grpc.ServerStream
}

type networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer struct {
	grpc.ServerStream
}

// Send marshals a DiscoveryResponse onto the server stream.
func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer) Send(m *v3.DiscoveryResponse) error {
	return x.ServerStream.SendMsg(m)
}

// Recv blocks until a DiscoveryRequest arrives from the client.
func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer) Recv() (*v3.DiscoveryRequest, error) {
	m := new(v3.DiscoveryRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

// _NetworkPolicyHostsDiscoveryService_FetchNetworkPolicyHosts_Handler decodes
// the request, optionally routes through the unary interceptor chain, and
// dispatches to the server implementation.
func _NetworkPolicyHostsDiscoveryService_FetchNetworkPolicyHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(v3.DiscoveryRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(NetworkPolicyHostsDiscoveryServiceServer).FetchNetworkPolicyHosts(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/cilium.NetworkPolicyHostsDiscoveryService/FetchNetworkPolicyHosts",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(NetworkPolicyHostsDiscoveryServiceServer).FetchNetworkPolicyHosts(ctx, req.(*v3.DiscoveryRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _NetworkPolicyHostsDiscoveryService_serviceDesc describes the NPHDS service
// for grpc registration: one unary method and one bidirectional stream.
var _NetworkPolicyHostsDiscoveryService_serviceDesc = grpc.ServiceDesc{
	ServiceName: "cilium.NetworkPolicyHostsDiscoveryService",
	HandlerType: (*NetworkPolicyHostsDiscoveryServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "FetchNetworkPolicyHosts",
			Handler:    _NetworkPolicyHostsDiscoveryService_FetchNetworkPolicyHosts_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "StreamNetworkPolicyHosts",
			Handler:       _NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHosts_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "cilium/api/nphds.proto",
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/nphds.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
// (protoc-gen-validate emits a fixed import set; these blank references keep
// the compiler happy when a particular message uses only a subset of them)
var (
	_ = bytes.MinRead
	_ = errors.New("")
	_ = fmt.Print
	_ = utf8.UTFMax
	_ = (*regexp.Regexp)(nil)
	_ = (*strings.Reader)(nil)
	_ = net.IPv4len
	_ = time.Duration(0)
	_ = (*url.URL)(nil)
	_ = (*mail.Address)(nil)
	_ = anypb.Any{}
	_ = sort.Sort
)
// Validate checks the field values on NetworkPolicyHosts with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *NetworkPolicyHosts) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on NetworkPolicyHosts with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// NetworkPolicyHostsMultiError, or nil if none found.
func (m *NetworkPolicyHosts) ValidateAll() error {
return m.validate(true)
}
// validate is the shared implementation behind Validate and ValidateAll:
// with all=false it returns the first violation found, with all=true it
// collects every violation and wraps them in NetworkPolicyHostsMultiError.
func (m *NetworkPolicyHosts) validate(all bool) error {
// A nil message trivially satisfies all constraints.
if m == nil {
return nil
}
var errors []error
// no validation rules for Policy
// Seen-set for flagging duplicate entries in the repeated host_addresses
// field, sized up front to avoid rehashing.
_NetworkPolicyHosts_HostAddresses_Unique := make(map[string]struct{}, len(m.GetHostAddresses()))
for idx, item := range m.GetHostAddresses() {
_, _ = idx, item
// Rule: repeated values must be unique.
if _, exists := _NetworkPolicyHosts_HostAddresses_Unique[item]; exists {
err := NetworkPolicyHostsValidationError{
field: fmt.Sprintf("HostAddresses[%v]", idx),
reason: "repeated value must contain unique items",
}
if !all {
return err
}
errors = append(errors, err)
} else {
_NetworkPolicyHosts_HostAddresses_Unique[item] = struct{}{}
}
// Rule: each address must be at least 1 rune (i.e. non-empty).
// Note: a duplicate entry still falls through to this check, so a
// repeated empty string reports both violations in all mode.
if utf8.RuneCountInString(item) < 1 {
err := NetworkPolicyHostsValidationError{
field: fmt.Sprintf("HostAddresses[%v]", idx),
reason: "value length must be at least 1 runes",
}
if !all {
return err
}
errors = append(errors, err)
}
}
if len(errors) > 0 {
return NetworkPolicyHostsMultiError(errors)
}
return nil
}
// NetworkPolicyHostsMultiError is an error wrapping multiple validation
// errors returned by NetworkPolicyHosts.ValidateAll() if the designated
// constraints aren't met.
type NetworkPolicyHostsMultiError []error

// Error returns a concatenation of all the error messages it wraps,
// separated by "; ".
func (m NetworkPolicyHostsMultiError) Error() string {
	var b strings.Builder
	for i, err := range m {
		if i > 0 {
			b.WriteString("; ")
		}
		b.WriteString(err.Error())
	}
	return b.String()
}

// AllErrors returns a list of validation violation errors.
func (m NetworkPolicyHostsMultiError) AllErrors() []error { return m }
// NetworkPolicyHostsValidationError is the validation error returned by
// NetworkPolicyHosts.Validate if the designated constraints aren't met.
type NetworkPolicyHostsValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e NetworkPolicyHostsValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e NetworkPolicyHostsValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e NetworkPolicyHostsValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e NetworkPolicyHostsValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e NetworkPolicyHostsValidationError) ErrorName() string {
	return "NetworkPolicyHostsValidationError"
}

// Error satisfies the builtin error interface.
func (e NetworkPolicyHostsValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	msg := "invalid " + prefix + "NetworkPolicyHosts." + e.field + ": " + e.reason
	if e.cause != nil {
		msg += fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return msg
}

// Compile-time checks: the type is an error and exposes the standard
// protoc-gen-validate accessor set.
var _ error = NetworkPolicyHostsValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = NetworkPolicyHostsValidationError{}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/tls_wrapper.proto
package cilium
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: each expression only compiles when this generated
// code's expected protoimpl version (20) lies inside the range supported by
// the linked protobuf runtime (see protoimpl.EnforceVersion).
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Empty configuration messages for Cilium TLS wrapper to make Envoy happy
//
// UpstreamTlsWrapperContext declares no fields of its own — only the
// standard protoimpl bookkeeping state.
type UpstreamTlsWrapperContext struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset replaces x with an empty message and re-attaches the generated
// message info for this type.
func (x *UpstreamTlsWrapperContext) Reset() {
*x = UpstreamTlsWrapperContext{}
mi := &file_cilium_api_tls_wrapper_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protoimpl text representation of x.
func (x *UpstreamTlsWrapperContext) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*UpstreamTlsWrapperContext) ProtoMessage() {}
// ProtoReflect returns the reflection view of x; the message info is stored
// lazily on first use, and a nil receiver is served via mi.MessageOf.
func (x *UpstreamTlsWrapperContext) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_tls_wrapper_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UpstreamTlsWrapperContext.ProtoReflect.Descriptor instead.
func (*UpstreamTlsWrapperContext) Descriptor() ([]byte, []int) {
return file_cilium_api_tls_wrapper_proto_rawDescGZIP(), []int{0}
}
// DownstreamTlsWrapperContext is the downstream counterpart of
// UpstreamTlsWrapperContext: an empty configuration message carrying only
// protoimpl bookkeeping state.
type DownstreamTlsWrapperContext struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset replaces x with an empty message and re-attaches the generated
// message info for this type.
func (x *DownstreamTlsWrapperContext) Reset() {
*x = DownstreamTlsWrapperContext{}
mi := &file_cilium_api_tls_wrapper_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protoimpl text representation of x.
func (x *DownstreamTlsWrapperContext) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DownstreamTlsWrapperContext) ProtoMessage() {}
// ProtoReflect returns the reflection view of x; the message info is stored
// lazily on first use, and a nil receiver is served via mi.MessageOf.
func (x *DownstreamTlsWrapperContext) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_tls_wrapper_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DownstreamTlsWrapperContext.ProtoReflect.Descriptor instead.
func (*DownstreamTlsWrapperContext) Descriptor() ([]byte, []int) {
return file_cilium_api_tls_wrapper_proto_rawDescGZIP(), []int{1}
}
// File_cilium_api_tls_wrapper_proto is the parsed file descriptor for this
// proto file, populated by file_cilium_api_tls_wrapper_proto_init.
var File_cilium_api_tls_wrapper_proto protoreflect.FileDescriptor
// file_cilium_api_tls_wrapper_proto_rawDesc is the serialized
// FileDescriptorProto for cilium/api/tls_wrapper.proto. The bytes are fed
// verbatim to the TypeBuilder during init and must not be modified.
const file_cilium_api_tls_wrapper_proto_rawDesc = "" +
"\n" +
"\x1ccilium/api/tls_wrapper.proto\x12\x06cilium\"\x1b\n" +
"\x19UpstreamTlsWrapperContext\"\x1d\n" +
"\x1bDownstreamTlsWrapperContextB.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"
// Lazy gzip cache for the raw descriptor: compressed at most once, then
// reused by every Descriptor() call.
var (
file_cilium_api_tls_wrapper_proto_rawDescOnce sync.Once
file_cilium_api_tls_wrapper_proto_rawDescData []byte
)
// file_cilium_api_tls_wrapper_proto_rawDescGZIP returns the gzip-compressed
// raw descriptor, compressing on first use (guarded by sync.Once).
func file_cilium_api_tls_wrapper_proto_rawDescGZIP() []byte {
file_cilium_api_tls_wrapper_proto_rawDescOnce.Do(func() {
file_cilium_api_tls_wrapper_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_tls_wrapper_proto_rawDesc), len(file_cilium_api_tls_wrapper_proto_rawDesc)))
})
return file_cilium_api_tls_wrapper_proto_rawDescData
}
// One MessageInfo slot per message declared in this file (2: the Upstream
// and Downstream contexts).
var file_cilium_api_tls_wrapper_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
// goTypes maps each descriptor index to its generated Go type.
var file_cilium_api_tls_wrapper_proto_goTypes = []any{
(*UpstreamTlsWrapperContext)(nil), // 0: cilium.UpstreamTlsWrapperContext
(*DownstreamTlsWrapperContext)(nil), // 1: cilium.DownstreamTlsWrapperContext
}
// depIdxs encodes cross-type dependencies; every sub-list is empty because
// the two messages reference no other types.
var file_cilium_api_tls_wrapper_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_cilium_api_tls_wrapper_proto_init() }
// file_cilium_api_tls_wrapper_proto_init builds the file descriptor and
// message types. It is idempotent: once File_cilium_api_tls_wrapper_proto
// is set, further calls return immediately. The goTypes/depIdxs slices are
// nilled afterwards so the temporary metadata can be garbage-collected.
func file_cilium_api_tls_wrapper_proto_init() {
if File_cilium_api_tls_wrapper_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_tls_wrapper_proto_rawDesc), len(file_cilium_api_tls_wrapper_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_cilium_api_tls_wrapper_proto_goTypes,
DependencyIndexes: file_cilium_api_tls_wrapper_proto_depIdxs,
MessageInfos: file_cilium_api_tls_wrapper_proto_msgTypes,
}.Build()
File_cilium_api_tls_wrapper_proto = out.File
file_cilium_api_tls_wrapper_proto_goTypes = nil
file_cilium_api_tls_wrapper_proto_depIdxs = nil
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/tls_wrapper.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
//
// Generated guard: referencing one symbol from each imported package keeps
// the compiler's "imported and not used" error away regardless of which
// imports the validation code below actually needs.
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
_ = sort.Sort
)
// Validate checks the field values on UpstreamTlsWrapperContext with the
// rules defined in the proto definition for this message. It returns the
// first violation encountered, or nil when there are none.
func (m *UpstreamTlsWrapperContext) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on UpstreamTlsWrapperContext and
// reports every violation at once, wrapped in
// UpstreamTlsWrapperContextMultiError; nil when there are none.
func (m *UpstreamTlsWrapperContext) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// The message declares no fields and therefore no constraints, so
// validation can never fail (a nil receiver is likewise valid).
func (m *UpstreamTlsWrapperContext) validate(bool) error {
	return nil
}
// UpstreamTlsWrapperContextMultiError is an error wrapping multiple
// validation errors returned by UpstreamTlsWrapperContext.ValidateAll() if
// the designated constraints aren't met.
type UpstreamTlsWrapperContextMultiError []error

// Error returns a concatenation of all the error messages it wraps,
// separated by "; ".
func (m UpstreamTlsWrapperContextMultiError) Error() string {
	var b strings.Builder
	for i, err := range m {
		if i > 0 {
			b.WriteString("; ")
		}
		b.WriteString(err.Error())
	}
	return b.String()
}

// AllErrors returns a list of validation violation errors.
func (m UpstreamTlsWrapperContextMultiError) AllErrors() []error { return m }
// UpstreamTlsWrapperContextValidationError is the validation error returned
// by UpstreamTlsWrapperContext.Validate if the designated constraints
// aren't met.
type UpstreamTlsWrapperContextValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e UpstreamTlsWrapperContextValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e UpstreamTlsWrapperContextValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e UpstreamTlsWrapperContextValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e UpstreamTlsWrapperContextValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e UpstreamTlsWrapperContextValidationError) ErrorName() string {
	return "UpstreamTlsWrapperContextValidationError"
}

// Error satisfies the builtin error interface.
func (e UpstreamTlsWrapperContextValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	msg := "invalid " + prefix + "UpstreamTlsWrapperContext." + e.field + ": " + e.reason
	if e.cause != nil {
		msg += fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return msg
}

// Compile-time checks: the type is an error and exposes the standard
// protoc-gen-validate accessor set.
var _ error = UpstreamTlsWrapperContextValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = UpstreamTlsWrapperContextValidationError{}
// Validate checks the field values on DownstreamTlsWrapperContext with the
// rules defined in the proto definition for this message. It returns the
// first violation encountered, or nil when there are none.
func (m *DownstreamTlsWrapperContext) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on DownstreamTlsWrapperContext and
// reports every violation at once, wrapped in
// DownstreamTlsWrapperContextMultiError; nil when there are none.
func (m *DownstreamTlsWrapperContext) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// The message declares no fields and therefore no constraints, so
// validation can never fail (a nil receiver is likewise valid).
func (m *DownstreamTlsWrapperContext) validate(bool) error {
	return nil
}
// DownstreamTlsWrapperContextMultiError is an error wrapping multiple
// validation errors returned by DownstreamTlsWrapperContext.ValidateAll()
// if the designated constraints aren't met.
type DownstreamTlsWrapperContextMultiError []error

// Error returns a concatenation of all the error messages it wraps,
// separated by "; ".
func (m DownstreamTlsWrapperContextMultiError) Error() string {
	var b strings.Builder
	for i, err := range m {
		if i > 0 {
			b.WriteString("; ")
		}
		b.WriteString(err.Error())
	}
	return b.String()
}

// AllErrors returns a list of validation violation errors.
func (m DownstreamTlsWrapperContextMultiError) AllErrors() []error { return m }
// DownstreamTlsWrapperContextValidationError is the validation error
// returned by DownstreamTlsWrapperContext.Validate if the designated
// constraints aren't met.
type DownstreamTlsWrapperContextValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e DownstreamTlsWrapperContextValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e DownstreamTlsWrapperContextValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e DownstreamTlsWrapperContextValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e DownstreamTlsWrapperContextValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e DownstreamTlsWrapperContextValidationError) ErrorName() string {
	return "DownstreamTlsWrapperContextValidationError"
}

// Error satisfies the builtin error interface.
func (e DownstreamTlsWrapperContextValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	msg := "invalid " + prefix + "DownstreamTlsWrapperContext." + e.field + ": " + e.reason
	if e.cause != nil {
		msg += fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return msg
}

// Compile-time checks: the type is an error and exposes the standard
// protoc-gen-validate accessor set.
var _ error = DownstreamTlsWrapperContextValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = DownstreamTlsWrapperContextValidationError{}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: cilium/api/websocket.proto
package cilium
import (
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: each expression only compiles when this generated
// code's expected protoimpl version (20) lies inside the range supported by
// the linked protobuf runtime (see protoimpl.EnforceVersion).
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// WebSocketClient is the generated Go type for the cilium.WebSocketClient
// proto message (cilium/api/websocket.proto); it holds the client-side
// WebSocket handshake and keep-alive configuration.
type WebSocketClient struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Path to the unix domain socket for the cilium access log, if any.
AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
// Host header value, required.
Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"`
// Path value. Defaults to "/".
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
// sec-websocket-key value to use, defaults to a random key.
Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
// Websocket version, defaults to "13".
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// Origin header, if any.
Origin string `protobuf:"bytes,6,opt,name=origin,proto3" json:"origin,omitempty"`
// Websocket handshake timeout, default is 5 seconds.
HandshakeTimeout *durationpb.Duration `protobuf:"bytes,7,opt,name=handshake_timeout,json=handshakeTimeout,proto3" json:"handshake_timeout,omitempty"`
// ping interval, default is 0 (disabled).
// Connection is assumed dead if response is not received before the next ping is to be sent.
PingInterval *durationpb.Duration `protobuf:"bytes,8,opt,name=ping_interval,json=pingInterval,proto3" json:"ping_interval,omitempty"`
// ping only on when idle on both directions.
// ping_interval must be non-zero when this is true.
PingWhenIdle bool `protobuf:"varint,9,opt,name=ping_when_idle,json=pingWhenIdle,proto3" json:"ping_when_idle,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset replaces x with an empty message and re-attaches the generated
// message info for this type.
func (x *WebSocketClient) Reset() {
*x = WebSocketClient{}
mi := &file_cilium_api_websocket_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protoimpl text representation of x.
func (x *WebSocketClient) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*WebSocketClient) ProtoMessage() {}
// ProtoReflect returns the reflection view of x; the message info is stored
// lazily on first use, and a nil receiver is served via mi.MessageOf.
func (x *WebSocketClient) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_websocket_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WebSocketClient.ProtoReflect.Descriptor instead.
func (*WebSocketClient) Descriptor() ([]byte, []int) {
return file_cilium_api_websocket_proto_rawDescGZIP(), []int{0}
}
// GetAccessLogPath returns the AccessLogPath field ("" for a nil receiver).
func (x *WebSocketClient) GetAccessLogPath() string {
if x != nil {
return x.AccessLogPath
}
return ""
}
// GetHost returns the Host field ("" for a nil receiver).
func (x *WebSocketClient) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
// GetPath returns the Path field ("" for a nil receiver).
func (x *WebSocketClient) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
// GetKey returns the Key field ("" for a nil receiver).
func (x *WebSocketClient) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
// GetVersion returns the Version field ("" for a nil receiver).
func (x *WebSocketClient) GetVersion() string {
if x != nil {
return x.Version
}
return ""
}
// GetOrigin returns the Origin field ("" for a nil receiver).
func (x *WebSocketClient) GetOrigin() string {
if x != nil {
return x.Origin
}
return ""
}
// GetHandshakeTimeout returns the HandshakeTimeout field (nil for a nil receiver).
func (x *WebSocketClient) GetHandshakeTimeout() *durationpb.Duration {
if x != nil {
return x.HandshakeTimeout
}
return nil
}
// GetPingInterval returns the PingInterval field (nil for a nil receiver).
func (x *WebSocketClient) GetPingInterval() *durationpb.Duration {
if x != nil {
return x.PingInterval
}
return nil
}
// GetPingWhenIdle returns the PingWhenIdle field (false for a nil receiver).
func (x *WebSocketClient) GetPingWhenIdle() bool {
if x != nil {
return x.PingWhenIdle
}
return false
}
// WebSocketServer is the generated Go type for the cilium.WebSocketServer
// proto message (cilium/api/websocket.proto); its string fields describe
// values the server expects during the WebSocket handshake.
type WebSocketServer struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Path to the unix domain socket for the cilium access log, if any.
AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
// Expected host header value, if any.
Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"`
// Expected path value, if any.
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
// sec-websocket-key value to expect, if any.
Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
// Websocket version, ignored if omitted.
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// Origin header, if any. Origin header is not allowed if omitted.
Origin string `protobuf:"bytes,6,opt,name=origin,proto3" json:"origin,omitempty"`
// Websocket handshake timeout, default is 5 seconds.
HandshakeTimeout *durationpb.Duration `protobuf:"bytes,7,opt,name=handshake_timeout,json=handshakeTimeout,proto3" json:"handshake_timeout,omitempty"`
// ping interval, default is 0 (disabled).
// Connection is assumed dead if response is not received before the next ping is to be sent.
PingInterval *durationpb.Duration `protobuf:"bytes,8,opt,name=ping_interval,json=pingInterval,proto3" json:"ping_interval,omitempty"`
// ping only on when idle on both directions.
// ping_interval must be non-zero when this is true.
PingWhenIdle bool `protobuf:"varint,9,opt,name=ping_when_idle,json=pingWhenIdle,proto3" json:"ping_when_idle,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset replaces x with an empty message and re-attaches the generated
// message info for this type.
func (x *WebSocketServer) Reset() {
*x = WebSocketServer{}
mi := &file_cilium_api_websocket_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protoimpl text representation of x.
func (x *WebSocketServer) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*WebSocketServer) ProtoMessage() {}
// ProtoReflect returns the reflection view of x; the message info is stored
// lazily on first use, and a nil receiver is served via mi.MessageOf.
func (x *WebSocketServer) ProtoReflect() protoreflect.Message {
mi := &file_cilium_api_websocket_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WebSocketServer.ProtoReflect.Descriptor instead.
func (*WebSocketServer) Descriptor() ([]byte, []int) {
return file_cilium_api_websocket_proto_rawDescGZIP(), []int{1}
}
// GetAccessLogPath returns the AccessLogPath field ("" for a nil receiver).
func (x *WebSocketServer) GetAccessLogPath() string {
if x != nil {
return x.AccessLogPath
}
return ""
}
// GetHost returns the Host field ("" for a nil receiver).
func (x *WebSocketServer) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
// GetPath returns the Path field ("" for a nil receiver).
func (x *WebSocketServer) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
// GetKey returns the Key field ("" for a nil receiver).
func (x *WebSocketServer) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
// GetVersion returns the Version field ("" for a nil receiver).
func (x *WebSocketServer) GetVersion() string {
if x != nil {
return x.Version
}
return ""
}
// GetOrigin returns the Origin field ("" for a nil receiver).
func (x *WebSocketServer) GetOrigin() string {
if x != nil {
return x.Origin
}
return ""
}
// GetHandshakeTimeout returns the HandshakeTimeout field (nil for a nil receiver).
func (x *WebSocketServer) GetHandshakeTimeout() *durationpb.Duration {
if x != nil {
return x.HandshakeTimeout
}
return nil
}
// GetPingInterval returns the PingInterval field (nil for a nil receiver).
func (x *WebSocketServer) GetPingInterval() *durationpb.Duration {
if x != nil {
return x.PingInterval
}
return nil
}
// GetPingWhenIdle returns the PingWhenIdle field (false for a nil receiver).
func (x *WebSocketServer) GetPingWhenIdle() bool {
if x != nil {
return x.PingWhenIdle
}
return false
}
// File_cilium_api_websocket_proto is the parsed file descriptor for this
// proto file, populated by file_cilium_api_websocket_proto_init.
var File_cilium_api_websocket_proto protoreflect.FileDescriptor
// file_cilium_api_websocket_proto_rawDesc is the serialized
// FileDescriptorProto for cilium/api/websocket.proto. The bytes are fed
// verbatim to the TypeBuilder during init and must not be modified.
const file_cilium_api_websocket_proto_rawDesc = "" +
"\n" +
"\x1acilium/api/websocket.proto\x12\x06cilium\x1a\x1egoogle/protobuf/duration.proto\x1a\x17validate/validate.proto\"\xdc\x02\n" +
"\x0fWebSocketClient\x12&\n" +
"\x0faccess_log_path\x18\x01 \x01(\tR\raccessLogPath\x12\x1b\n" +
"\x04host\x18\x02 \x01(\tB\a\xfaB\x04r\x02\x10\x02R\x04host\x12\x12\n" +
"\x04path\x18\x03 \x01(\tR\x04path\x12\x10\n" +
"\x03key\x18\x04 \x01(\tR\x03key\x12\x18\n" +
"\aversion\x18\x05 \x01(\tR\aversion\x12\x16\n" +
"\x06origin\x18\x06 \x01(\tR\x06origin\x12F\n" +
"\x11handshake_timeout\x18\a \x01(\v2\x19.google.protobuf.DurationR\x10handshakeTimeout\x12>\n" +
"\rping_interval\x18\b \x01(\v2\x19.google.protobuf.DurationR\fpingInterval\x12$\n" +
"\x0eping_when_idle\x18\t \x01(\bR\fpingWhenIdle\"\xd3\x02\n" +
"\x0fWebSocketServer\x12&\n" +
"\x0faccess_log_path\x18\x01 \x01(\tR\raccessLogPath\x12\x12\n" +
"\x04host\x18\x02 \x01(\tR\x04host\x12\x12\n" +
"\x04path\x18\x03 \x01(\tR\x04path\x12\x10\n" +
"\x03key\x18\x04 \x01(\tR\x03key\x12\x18\n" +
"\aversion\x18\x05 \x01(\tR\aversion\x12\x16\n" +
"\x06origin\x18\x06 \x01(\tR\x06origin\x12F\n" +
"\x11handshake_timeout\x18\a \x01(\v2\x19.google.protobuf.DurationR\x10handshakeTimeout\x12>\n" +
"\rping_interval\x18\b \x01(\v2\x19.google.protobuf.DurationR\fpingInterval\x12$\n" +
"\x0eping_when_idle\x18\t \x01(\bR\fpingWhenIdleB.Z,github.com/cilium/proxy/go/cilium/api;ciliumb\x06proto3"
// Lazy gzip cache for the raw descriptor: compressed at most once, then
// reused by every Descriptor() call.
var (
file_cilium_api_websocket_proto_rawDescOnce sync.Once
file_cilium_api_websocket_proto_rawDescData []byte
)
// file_cilium_api_websocket_proto_rawDescGZIP returns the gzip-compressed
// raw descriptor, compressing on first use (guarded by sync.Once).
func file_cilium_api_websocket_proto_rawDescGZIP() []byte {
file_cilium_api_websocket_proto_rawDescOnce.Do(func() {
file_cilium_api_websocket_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cilium_api_websocket_proto_rawDesc), len(file_cilium_api_websocket_proto_rawDesc)))
})
return file_cilium_api_websocket_proto_rawDescData
}
// One MessageInfo slot per message declared in this file (2: WebSocketClient
// and WebSocketServer).
var file_cilium_api_websocket_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
// goTypes maps each descriptor index to its generated Go type; index 2 is
// the imported google.protobuf.Duration.
var file_cilium_api_websocket_proto_goTypes = []any{
(*WebSocketClient)(nil), // 0: cilium.WebSocketClient
(*WebSocketServer)(nil), // 1: cilium.WebSocketServer
(*durationpb.Duration)(nil), // 2: google.protobuf.Duration
}
// depIdxs: the first four entries resolve the Duration-typed fields to
// goTypes index 2; the trailing entries are sub-list offsets (see inline
// comments).
var file_cilium_api_websocket_proto_depIdxs = []int32{
2, // 0: cilium.WebSocketClient.handshake_timeout:type_name -> google.protobuf.Duration
2, // 1: cilium.WebSocketClient.ping_interval:type_name -> google.protobuf.Duration
2, // 2: cilium.WebSocketServer.handshake_timeout:type_name -> google.protobuf.Duration
2, // 3: cilium.WebSocketServer.ping_interval:type_name -> google.protobuf.Duration
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_cilium_api_websocket_proto_init() }
// file_cilium_api_websocket_proto_init builds the file descriptor and
// message types. It is idempotent: once File_cilium_api_websocket_proto is
// set, further calls return immediately. The goTypes/depIdxs slices are
// nilled afterwards so the temporary metadata can be garbage-collected.
func file_cilium_api_websocket_proto_init() {
if File_cilium_api_websocket_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_cilium_api_websocket_proto_rawDesc), len(file_cilium_api_websocket_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_cilium_api_websocket_proto_goTypes,
DependencyIndexes: file_cilium_api_websocket_proto_depIdxs,
MessageInfos: file_cilium_api_websocket_proto_msgTypes,
}.Build()
File_cilium_api_websocket_proto = out.File
file_cilium_api_websocket_proto_goTypes = nil
file_cilium_api_websocket_proto_depIdxs = nil
}
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: cilium/api/websocket.proto
package cilium
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
//
// Generated guard: referencing one symbol from each imported package keeps
// the compiler's "imported and not used" error away regardless of which
// imports the validation code below actually needs.
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
_ = sort.Sort
)
// Validate checks the field values on WebSocketClient with the rules defined
// in the proto definition for this message. If any rules are violated, the
// first error encountered is returned, or nil if there are no violations.
func (m *WebSocketClient) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on WebSocketClient with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// WebSocketClientMultiError, or nil if none found.
func (m *WebSocketClient) ValidateAll() error {
return m.validate(true)
}
// validate is the shared implementation behind Validate and ValidateAll:
// with all=false it returns the first violation found, with all=true it
// collects every violation and wraps them in WebSocketClientMultiError.
func (m *WebSocketClient) validate(all bool) error {
// A nil message trivially satisfies all constraints.
if m == nil {
return nil
}
var errors []error
// no validation rules for AccessLogPath
// Rule: host must be at least 2 runes long.
if utf8.RuneCountInString(m.GetHost()) < 2 {
err := WebSocketClientValidationError{
field: "Host",
reason: "value length must be at least 2 runes",
}
if !all {
return err
}
errors = append(errors, err)
}
// no validation rules for Path
// no validation rules for Key
// no validation rules for Version
// no validation rules for Origin
// HandshakeTimeout: recursively validate the embedded Duration message.
// In all mode prefer the value's ValidateAll when available; in fail-fast
// mode return on the first embedded Validate error.
if all {
switch v := interface{}(m.GetHandshakeTimeout()).(type) {
case interface{ ValidateAll() error }:
if err := v.ValidateAll(); err != nil {
errors = append(errors, WebSocketClientValidationError{
field: "HandshakeTimeout",
reason: "embedded message failed validation",
cause: err,
})
}
case interface{ Validate() error }:
if err := v.Validate(); err != nil {
errors = append(errors, WebSocketClientValidationError{
field: "HandshakeTimeout",
reason: "embedded message failed validation",
cause: err,
})
}
}
} else if v, ok := interface{}(m.GetHandshakeTimeout()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return WebSocketClientValidationError{
field: "HandshakeTimeout",
reason: "embedded message failed validation",
cause: err,
}
}
}
// PingInterval: same embedded-message validation as HandshakeTimeout.
if all {
switch v := interface{}(m.GetPingInterval()).(type) {
case interface{ ValidateAll() error }:
if err := v.ValidateAll(); err != nil {
errors = append(errors, WebSocketClientValidationError{
field: "PingInterval",
reason: "embedded message failed validation",
cause: err,
})
}
case interface{ Validate() error }:
if err := v.Validate(); err != nil {
errors = append(errors, WebSocketClientValidationError{
field: "PingInterval",
reason: "embedded message failed validation",
cause: err,
})
}
}
} else if v, ok := interface{}(m.GetPingInterval()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return WebSocketClientValidationError{
field: "PingInterval",
reason: "embedded message failed validation",
cause: err,
}
}
}
// no validation rules for PingWhenIdle
if len(errors) > 0 {
return WebSocketClientMultiError(errors)
}
return nil
}
// WebSocketClientMultiError is an error wrapping multiple validation errors
// returned by WebSocketClient.ValidateAll() if the designated constraints
// aren't met.
type WebSocketClientMultiError []error

// Error returns a concatenation of all the error messages it wraps,
// separated by "; ".
func (m WebSocketClientMultiError) Error() string {
	var b strings.Builder
	for i, err := range m {
		if i > 0 {
			b.WriteString("; ")
		}
		b.WriteString(err.Error())
	}
	return b.String()
}

// AllErrors returns a list of validation violation errors.
func (m WebSocketClientMultiError) AllErrors() []error { return m }
// WebSocketClientValidationError is the validation error returned by
// WebSocketClient.Validate if the designated constraints aren't met.
type WebSocketClientValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field function returns field value.
func (e WebSocketClientValidationError) Field() string { return e.field }

// Reason function returns reason value.
func (e WebSocketClientValidationError) Reason() string { return e.reason }

// Cause function returns cause value.
func (e WebSocketClientValidationError) Cause() error { return e.cause }

// Key function returns key value.
func (e WebSocketClientValidationError) Key() bool { return e.key }

// ErrorName returns error name.
func (e WebSocketClientValidationError) ErrorName() string {
	return "WebSocketClientValidationError"
}

// Error satisfies the builtin error interface.
func (e WebSocketClientValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	msg := "invalid " + prefix + "WebSocketClient." + e.field + ": " + e.reason
	if e.cause != nil {
		msg += fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return msg
}

// Compile-time checks: the type is an error and exposes the standard
// protoc-gen-validate accessor set.
var _ error = WebSocketClientValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = WebSocketClientValidationError{}
// Validate checks the field values on WebSocketServer with the rules defined
// in the proto definition for this message. If any rules are violated, the
// first error encountered is returned, or nil if there are no violations.
func (m *WebSocketServer) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on WebSocketServer with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// WebSocketServerMultiError, or nil if none found.
func (m *WebSocketServer) ValidateAll() error {
return m.validate(true)
}
// validate is the shared implementation behind Validate and ValidateAll:
// with all=false it returns the first violation found, with all=true it
// collects every violation and wraps them in WebSocketServerMultiError.
// Unlike WebSocketClient, no scalar field carries a constraint here; only
// the embedded Duration messages are validated recursively.
func (m *WebSocketServer) validate(all bool) error {
// A nil message trivially satisfies all constraints.
if m == nil {
return nil
}
var errors []error
// no validation rules for AccessLogPath
// no validation rules for Host
// no validation rules for Path
// no validation rules for Key
// no validation rules for Version
// no validation rules for Origin
// HandshakeTimeout: recursively validate the embedded Duration message.
// In all mode prefer the value's ValidateAll when available; in fail-fast
// mode return on the first embedded Validate error.
if all {
switch v := interface{}(m.GetHandshakeTimeout()).(type) {
case interface{ ValidateAll() error }:
if err := v.ValidateAll(); err != nil {
errors = append(errors, WebSocketServerValidationError{
field: "HandshakeTimeout",
reason: "embedded message failed validation",
cause: err,
})
}
case interface{ Validate() error }:
if err := v.Validate(); err != nil {
errors = append(errors, WebSocketServerValidationError{
field: "HandshakeTimeout",
reason: "embedded message failed validation",
cause: err,
})
}
}
} else if v, ok := interface{}(m.GetHandshakeTimeout()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return WebSocketServerValidationError{
field: "HandshakeTimeout",
reason: "embedded message failed validation",
cause: err,
}
}
}
// PingInterval: same embedded-message validation as HandshakeTimeout.
if all {
switch v := interface{}(m.GetPingInterval()).(type) {
case interface{ ValidateAll() error }:
if err := v.ValidateAll(); err != nil {
errors = append(errors, WebSocketServerValidationError{
field: "PingInterval",
reason: "embedded message failed validation",
cause: err,
})
}
case interface{ Validate() error }:
if err := v.Validate(); err != nil {
errors = append(errors, WebSocketServerValidationError{
field: "PingInterval",
reason: "embedded message failed validation",
cause: err,
})
}
}
} else if v, ok := interface{}(m.GetPingInterval()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return WebSocketServerValidationError{
field: "PingInterval",
reason: "embedded message failed validation",
cause: err,
}
}
}
// no validation rules for PingWhenIdle
if len(errors) > 0 {
return WebSocketServerMultiError(errors)
}
return nil
}
// WebSocketServerMultiError is an error wrapping multiple validation errors
// returned by WebSocketServer.ValidateAll() if the designated constraints
// aren't met.
type WebSocketServerMultiError []error

// Error returns a concatenation of all the error messages it wraps,
// separated by "; ".
func (m WebSocketServerMultiError) Error() string {
	msgs := make([]string, len(m))
	for i, err := range m {
		msgs[i] = err.Error()
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m WebSocketServerMultiError) AllErrors() []error { return m }
// WebSocketServerValidationError is the validation error returned by
// WebSocketServer.Validate if the designated constraints aren't met.
type WebSocketServerValidationError struct {
	field  string // name of the field that failed validation
	reason string // human-readable description of the violation
	cause  error  // underlying error for embedded-message failures, or nil
	key    bool   // true when the violation is on a map key
}
// Field returns the name of the message field that failed validation.
func (e WebSocketServerValidationError) Field() string { return e.field }

// Reason returns the human-readable reason the validation failed.
func (e WebSocketServerValidationError) Reason() string { return e.reason }

// Cause returns the underlying error when the failure was caused by an
// embedded message failing its own validation, or nil.
func (e WebSocketServerValidationError) Cause() error { return e.cause }

// Key reports whether the failure applies to a map key (rendered as a
// "key for " prefix by Error()).
func (e WebSocketServerValidationError) Key() bool { return e.key }

// ErrorName returns the concrete error type name, for classification.
func (e WebSocketServerValidationError) ErrorName() string { return "WebSocketServerValidationError" }
// Error satisfies the builtin error interface. The message has the shape
// "invalid [key for ]WebSocketServer.<field>: <reason>[ | caused by: <cause>]".
func (e WebSocketServerValidationError) Error() string {
	var cause string
	if e.cause != nil {
		cause = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	var key string
	if e.key {
		key = "key for "
	}
	return fmt.Sprintf(
		"invalid %sWebSocketServer.%s: %s%s",
		key, e.field, e.reason, cause)
}
// Compile-time assertions that WebSocketServerValidationError satisfies both
// the builtin error interface and the richer validation-error interface.
var _ error = WebSocketServerValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = WebSocketServerValidationError{}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package accesslog
import (
"net"
"sync"
"sync/atomic"
"github.com/golang/protobuf/proto"
"github.com/sirupsen/logrus"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/cilium/proxy/proxylib/proxylib"
)
// Client streams access log entries to the Cilium access log server over a
// packet-oriented unix domain socket. The zero connection state is repaired
// lazily: Log() re-dials via connect() after a write failure.
type Client struct {
	connected uint32 // Accessed atomically without locking; non-zero when conn is usable
	path      string // filesystem path of the access log socket; "" disables logging
	mutex     sync.Mutex // Used to protect opening the connection
	conn      atomic.Pointer[net.UnixConn] // Read atomically without locking
}
// connect returns the connection to the access log server, dialing it first
// if needed. Returns nil when the client has no socket path or dialing fails.
// Concurrency: the fast path reads 'connected' and 'conn' atomically without
// locking; only the slow (dialing) path takes 'mutex'.
func (cl *Client) connect() *net.UnixConn {
	if cl.path == "" {
		return nil
	}
	if atomic.LoadUint32(&cl.connected) > 0 {
		// Guaranteed to be non-nil
		return cl.conn.Load()
	}
	cl.mutex.Lock()
	defer cl.mutex.Unlock()
	conn := cl.conn.Load()
	// Did someone else connect while we were contending on the lock?
	// cl.connected may be written to by others concurrently
	if atomic.LoadUint32(&cl.connected) > 0 {
		return conn
	}
	// Close a stale connection (left over from a failed write that cleared
	// 'connected') before dialing a new one.
	if conn != nil {
		conn.Close() // not setting conn to nil!
	}
	logrus.Debugf("Accesslog: Connecting to Cilium access log socket: %s", cl.path)
	conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: cl.path, Net: "unixpacket"})
	if err != nil {
		logrus.WithError(err).Error("Accesslog: DialUnix() failed")
		return nil
	}
	cl.conn.Store(conn)
	// Always have a non-nil 'cl.conn' after 'cl.connected' is set for the first time!
	atomic.StoreUint32(&cl.connected, 1)
	return conn
}
// Log marshals 'pblog' and sends it to the access log server, (re)opening
// the connection if necessary. On a write failure the connection is marked
// broken so the next call re-dials.
func (cl *Client) Log(pblog *cilium.LogEntry) {
	conn := cl.connect()
	if conn == nil {
		logrus.Debugf("Accesslog: No connection, cannot send: %s", pblog.String())
		return
	}
	// Encode
	logmsg, err := proto.Marshal(pblog)
	if err != nil {
		logrus.WithError(err).Error("Accesslog: Protobuf marshaling error")
		return
	}
	// Write
	if _, err = conn.Write(logmsg); err != nil {
		logrus.WithError(err).Error("Accesslog: Write() failed")
		atomic.StoreUint32(&cl.connected, 0) // Mark connection as broken
	}
}
// Path returns the access log socket path this client was created with.
// Receiver renamed from 'c' to 'cl' for consistency with the other Client
// methods (connect, Log, Close).
func (cl *Client) Path() string {
	return cl.path
}
// NewClient returns an AccessLogger that logs to the unix socket at
// 'accessLogPath'. The connection is attempted eagerly here; any failure is
// retried transparently on each Log() call.
func NewClient(accessLogPath string) proxylib.AccessLogger {
	cl := &Client{path: accessLogPath}
	cl.connect()
	return cl
}
// Close closes the unix-socket connection, if one was ever established.
func (cl *Client) Close() {
	if conn := cl.conn.Load(); conn != nil {
		conn.Close()
	}
}
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cassandra
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"strings"
"github.com/cilium/proxy/proxylib/accesslog"
"github.com/cilium/proxy/proxylib/proxylib"
"github.com/cilium/proxy/proxylib/test"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"github.com/sirupsen/logrus"
)
var (
	// s holds the shared fuzzing harness state (log server + proxylib instance),
	// initialized once in init().
	s *CassandraSuite
	// parsers lists the parser types the fuzzer may exercise.
	parsers = []string{"cassandra", "kafka", "r2d2", "memcache"}

	// Sentinel errors used to reject unusable fuzzer inputs early.
	// Messages are lowercase per Go error-string convention (staticcheck ST1005).
	ErrInvalidBytes       = errors.New("invalid bytes")
	ErrCouldNotCreateData = errors.New("could not create test data")
	ErrInvalidParserData  = errors.New("invalid parser-specific data")
)
// init sets up the shared fuzzing harness: an access log server, a proxylib
// instance, silenced logging, and a non-terminating LogFatal so parse errors
// raised by fuzzed inputs do not abort the fuzzer process.
func init() {
	s = &CassandraSuite{}
	s.logServer = test.StartAccessLogServer("access_log.sock", 10)
	s.ins = proxylib.NewInstance("node1", accesslog.NewClient(s.logServer.Path))
	logrus.SetLevel(logrus.PanicLevel)
	proxylib.LogFatal = func(format string, args ...interface{}) {
		// Format the message but deliberately discard it instead of dying.
		fmt.Sprintf(format, args...)
	}
}
// getParser maps a fuzzer-chosen integer to one of the supported parser
// names. The index is reduced modulo len(parsers), and negative inputs are
// normalized — Go's % operator keeps the sign of the dividend, so a negative
// fuzzer int would otherwise index out of range and panic.
func getParser(index int) string {
	i := index % len(parsers)
	if i < 0 {
		i += len(parsers)
	}
	return parsers[i]
}
// createData derives the test inputs (policy version, policy texts, payload
// byte slices, reply flag and parser name) from the fuzzer byte stream.
// On any consumption error the values produced so far are returned together
// with that error.
func createData(f *fuzz.ConsumeFuzzer) (string, []string, [][]byte, bool, string, error) {
	version := ""
	policies := make([]string, 0)
	d := make([][]byte, 0)
	reply := false
	parser := ""

	var err error
	if version, err = f.GetString(); err != nil {
		return version, policies, d, reply, parser, err
	}
	if err = f.CreateSlice(&policies); err != nil {
		return version, policies, d, reply, parser, err
	}
	if err = f.CreateSlice(&d); err != nil {
		return version, policies, d, reply, parser, err
	}
	if reply, err = f.GetBool(); err != nil {
		return version, policies, d, reply, parser, err
	}
	parserType, err := f.GetInt()
	if err != nil {
		return version, policies, d, reply, parser, err
	}
	parser = getParser(parserType)
	return version, policies, d, reply, parser, nil
}
// verifyCassandraBytes pre-validates the fuzzed payload the same way the
// cassandra parser will later in the call tree (minimum frame length and the
// 256 MB request-length cap), so hopeless inputs are rejected cheaply.
func verifyCassandraBytes(d [][]byte) error {
	joined := bytes.Join(d, []byte{})
	if len(joined) < 10 {
		return ErrInvalidBytes
	}
	if binary.BigEndian.Uint32(joined[5:9]) > cassMaxLen {
		return ErrInvalidBytes
	}
	return nil
}
// verifyr2d2Bytes checks that the fuzzed payload contains at least one
// complete CRLF-terminated r2d2 request with at least one field.
func verifyr2d2Bytes(d [][]byte) error {
	joined := string(bytes.Join(d, []byte{}))
	request, _, found := strings.Cut(joined, "\r\n")
	if !found {
		return ErrInvalidBytes
	}
	// read single request
	if len(strings.Split(request, " ")) < 1 {
		return ErrInvalidBytes
	}
	return nil
}
// verifyParserData applies parser-specific pre-validation to the fuzzed
// payload; "memcache" (and any unknown parser) needs no pre-checks.
func verifyParserData(parser string, d [][]byte, reply bool) error {
	switch parser {
	case "cassandra":
		if verifyCassandraBytes(d) != nil {
			return ErrInvalidParserData
		}
	case "r2d2":
		// Could consider setting reply here instead of checking it
		if reply {
			return ErrInvalidParserData
		}
		if verifyr2d2Bytes(d) != nil {
			return ErrInvalidParserData
		}
	case "kafka":
		// Could consider setting reply here instead of checking it
		if reply {
			return ErrInvalidParserData
		}
	}
	return nil
}
// FuzzMultipleParsers implements the fuzzer entry point: it derives a policy
// text, payload, reply flag and parser selection from the fuzzer bytes,
// installs the policy, opens a proxylib connection for the chosen parser and
// feeds the payload to OnData. Returns 1 when the input was interesting
// enough to reach OnData, 0 otherwise.
func FuzzMultipleParsers(data []byte) int {
	f := fuzz.NewConsumer(data)
	version, policies, d, reply, parser, err := createData(f)
	if err != nil {
		return 0
	}
	// Cheap pre-validation mirroring checks done deeper in the call tree.
	err = verifyParserData(parser, d, reply)
	if err != nil {
		return 0
	}
	defer s.logServer.Clear()
	err = s.ins.InsertPolicyText(version, policies, "")
	if err != nil {
		return 0
	}
	bufSize := 1024
	origBuf := make([]byte, 0, bufSize)
	replyBuf := make([]byte, 0, bufSize)
	// NOTE: NewConnection returns (error, *Connection) in that order.
	err, conn := proxylib.NewConnection(s.ins, parser, 1, true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "no-policy", &origBuf, &replyBuf)
	if err != nil {
		return 0
	}
	ops := make([][2]int64, 0, 100)
	conn.OnData(reply, false, &d, &ops)
	return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cassandra
import (
"bytes"
"encoding/binary"
"fmt"
"regexp"
"strings"
"github.com/sirupsen/logrus"
cilium "github.com/cilium/proxy/go/cilium/api"
. "github.com/cilium/proxy/proxylib/proxylib"
)
//
// Cassandra v3/v4 Parser
//
// Spec: https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec
//
// Current Cassandra parser supports filtering on messages where the opcode is 'query-like'
// (i.e., opcode 'query', 'prepare', 'batch'). In those scenarios, we match on query_action and query_table.
// Examples:
// query_action = 'select', query_table = 'system.*'
// query_action = 'insert', query_table = 'attendance.daily_records'
// query_action = 'select', query_table = 'deathstar.scrum_notes'
// query_action = 'insert', query_table = 'covalent.foo'
//
// Batch requests are logged as individual queries, but an entire batch request will be allowed
// only if all requests are allowed.
// Non-query client requests, including 'Options', 'Auth_Response', 'Startup', and 'Register'
// are automatically allowed to simplify the policy language.
// There are known changes in protocol v2 that are not compatible with this parser; see
// the "Changes from v2" section in https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v3.spec
// CassandraRule is one compiled L7 allow rule: an optional exact match on the
// query action (e.g. "select", "insert") and an optional regular expression
// matched against the "<keyspace>.<table>" the query touches.
type CassandraRule struct {
	queryActionExact   string         // exact action to match; "" matches any action
	tableRegexCompiled *regexp.Regexp // compiled table regex; nil matches any table
}
// cassHdrLen is the fixed native-protocol frame header length in bytes
// (version, flags, stream-id, opcode, body length).
const cassHdrLen = 9
const cassMaxLen = 268435456 // 256 MB, per spec

// unknownPreparedQueryPath is a sentinel path for an EXECUTE referencing a
// prepared-id we have not cached; Matches() always denies it.
const unknownPreparedQueryPath = "/unknown-prepared-query"
// Matches implements the policy match callback for Cassandra. 'data' is the
// parsed request path — either "/<opcode>" for non-query requests (always
// allowed) or "/<opcode>/<action>/<table>" for query-like requests — and the
// return value indicates whether this rule allows the request.
func (rule *CassandraRule) Matches(data interface{}) bool {
	// Cast 'data' to the type we give to 'Matches()'
	path, ok := data.(string)
	if !ok {
		logrus.Warning("Matches() called with type other than string")
		return false
	}
	logrus.Debugf("Policy Match test for '%s'", path)
	regexStr := ""
	if rule.tableRegexCompiled != nil {
		regexStr = rule.tableRegexCompiled.String()
	}
	logrus.Debugf("Rule: action '%s', table '%s'", rule.queryActionExact, regexStr)
	if path == unknownPreparedQueryPath {
		logrus.Warning("Dropping execute for unknown prepared-id")
		return false
	}
	parts := strings.Split(path, "/")
	if len(parts) <= 2 {
		// this is not a query-like request, just allow
		return true
	} else if len(parts) < 4 {
		// should never happen unless we've messed up internally
		// as path is either /<opcode> or /<opcode>/<action>/<table>
		logrus.Errorf("Invalid parsed path: '%s'", path)
		return false
	}
	if rule.queryActionExact != "" && rule.queryActionExact != parts[2] {
		// Fix: log the action actually compared (parts[2]); the previous
		// code logged parts[1], which is the opcode.
		logrus.Debugf("CassandraRule: query_action mismatch %v, %s", rule.queryActionExact, parts[2])
		return false
	}
	if len(parts[3]) > 0 &&
		rule.tableRegexCompiled != nil &&
		!rule.tableRegexCompiled.MatchString(parts[3]) {
		logrus.Debugf("CassandraRule: table_regex mismatch '%v', '%s'", rule.tableRegexCompiled, parts[3])
		return false
	}
	return true
}
// CassandraRuleParser parses protobuf L7 rules to enforcement objects.
// Supported rule keys are "query_action" (exact match) and "query_table"
// (regular expression). May panic, via ParseError/MustCompile, on malformed
// rules — callers are expected to recover at the policy-insertion boundary.
func CassandraRuleParser(rule *cilium.PortNetworkPolicyRule) []L7NetworkPolicyRule {
	l7Rules := rule.GetL7Rules()
	if l7Rules == nil {
		return nil
	}
	allowRules := l7Rules.GetL7AllowRules()
	rules := make([]L7NetworkPolicyRule, 0, len(allowRules))
	for _, l7Rule := range allowRules {
		var cr CassandraRule
		for k, v := range l7Rule.Rule {
			switch k {
			case "query_action":
				cr.queryActionExact = v
			case "query_table":
				if v != "" {
					// MustCompile panics on a bad regex, surfacing it as a parse error.
					cr.tableRegexCompiled = regexp.MustCompile(v)
				}
			default:
				ParseError(fmt.Sprintf("Unsupported key: %s", k), rule)
			}
		}
		if len(cr.queryActionExact) > 0 {
			// ensure this is a valid query action
			res := queryActionMap[cr.queryActionExact]
			if res == invalidAction {
				ParseError(fmt.Sprintf("Unable to parse L7 cassandra rule with invalid query_action: '%s'", cr.queryActionExact), rule)
			} else if res == actionNoTable && cr.tableRegexCompiled != nil {
				// Table-less actions (e.g. role/permission management) cannot
				// meaningfully combine with a table regex.
				ParseError(fmt.Sprintf("query_action '%s' is not compatible with a query_table match", cr.queryActionExact), rule)
			}
		}
		logrus.Debugf("Parsed CassandraRule pair: %v", cr)
		rules = append(rules, &cr)
	}
	return rules
}
// CassandraParserFactory creates per-connection CassandraParser instances.
type CassandraParserFactory struct{}

// cassandraParserFactory is the singleton registered with proxylib; a typed
// nil is sufficient since the factory is stateless.
var cassandraParserFactory *CassandraParserFactory

func init() {
	logrus.Debug("init(): Registering cassandraParserFactory")
	RegisterParserFactory("cassandra", cassandraParserFactory)
	RegisterL7RuleParser("cassandra", CassandraRuleParser)
}
// CassandraParser holds the per-connection parser state.
type CassandraParser struct {
	connection *Connection
	keyspace   string // stores current keyspace name from 'use' command

	// preparedQueryPathByStreamID stores the prepared query path while
	// waiting for the 'prepared' reply from the server that carries the
	// prepared-id; request and reply are associated via stream-id.
	preparedQueryPathByStreamID map[uint16]string
	// preparedQueryPathByPreparedID stores the query path keyed by
	// prepared-id, allowing us to enforce policy on the query at the time
	// of the execute command.
	preparedQueryPathByPreparedID map[string]string
}
// Create returns a new CassandraParser bound to 'connection', with empty
// prepared-query caches.
func (pf *CassandraParserFactory) Create(connection *Connection) interface{} {
	logrus.Debugf("CassandraParserFactory: Create: %v", connection)
	return &CassandraParser{
		connection:                    connection,
		preparedQueryPathByStreamID:   make(map[uint16]string),
		preparedQueryPathByPreparedID: make(map[string]string),
	}
}
// OnData parses one native-protocol frame from the (joined) buffered data.
// It returns MORE with a byte count while the frame is incomplete, PASS to
// forward the frame, DROP (after injecting an error reply) when policy denies
// it, or ERROR on malformed input. Replies are only inspected to learn
// prepared-query ids; policy is enforced on requests.
func (p *CassandraParser) OnData(reply, endStream bool, dataArray [][]byte) (OpType, int) {
	// inefficient, but simple for now
	data := bytes.Join(dataArray, []byte{})
	if len(data) < cassHdrLen {
		// Partial header received, ask for more
		needs := cassHdrLen - len(data)
		logrus.Debugf("Did not receive full header, need %d more bytes", needs)
		return MORE, needs
	}
	// full header available, read full request length
	requestLen := binary.BigEndian.Uint32(data[5:9])
	logrus.Debugf("Request length = %d", requestLen)
	if requestLen > cassMaxLen {
		logrus.Errorf("Request length of %d is greater than 256 MB", requestLen)
		return ERROR, int(ERROR_INVALID_FRAME_LENGTH)
	}
	dataMissing := (cassHdrLen + int(requestLen)) - len(data)
	if dataMissing > 0 {
		// full header received, but only partial request
		logrus.Debugf("Hdr received, but need %d more bytes of request", dataMissing)
		return MORE, dataMissing
	}
	// we parse replies, but only to look for prepared-query-id responses
	if reply {
		// NOTE(review): this zero-length check is unreachable — len(data) >=
		// cassHdrLen was already established above.
		if len(data) == 0 {
			logrus.Debugf("ignoring zero length reply call to onData")
			return NOP, 0
		}
		cassandraParseReply(p, data[0:(cassHdrLen+requestLen)])
		logrus.Debugf("reply, passing %d bytes", (cassHdrLen + requestLen))
		return PASS, (cassHdrLen + int(requestLen))
	}
	err, paths := cassandraParseRequest(p, data[0:(cassHdrLen+requestLen)])
	if err != 0 {
		logrus.Errorf("Parsing error %d", err)
		return ERROR, int(err)
	}
	logrus.Debugf("Request paths = %s", paths)
	// First pass: decide the verdict. A batch is allowed only if every one of
	// its queries is allowed.
	matches := true
	access_log_entry_type := cilium.EntryType_Request
	unpreparedQuery := false
	for i := 0; i < len(paths); i++ {
		if strings.HasPrefix(paths[i], "/query/use/") ||
			strings.HasPrefix(paths[i], "/batch/use/") ||
			strings.HasPrefix(paths[i], "/prepare/use/") {
			// do not count a "use" query as a deny
			continue
		}
		if paths[i] == unknownPreparedQueryPath {
			matches = false
			unpreparedQuery = true
			access_log_entry_type = cilium.EntryType_Denied
			break
		}
		if !p.connection.Matches(paths[i]) {
			matches = false
			access_log_entry_type = cilium.EntryType_Denied
			break
		}
	}
	// Second pass: emit one access log entry per logged query.
	for i := 0; i < len(paths); i++ {
		parts := strings.Split(paths[i], "/")
		fields := map[string]string{}
		if len(parts) >= 3 && parts[2] == "use" {
			// do not log 'use' queries
			continue
		} else if len(parts) == 4 {
			fields["query_action"] = parts[2]
			fields["query_table"] = parts[3]
		} else if unpreparedQuery {
			fields["error"] = "unknown prepared query id"
		} else {
			// do not log non-query accesses
			continue
		}
		p.connection.Log(access_log_entry_type,
			&cilium.LogEntry_GenericL7{
				GenericL7: &cilium.L7LogEntry{
					Proto:  "cassandra",
					Fields: fields,
				},
			})
	}
	if !matches {
		// If we have already sent another error to the client,
		// do not send unauthorized message
		if !unpreparedQuery {
			unauthMsg := make([]byte, len(unauthMsgBase))
			copy(unauthMsg, unauthMsgBase)
			// We want to use the same protocol and stream ID
			// as the incoming request.
			// update the protocol to match the request
			unauthMsg[0] = 0x80 | (data[0] & 0x07)
			// update the stream ID to match the request
			unauthMsg[2] = data[2]
			unauthMsg[3] = data[3]
			p.connection.Inject(true, unauthMsg)
		}
		return DROP, int(cassHdrLen + requestLen)
	}
	return PASS, int(cassHdrLen + requestLen)
}
// unauthMsgBase is a full response (header + body) to be used as an
// "unauthorized" error to be sent to the cassandra client as part of a policy
// deny. The first and third/fourth bytes must be updated before injection so
// the reply carries the protocol version and stream-id of the request.
var unauthMsgBase = []byte{
	0x0,      // version (uint8) - must be set before injection
	0x0,      // flags, (uint8)
	0x0, 0x0, // stream-id (uint16) - must be set before injection
	0x0,                    // opcode error (uint8)
	0x0, 0x0, 0x0, 0x1a, // request length (uint32) - update if text changes
	0x0, 0x0, 0x21, 0x00, // 'unauthorized error code' 0x2100 (uint32)
	0x0, 0x14, // length of error msg (uint16) - update if text changes
	'R', 'e', 'q', 'u', 'e', 's', 't', ' ', 'U', 'n', 'a', 'u', 't', 'h', 'o', 'r', 'i', 'z', 'e', 'd',
}
// unpreparedMsgBase is the header + fixed body prefix of an "unprepared"
// error reply, sent to the client when the proxy has no cached query path
// for a prepare-query-id. Version, stream-id and body length are patched in
// by createUnpreparedMsg, which also appends the [short bytes] id.
var unpreparedMsgBase = []byte{
	0x0,      // version (uint8) - must be set before injection
	0x0,      // flags, (uint8)
	0x0, 0x0, // stream-id (uint16) - must be set before injection
	0x0,                // opcode error (uint8)
	0x0, 0x0, 0x0, 0x0, // request length (uint32) - must be set based on
	// of length of prepared query id
	0x0, 0x0, 0x25, 0x00, // 'unprepared error code' 0x2500 (uint32)
	// must append [short bytes] array of prepared query id.
}

// createUnpreparedMsg builds a complete 'unprepared' (error code 0x2500)
// reply frame: the response bit plus the request's protocol version and
// stream-id in the header, a body length covering error code + [short bytes],
// and the unknown prepared-id appended as [short bytes].
func createUnpreparedMsg(version byte, streamID []byte, preparedID string) []byte {
	msg := make([]byte, len(unpreparedMsgBase), len(unpreparedMsgBase)+2+len(preparedID))
	copy(msg, unpreparedMsgBase)
	msg[0] = 0x80 | version
	msg[2], msg[3] = streamID[0], streamID[1]
	// body length: 4-byte error code + 2-byte length + the id itself
	binary.BigEndian.PutUint32(msg[5:9], uint32(4+2+len(preparedID)))
	var idLen [2]byte
	binary.BigEndian.PutUint16(idLen[:], uint16(len(preparedID)))
	msg = append(msg, idLen[:]...)
	return append(msg, preparedID...)
}
// opcodeMap maps native-protocol opcodes to the name used as the first
// component of the parsed request path. Opcode 0x04 (CREDENTIALS, v1-only)
// is intentionally absent.
var opcodeMap = map[byte]string{
	0x00: "error",
	0x01: "startup",
	0x02: "ready",
	0x03: "authenticate",
	0x05: "options",
	0x06: "supported",
	0x07: "query",
	0x08: "result",
	0x09: "prepare",
	0x0A: "execute",
	0x0B: "register",
	0x0C: "event",
	0x0D: "batch",
	0x0E: "auth_challenge",
	0x0F: "auth_response",
	0x10: "auth_success",
}
// Classification of 'query_action' values: invalidAction marks an unknown
// action, actionWithTable actions may be combined with a query_table match,
// actionNoTable actions may not.
const invalidAction = 0
const actionWithTable = 1
const actionNoTable = 2

// queryActionMap tests whether a 'query_action' is valid, and whether it may
// carry a query_table match. Missing keys yield invalidAction (zero value).
var queryActionMap = map[string]int{
	"select":         actionWithTable,
	"delete":         actionWithTable,
	"insert":         actionWithTable,
	"update":         actionWithTable,
	"create-table":   actionWithTable,
	"drop-table":     actionWithTable,
	"alter-table":    actionWithTable,
	"truncate-table": actionWithTable,
	// these queries take a keyspace
	// and match against query_table
	"use":             actionWithTable,
	"create-keyspace": actionWithTable,
	"alter-keyspace":  actionWithTable,
	"drop-keyspace":   actionWithTable,
	"drop-index":      actionNoTable,
	"create-index":    actionNoTable, // TODO: we could tie this to table if we want
	"create-materialized-view": actionNoTable,
	"drop-materialized-view":   actionNoTable,
	// TODO: these admin ops could be bundled into meta roles
	// (e.g., role-mgmt, permission-mgmt)
	"create-role":       actionNoTable,
	"alter-role":        actionNoTable,
	"drop-role":         actionNoTable,
	"grant-role":        actionNoTable,
	"revoke-role":       actionNoTable,
	"list-roles":        actionNoTable,
	"grant-permission":  actionNoTable,
	"revoke-permission": actionNoTable,
	"list-permissions":  actionNoTable,
	"create-user":       actionNoTable,
	"alter-user":        actionNoTable,
	"drop-user":         actionNoTable,
	"list-users":        actionNoTable,
	"create-function":   actionNoTable,
	"drop-function":     actionNoTable,
	"create-aggregate":  actionNoTable,
	"drop-aggregate":    actionNoTable,
	"create-type":       actionNoTable,
	"alter-type":        actionNoTable,
	"drop-type":         actionNoTable,
	"create-trigger":    actionNoTable,
	"drop-trigger":      actionNoTable,
}
// parseQuery extracts the (action, table) pair from a CQL query string for
// policy matching, e.g. ("select", "system.local"). Returns ("", "") when the
// query cannot be parsed safely. As a side effect, a 'use' query updates
// p.keyspace, which is prepended to unqualified table names of later queries.
//
// Fix: the select/delete scan previously read fields[i+1] without checking
// that a token follows "from", so a query ending in "from" (reachable via the
// fuzzer or a hostile client) panicked with index out of range. The loop now
// stops one token early; a trailing "from" simply leaves 'table' empty and
// takes the existing warn-and-reject path.
func parseQuery(p *CassandraParser, query string) (string, string) {
	var action string
	var table string
	query = strings.TrimRight(query, ";")                // remove potential trailing ;
	fields := strings.Fields(strings.ToLower(query)) // handles all whitespace

	// we currently do not strip comments.  It seems like cqlsh does
	// strip comments, but its not clear if that can be assumed of all clients
	// It should not be possible to "spoof" the 'action' as this is assumed to be
	// the first token (leaving no room for a comment to start), but it could potentially
	// trick this parser into thinking we're accessing table X, when in fact the
	// query accesses table Y, which would obviously be a security vulnerability
	// As a result, we look at each token here, and if any of them match the comment
	// characters for cassandra, we fail parsing.
	for i := 0; i < len(fields); i++ {
		if len(fields[i]) >= 2 &&
			(fields[i][:2] == "--" ||
				fields[i][:2] == "/*" ||
				fields[i][:2] == "//") {
			logrus.Warnf("Unable to safely parse query with comments '%s'", query)
			return "", ""
		}
	}
	if len(fields) < 2 {
		goto invalidQuery
	}
	action = fields[0]
	switch action {
	case "select", "delete":
		// Scan for the token following "from"; stop one token early so a
		// trailing "from" cannot index past the end of fields.
		for i := 1; i < len(fields)-1; i++ {
			if fields[i] == "from" {
				table = strings.ToLower(fields[i+1])
			}
		}
		if len(table) == 0 {
			logrus.Warnf("Unable to parse table name from query '%s'", query)
			return "", ""
		}
	case "insert":
		// INSERT into <table-name>
		if len(fields) < 3 {
			goto invalidQuery
		}
		table = strings.ToLower(fields[2])
	case "update":
		// UPDATE <table-name>
		table = strings.ToLower(fields[1])
	case "use":
		p.keyspace = strings.Trim(fields[1], "\"\\'")
		logrus.Debugf("Saving keyspace '%s'", p.keyspace)
		table = p.keyspace
	case "alter", "create", "drop", "truncate", "list":
		action = strings.Join([]string{action, fields[1]}, "-")
		if fields[1] == "table" || fields[1] == "keyspace" {
			if len(fields) < 3 {
				goto invalidQuery
			}
			table = fields[2]
			if table == "if" {
				if action == "create-table" {
					if len(fields) < 6 {
						goto invalidQuery
					}
					// handle optional "IF NOT EXISTS"
					table = fields[5]
				} else if action == "drop-table" || action == "drop-keyspace" {
					if len(fields) < 5 {
						goto invalidQuery
					}
					// handle optional "IF EXISTS"
					table = fields[4]
				}
			}
		}
		if action == "truncate" && len(fields) == 2 {
			// special case, truncate can just be passed table name
			table = fields[1]
		}
		if fields[1] == "materialized" {
			action = action + "-view"
		} else if fields[1] == "custom" {
			action = "create-index"
		}
	default:
		goto invalidQuery
	}
	// Qualify an unqualified table with the session keyspace set by 'use'.
	if len(table) > 0 && !strings.Contains(table, ".") && action != "use" {
		table = p.keyspace + "." + table
	}
	return action, table

invalidQuery:
	logrus.Errorf("Unable to parse query: '%s'", query)
	return "", ""
}
// cassandraParseRequest parses one complete request frame ('data' covers
// exactly header + body, as sliced by OnData) into one or more policy paths:
// "/<opcode>" for non-query requests, "/<opcode>/<action>/<table>" for
// query/prepare requests, one path per query for batches, and the cached
// query path (or unknownPreparedQueryPath) for executes.
//
// NOTE(review): interior offsets derived from untrusted length fields
// (queryLen, idLen, batch offsets) are not bounds-checked against len(data),
// so a frame whose declared body length disagrees with its content can cause
// a slice-out-of-range panic — worth hardening given the fuzz harness above.
func cassandraParseRequest(p *CassandraParser, data []byte) (OpError, []string) {
	direction := data[0] & 0x80 // top bit
	if direction != 0 {
		logrus.Errorf("Direction bit is 'reply', but we are trying to parse a request")
		return ERROR_INVALID_FRAME_TYPE, nil
	}
	compressionFlag := data[1] & 0x01
	if compressionFlag == 1 {
		logrus.Errorf("Compression flag set, unable to parse request beyond the header")
		return ERROR_INVALID_FRAME_TYPE, nil
	}
	opcode := data[4]
	path := opcodeMap[opcode]
	// parse query string from query/prepare/batch requests
	// NOTE: parsing only prepare statements and passing all execute
	// statements requires that we 'invalidate' all execute statements
	// anytime policy changes, to ensure that no execute statements are
	// allowed that correspond to prepared queries that would no longer
	// be valid.  A better option might be to cache all prepared queries,
	// mapping the execution ID to allow/deny each time policy is changed.
	if opcode == 0x07 || opcode == 0x09 {
		// query || prepare
		queryLen := binary.BigEndian.Uint32(data[9:13])
		endIndex := 13 + queryLen
		query := string(data[13:endIndex])
		action, table := parseQuery(p, query)
		if action == "" {
			return ERROR_INVALID_FRAME_TYPE, nil
		}
		path = "/" + path + "/" + action + "/" + table
		if opcode == 0x09 {
			// stash 'path' for this prepared query based on stream id
			// rewrite 'opcode' portion of the path to be 'execute' rather than 'prepare'
			streamID := binary.BigEndian.Uint16(data[2:4])
			logrus.Debugf("Prepare query path '%s' with stream-id %d", path, streamID)
			p.preparedQueryPathByStreamID[streamID] = strings.Replace(path, "prepare", "execute", 1)
		}
		return 0, []string{path}
	} else if opcode == 0x0d {
		// batch
		numQueries := binary.BigEndian.Uint16(data[10:12])
		paths := make([]string, numQueries)
		logrus.Debugf("batch query count = %d", numQueries)
		offset := 12
		for i := 0; i < int(numQueries); i++ {
			kind := data[offset]
			if kind == 0 {
				// full query string
				queryLen := int(binary.BigEndian.Uint32(data[offset+1 : offset+5]))
				query := string(data[offset+5 : offset+5+queryLen])
				action, table := parseQuery(p, query)
				if action == "" {
					return ERROR_INVALID_FRAME_TYPE, nil
				}
				path = "/" + path + "/" + action + "/" + table
				paths[i] = path
				path = "batch" // reset for next item
				offset = offset + 5 + queryLen
				offset = readPastBatchValues(data, offset)
			} else if kind == 1 {
				// prepared query id
				idLen := int(binary.BigEndian.Uint16(data[offset+1 : offset+3]))
				preparedID := string(data[offset+3 : (offset + 3 + idLen)])
				logrus.Debugf("Batch entry with prepared-id = '%s'", preparedID)
				path := p.preparedQueryPathByPreparedID[preparedID]
				if len(path) > 0 {
					paths[i] = path
				} else {
					logrus.Warnf("No cached entry for prepared-id = '%s' in batch", preparedID)
					unpreparedMsg := createUnpreparedMsg(data[0], data[2:4], preparedID)
					p.connection.Inject(true, unpreparedMsg)
					return 0, []string{unknownPreparedQueryPath}
				}
				offset = offset + 3 + idLen
				offset = readPastBatchValues(data, offset)
			} else {
				logrus.Errorf("unexpected value of 'kind' in batch query: %d", kind)
				return ERROR_INVALID_FRAME_TYPE, nil
			}
		}
		return 0, paths
	} else if opcode == 0x0a {
		// execute
		// parse out prepared query id, and then look up our
		// cached query path for policy evaluation.
		idLen := binary.BigEndian.Uint16(data[9:11])
		preparedID := string(data[11:(11 + idLen)])
		logrus.Debugf("Execute with prepared-id = '%s'", preparedID)
		path := p.preparedQueryPathByPreparedID[preparedID]
		if len(path) == 0 {
			logrus.Warnf("No cached entry for prepared-id = '%s'", preparedID)
			unpreparedMsg := createUnpreparedMsg(data[0], data[2:4], preparedID)
			p.connection.Inject(true, unpreparedMsg)
			// this path is special-cased in Matches() so that unknown
			// prepared IDs are dropped if any rules are defined
			return 0, []string{unknownPreparedQueryPath}
		}
		return 0, []string{path}
	} else {
		// other opcode, just return type of opcode
		return 0, []string{"/" + path}
	}
}
// readPastBatchValues advances past the [value_list] that follows each batch
// entry: a 2-byte count, then for each value a 4-byte *signed* length n
// followed by n bytes when n > 0, and returns the offset just past the list.
//
// Fix: the length must be read as a signed int32. The protocol encodes
// 'null' as -1 and 'not set' as -2, with no value bytes following; the old
// unsigned conversion turned those markers into huge positive lengths on
// 64-bit platforms — and even when recognized as negative, the code failed
// to skip the 4 length bytes themselves, corrupting the offset for every
// subsequent value in the batch.
func readPastBatchValues(data []byte, initialOffset int) int {
	numValues := int(binary.BigEndian.Uint16(data[initialOffset : initialOffset+2]))
	offset := initialOffset + 2
	for i := 0; i < numValues; i++ {
		valueLen := int32(binary.BigEndian.Uint32(data[offset : offset+4]))
		offset += 4 // always consume the length field itself
		// 'null' (-1) and 'not set' (-2) carry zero value bytes
		if valueLen > 0 {
			offset += int(valueLen)
		}
	}
	return offset
}
// cassandraParseReply inspects a complete reply frame, only to learn prepared
// query IDs: a RESULT (0x08) of kind 'prepared' (0x0004) carries the
// prepared-id for the PREPARE request with the same stream-id, letting us
// move the cached query path from the stream-id map to the prepared-id map
// for later enforcement on EXECUTE requests. All other replies are ignored.
//
// NOTE(review): data[9:13] / data[13:15+idLen] are read without checking the
// body is actually that long; a malformed RESULT reply could panic here.
func cassandraParseReply(p *CassandraParser, data []byte) {
	direction := data[0] & 0x80 // top bit
	if direction != 0x80 {
		logrus.Errorf("Direction bit is 'request', but we are trying to parse a reply")
		return
	}
	compressionFlag := data[1] & 0x01
	if compressionFlag == 1 {
		logrus.Errorf("Compression flag set, unable to parse reply beyond the header")
		return
	}
	streamID := binary.BigEndian.Uint16(data[2:4])
	logrus.Debugf("Reply with opcode %d and stream-id %d", data[4], streamID)
	// if this is an opcode == RESULT message of type 'prepared', associate the prepared
	// statement id with the full query string that was included in the
	// associated PREPARE request.  The stream-id in this reply allows us to
	// find the associated prepare query string.
	if data[4] == 0x08 {
		resultKind := binary.BigEndian.Uint32(data[9:13])
		logrus.Debugf("resultKind = %d", resultKind)
		if resultKind == 0x0004 {
			idLen := binary.BigEndian.Uint16(data[13:15])
			preparedID := string(data[15 : 15+idLen])
			logrus.Debugf("Result with prepared-id = '%s' for stream-id %d", preparedID, streamID)
			path := p.preparedQueryPathByStreamID[streamID]
			if len(path) > 0 {
				// found cached query path to associate with this preparedID
				p.preparedQueryPathByPreparedID[preparedID] = path
				logrus.Debugf("Associating query path '%s' with prepared-id %s as part of stream-id %d", path, preparedID, streamID)
			} else {
				logrus.Warnf("Unable to find prepared query path associated with stream-id %d", streamID)
			}
		}
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cassandra
import (
"encoding/hex"
"testing"
. "github.com/cilium/checkmate"
"github.com/cilium/proxy/proxylib/accesslog"
"github.com/cilium/proxy/proxylib/proxylib"
"github.com/cilium/proxy/proxylib/test"
)
// Test hooks gocheck (checkmate) into the standard "go test" runner.
func Test(t *testing.T) {
	// logging.ToggleDebugLogs(true)
	// log.SetLevel(log.DebugLevel)
	TestingT(t)
}
// CassandraSuite bundles the shared fixtures for the cassandra parser tests:
// an access log server and a proxylib instance.
type CassandraSuite struct {
	logServer *test.AccessLogServer
	ins       *proxylib.Instance
}

// Register the suite with gocheck.
var _ = Suite(&CassandraSuite{})
// SetUpSuite starts the access log server and creates the proxylib library
// instance shared by all the test cases.
func (s *CassandraSuite) SetUpSuite(c *C) {
	s.logServer = test.StartAccessLogServer("access_log.sock", 10)
	c.Assert(s.logServer, Not(IsNil))
	s.ins = proxylib.NewInstance("node1", accesslog.NewClient(s.logServer.Path))
	c.Assert(s.ins, Not(IsNil))
}
// checkAccessLogs drains the access log server and asserts that exactly
// expPasses passed and expDrops dropped entries were recorded.
// Fixes: "Unxpected" typo, and the drops assertion previously reused the
// "passed" message verbatim (copy-paste error).
func (s *CassandraSuite) checkAccessLogs(c *C, expPasses, expDrops int) {
	passes, drops := s.logServer.Clear()
	c.Check(passes, Equals, expPasses, Commentf("Unexpected number of passed access log messages"))
	c.Check(drops, Equals, expDrops, Commentf("Unexpected number of dropped access log messages"))
}
// TearDownTest discards any access-log messages left over by the test case,
// so they do not leak into the next test's counts.
func (s *CassandraSuite) TearDownTest(c *C) {
	s.logServer.Clear()
}
// TearDownSuite shuts down the access log server started in SetUpSuite.
func (s *CassandraSuite) TearDownSuite(c *C) {
	s.logServer.Close()
}
// hexData decodes a list of hex-encoded strings into a list of byte slices.
// Used by the Cassandra tests, which express wire-format requests as hex.
// Decoding failures fail the test immediately.
func hexData(c *C, dataHex ...string) [][]byte {
	decoded := make([][]byte, 0, len(dataHex))
	for _, h := range dataHex {
		raw, err := hex.DecodeString(h)
		c.Assert(err, IsNil)
		decoded = append(decoded, raw)
	}
	return decoded
}
// TestCassandraOnDataNoHeader feeds only 2 bytes of the 9-byte Cassandra
// frame header and expects the parser to ask for the missing header bytes.
func (s *CassandraSuite) TestCassandraOnDataNoHeader(c *C) {
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "no-policy")
	data := hexData(c, "0400")
	// MORE with the number of bytes still needed to complete the header.
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.MORE, 9-len(data[0]))
}
// TestCassandraOnDataOptionsReq verifies that an OPTIONS request (opcode 0x05,
// empty body) is passed through under a policy allowing "select" actions.
func (s *CassandraSuite) TestCassandraOnDataOptionsReq(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_action"
value: "select"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	// 9-byte frame: version 0x04, flags 0, stream-id 0, opcode 0x05 (OPTIONS), length 0.
	data := hexData(c, "040000000500000000")
	// The whole frame passes, then MORE(9) waits for the next frame header.
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.MORE, 9)
}
// TestCassandraOnDataPartialReq passes a large query request that is missing
// just the last byte, and expects the parser to ask for exactly one more byte.
func (s *CassandraSuite) TestCassandraOnDataPartialReq(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: ".*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	// QUERY frame (opcode 0x07) with declared body length 0x76 but one byte short.
	data := hexData(c, "0400000407000000760000006f53454c45435420636c75737465725f6e616d652c20646174615f63656e7465722c207261636b2c20746f6b656e732c20706172746974696f6e65722c20736368656d615f76657273696f6e2046524f4d2073797374656d2e6c6f63616c205748455245206b65793d276c6f63616c270001")
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.MORE, 1)
}
// TestCassandraOnDataQueryReq verifies that a complete QUERY request
// (opcode 0x07) is passed under a wildcard query_table policy.
func (s *CassandraSuite) TestCassandraOnDataQueryReq(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: ".*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	// Same SELECT query as the partial test, this time complete.
	data := hexData(c, "0400000407000000760000006f53454c45435420636c75737465725f6e616d652c20646174615f63656e7465722c207261636b2c20746f6b656e732c20706172746974696f6e65722c20736368656d615f76657273696f6e2046524f4d2073797374656d2e6c6f63616c205748455245206b65793d276c6f63616c27000100")
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.MORE, 9)
}
// TestCassandraOnDataSplitQueryReq verifies that a QUERY request split across
// two data slices is reassembled and passed as a single unit.
func (s *CassandraSuite) TestCassandraOnDataSplitQueryReq(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: ".*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	// One logical frame delivered in two chunks (header+1 byte, then the rest).
	data := hexData(c, "04000004070000007600", "00006f53454c45435420636c75737465725f6e616d652c20646174615f63656e7465722c207261636b2c20746f6b656e732c20706172746974696f6e65722c20736368656d615f76657273696f6e2046524f4d2073797374656d2e6c6f63616c205748455245206b65793d276c6f63616c27000100")
	// A single PASS covering both chunks is expected.
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0])+len(data[1]),
		proxylib.MORE, 9)
}
// TestCassandraOnDataMultiReq verifies that two back-to-back request frames
// (OPTIONS followed by QUERY) each produce their own PASS operation.
func (s *CassandraSuite) TestCassandraOnDataMultiReq(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: ".*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	// First an OPTIONS frame, then a complete SELECT query frame.
	data := hexData(c, "040000000500000000",
		"0400000407000000760000006f53454c45435420636c75737465725f6e616d652c20646174615f63656e7465722c207261636b2c20746f6b656e732c20706172746974696f6e65722c20736368656d615f76657273696f6e2046524f4d2073797374656d2e6c6f63616c205748455245206b65793d276c6f63616c27000100")
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.PASS, len(data[1]),
		proxylib.MORE, 9)
}
// TestSimpleCassandraPolicy verifies that a query for a table that does not
// match the policy is dropped and an unauthorized-error reply is injected,
// while the preceding OPTIONS request still passes.
func (s *CassandraSuite) TestSimpleCassandraPolicy(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: "no-match"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	// Expected injected reply carries the request's stream-id (0x4).
	unauthMsg := createUnauthMsg(0x4)
	data := hexData(c, "040000000500000000",
		"0400000407000000760000006f53454c45435420636c75737465725f6e616d652c20646174615f63656e7465722c207261636b2c20746f6b656e732c20706172746974696f6e65722c20736368656d615f76657273696f6e2046524f4d2073797374656d2e6c6f63616c205748455245206b65793d276c6f63616c27000100")
	conn.CheckOnDataOK(c, false, false, &data, unauthMsg,
		proxylib.PASS, len(data[0]),
		proxylib.DROP, len(data[1]),
		proxylib.MORE, 9)
	// All passes are not access-logged
	s.checkAccessLogs(c, 0, 1)
}
// createUnauthMsg builds the expected unauthorized-error reply frame for the
// given stream ID, starting from the shared unauthMsgBase template.
func createUnauthMsg(streamID byte) []byte {
	msg := append([]byte(nil), unauthMsgBase...)
	msg[0] = 0x84     // response flag + protocol version
	msg[2] = 0x0      // stream-id high byte
	msg[3] = streamID // stream-id low byte
	return msg
}
// TestCassandraBatchRequestPolicy confirms that we correctly parse and allow
// a valid batch request whose queries all match the policy (tables in db1).
func (s *CassandraSuite) TestCassandraBatchRequestPolicy(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: "db1.*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	batchMsg := []byte{
		0x04,     // version
		0x0,      // flags, (uint8)
		0x0, 0x4, // stream-id (uint16) (test request uses 0x0004 as stream ID)
		0x0d,                 // opcode batch (uint8)
		0x0, 0x0, 0x0, 0x3c, // request length of 60 (uint32) - update if body changes
		0x0,      // batch type == logged
		0x0, 0x2, // two batch messages
		// first batch message
		0x0,                 // type: non-prepared query
		0x0, 0x0, 0x0, 0x14, // [long string] length (20)
		'S', 'E', 'L', 'E', 'C', 'T', ' ', '*', ' ', 'F', 'R', 'O', 'M', ' ', 'd', 'b', '1', '.', 't', '1',
		0x0, 0x0, // # of bound values
		// second batch message
		0x0,                 // type: non-prepared query
		0x0, 0x0, 0x0, 0x14, // [long string] length (20)
		'S', 'E', 'L', 'E', 'C', 'T', ' ', '*', ' ', 'F', 'R', 'O', 'M', ' ', 'd', 'b', '1', '.', 't', '2',
		0x0, 0x0, // # of bound values
		0x0, 0x0, // consistency level [short]
		0x0, // batch flags
	}
	data := [][]byte{batchMsg}
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.MORE, 9)
	// batch requests are access-logged individually
	s.checkAccessLogs(c, 2, 0)
}
// TestCassandraBatchRequestPolicyDenied confirms that we correctly parse and
// deny a batch request if any of the contained queries are denied
// (here the second query targets db2, outside the db1.* policy).
func (s *CassandraSuite) TestCassandraBatchRequestPolicyDenied(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: "db1.*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	batchMsg := []byte{
		0x04,     // version
		0x0,      // flags, (uint8)
		0x0, 0x4, // stream-id (uint16) (test request uses 0x0004 as stream ID)
		0x0d,                 // opcode batch (uint8)
		0x0, 0x0, 0x0, 0x3c, // request length of 60 (uint32) - update if body changes
		0x0,      // batch type == logged
		0x0, 0x2, // two batch messages
		// first batch message
		0x0,                 // type: non-prepared query
		0x0, 0x0, 0x0, 0x14, // [long string] length (20)
		'S', 'E', 'L', 'E', 'C', 'T', ' ', '*', ' ', 'F', 'R', 'O', 'M', ' ', 'd', 'b', '1', '.', 't', '1',
		0x0, 0x0, // # of bound values
		// second batch message (accesses db2.t2, which should be denied)
		0x0,                 // type: non-prepared query
		0x0, 0x0, 0x0, 0x14, // [long string] length (20)
		'S', 'E', 'L', 'E', 'C', 'T', ' ', '*', ' ', 'F', 'R', 'O', 'M', ' ', 'd', 'b', '2', '.', 't', '2',
		0x0, 0x0, // # of bound values
		0x0, 0x0, // consistency level [short]
		0x0, // batch flags
	}
	data := [][]byte{batchMsg}
	unauthMsg := createUnauthMsg(0x4)
	conn.CheckOnDataOK(c, false, false, &data, unauthMsg,
		proxylib.DROP, len(data[0]),
		proxylib.MORE, 9)
	// batch requests are access-logged individually
	// Note: in this case, both accesses are denied, as a batch
	// request is either entirely allowed or denied
	s.checkAccessLogs(c, 0, 2)
}
// TestCassandraBatchRequestPreparedStatement tests batch requests containing
// prepared statements, pre-seeding the parser's prepared-id-to-path map so
// both entries resolve to allowed db3 tables.
func (s *CassandraSuite) TestCassandraBatchRequestPreparedStatement(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: "db3.*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	cassParser, ok := (conn.Parser).(*CassandraParser)
	if !ok {
		panic("failed to cast conn.Parser to *CassandraParser\n")
	}
	// Seed the prepared-id cache as if PREPARE requests had been observed.
	preparedQueryID1 := "aaaa"
	cassParser.preparedQueryPathByPreparedID[preparedQueryID1] = "/batch/select/db3.t1"
	preparedQueryID2 := "bbbb"
	cassParser.preparedQueryPathByPreparedID[preparedQueryID2] = "/batch/select/db3.t2"
	batchMsg := []byte{
		0x04,     // version
		0x0,      // flags, (uint8)
		0x0, 0x4, // stream-id (uint16) (test request uses 0x0004 as stream ID)
		0x0d,                 // opcode batch (uint8)
		0x0, 0x0, 0x0, 0x18, // request length of 24 (uint32) - update if body changes
		0x0,      // batch type == logged
		0x0, 0x2, // two batch messages
		// first batch message
		0x1,      // type: prepared query
		0x0, 0x4, // [short] length (4)
		'a', 'a', 'a', 'a',
		0x0, 0x0, // # of bound values
		// second batch message
		0x1,      // type: prepared query
		0x0, 0x4, // [short] length (4)
		'b', 'b', 'b', 'b',
		0x0, 0x0, // # of bound values
		0x0, 0x0, // consistency level [short]
		0x0, // batch flags
	}
	data := [][]byte{batchMsg}
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.MORE, 9)
	// batch requests are access-logged individually
	s.checkAccessLogs(c, 2, 0)
}
// TestCassandraBatchRequestPreparedStatementDenied tests batch requests with
// prepared statements where one prepared-id maps to a denied table (db4),
// causing the whole batch to be dropped.
func (s *CassandraSuite) TestCassandraBatchRequestPreparedStatementDenied(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: "db3.*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	cassParser, ok := (conn.Parser).(*CassandraParser)
	if !ok {
		panic("failed to cast conn.Parser to *CassandraParser\n")
	}
	// Seed the prepared-id cache; the second id resolves to db4 (not allowed).
	preparedQueryID1 := "aaaa"
	cassParser.preparedQueryPathByPreparedID[preparedQueryID1] = "/batch/select/db3.t1"
	preparedQueryID2 := "bbbb"
	cassParser.preparedQueryPathByPreparedID[preparedQueryID2] = "/batch/select/db4.t2"
	batchMsg := []byte{
		0x04,     // version
		0x0,      // flags, (uint8)
		0x0, 0x4, // stream-id (uint16) (test request uses 0x0004 as stream ID)
		0x0d,                 // opcode batch (uint8)
		0x0, 0x0, 0x0, 0x18, // request length of 24 (uint32) - update if body changes
		0x0,      // batch type == logged
		0x0, 0x2, // two batch messages
		// first batch message
		0x1,      // type: prepared query
		0x0, 0x4, // [short] length (4)
		'a', 'a', 'a', 'a',
		0x0, 0x0, // # of bound values
		// second batch message (accesses table db4, which should be denied)
		0x1,      // type: prepared query
		0x0, 0x4, // [short] length (4)
		'b', 'b', 'b', 'b',
		0x0, 0x0, // # of bound values
		0x0, 0x0, // consistency level [short]
		0x0, // batch flags
	}
	data := [][]byte{batchMsg}
	unauthMsg := createUnauthMsg(0x4)
	conn.CheckOnDataOK(c, false, false, &data, unauthMsg,
		proxylib.DROP, len(data[0]),
		proxylib.MORE, 9)
	// batch requests are access-logged individually
	s.checkAccessLogs(c, 0, 2)
}
// TestCassandraExecutePreparedStatement tests an EXECUTE request (opcode 0x0a)
// whose prepared-id resolves to an allowed db3 table, expecting a pass.
func (s *CassandraSuite) TestCassandraExecutePreparedStatement(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: "db3.*"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	cassParser, ok := (conn.Parser).(*CassandraParser)
	if !ok {
		panic("failed to cast conn.Parser to *CassandraParser\n")
	}
	// Seed the prepared-id cache as if a PREPARE request had been observed.
	preparedQueryID1 := "aaaa"
	cassParser.preparedQueryPathByPreparedID[preparedQueryID1] = "/query/select/db3.t1"
	executeMsg := []byte{
		0x04,     // version
		0x0,      // flags, (uint8)
		0x0, 0x4, // stream-id (uint16) (test request uses 0x0004 as stream ID)
		0x0a,                 // opcode execute (uint8)
		0x0, 0x0, 0x0, 0x09, // request length (uint32) - update if body changes
		// Execute request
		0x0, 0x4, // short bytes len (4)
		'a', 'a', 'a', 'a',
		// the rest of this is values that can be ignored by our parser,
		// but we add some here to make sure that we're properly passing
		// based on total request length.
		'x', 'y', 'z',
	}
	data := [][]byte{executeMsg}
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.MORE, 9)
	s.checkAccessLogs(c, 1, 0)
}
// TestCassandraExecutePreparedStatementUnknownID tests an EXECUTE request with
// a prepared-id the parser has never seen: the request is dropped and an
// "unprepared" error reply is injected back to the client.
func (s *CassandraSuite) TestCassandraExecutePreparedStatementUnknownID(c *C) {
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "cp1")
	executeMsg := []byte{
		0x04,     // version
		0x0,      // flags, (uint8)
		0x0, 0x4, // stream-id (uint16) (test request uses 0x0004 as stream ID)
		0x0a,                 // opcode execute (uint8)
		0x0, 0x0, 0x0, 0x06, // request length (uint32) - update if body changes
		// Execute request
		0x0, 0x4, // short bytes len (4)
		'a', 'a', 'a', 'a',
	}
	data := [][]byte{executeMsg}
	// Expected injected reply references the unknown prepared-id "aaaa".
	unpreparedMsg := createUnpreparedMsg(0x04, []byte{0x0, 0x4}, "aaaa")
	conn.CheckOnDataOK(c, false, false, &data, unpreparedMsg,
		proxylib.DROP, len(data[0]),
		proxylib.MORE, 9)
	s.checkAccessLogs(c, 0, 1)
}
// TestCassandraPreparedResultReply tests parsing of a prepared RESULT reply
// (reply direction): the parser should pass it through and associate the
// prepared-id with the query path cached for the reply's stream-id.
func (s *CassandraSuite) TestCassandraPreparedResultReply(c *C) {
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "cp1")
	cassParser, ok := (conn.Parser).(*CassandraParser)
	if !ok {
		panic("failed to cast conn.Parser to *CassandraParser\n")
	}
	// make sure there is a stream-id (4) that matches the request below
	// this would have been populated by a "prepare" request
	cassParser.preparedQueryPathByStreamID[uint16(4)] = "/query/select/db3.t1"
	preparedResultMsg := []byte{
		0x84,     // reply + version
		0x0,      // flags, (uint8)
		0x0, 0x4, // stream-id (uint16) (test request uses 0x0004 as stream ID)
		0x08,                 // opcode result (uint8)
		0x0, 0x0, 0x0, 0x16, // request length 22 (uint32) - update if body changes
		// Prepared Result request
		0x0, 0x0, 0x0, 0x4, // [int] result type
		0x0, 0x4, // prepared-id len (short)
		'a', 'a', 'a', 'a', // prepared-id
		0x0, 0x0, 0x0, 0x0, // prepared results flags
		0x0, 0x0, 0x0, 0x0, // column-count
		0x0, 0x0, 0x0, 0x0, // pk-count
	}
	data := [][]byte{preparedResultMsg}
	// 'reply' is true here: this data flows from server to client.
	conn.CheckOnDataOK(c, true, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.MORE, 9)
	// these replies are not access logged
	s.checkAccessLogs(c, 0, 0)
}
// TestCassandraAdditionalQueries exercises a set of non-SELECT query actions
// (CREATE/INSERT/UPDATE/DROP/TRUNCATE) against an allowed table db4.t1,
// expecting each to pass and be access-logged.
func (s *CassandraSuite) TestCassandraAdditionalQueries(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: "db4.t1"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	queries := []string{"CREATE TABLE db4.t1 (f1 varchar, f2 timeuuid, PRIMARY KEY ((f1), f2))",
		"INSERT INTO db4.t1 (f1, f2, f3) values ('dan', now(), 'Cilium!')",
		"UPDATE db4.t1 SET f1 = 'donald' where f2 in (1,2,3)",
		"DROP TABLE db4.t1",
		"TRUNCATE db4.t1",
		"CREATE TABLE IF NOT EXISTS db4.t1 (f1 varchar, PRIMARY KEY(f1))",
	}
	queryMsgBase := []byte{
		0x04,     // version
		0x0,      // flags, (uint8)
		0x0, 0x5, // stream-id (uint16) (test request uses 0x0005 as stream ID)
		0x07,               // opcode query (uint8)
		0x0, 0x0, 0x0, 0x0, // length of request - must be set
		// Query Req
		0x0, 0x0, 0x0, 0x0, // length of query (int) - must be set
		// query string goes here
	}
	data := make([][]byte, len(queries))
	for i := 0; i < len(queries); i++ {
		queryLen := len(queries[i])
		// NOTE(review): appending to the shared queryMsgBase relies on the
		// literal's capacity equaling its length, so each append reallocates;
		// if queryMsgBase ever gains spare capacity, iterations would alias.
		queryMsg := append(queryMsgBase, []byte(queries[i])...)
		// this works as long as query is less than 251 bytes
		queryMsg[8] = byte(4 + queryLen) // low byte of the frame body length
		queryMsg[12] = byte(queryLen)    // low byte of the query-string length
		data[i] = queryMsg
	}
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.PASS, len(data[1]),
		proxylib.PASS, len(data[2]),
		proxylib.PASS, len(data[3]),
		proxylib.PASS, len(data[4]),
		proxylib.PASS, len(data[5]),
		proxylib.MORE, 9)
	s.checkAccessLogs(c, 6, 0)
}
// TestCassandraUseQuery tests a USE query followed by a query that does not
// include the keyspace; the second is only allowed if the parser propagates
// the keyspace from the preceding USE command.
func (s *CassandraSuite) TestCassandraUseQuery(c *C) {
	s.ins.CheckInsertPolicyText(c, "1", []string{`
endpoint_ips: "1.1.1.1"
endpoint_id: 2
ingress_per_port_policies: <
port: 80
rules: <
remote_policies: 1
remote_policies: 3
remote_policies: 4
l7_proto: "cassandra"
l7_rules: <
l7_allow_rules: <
rule: <
key: "query_table"
value: "db5.t1"
>
>
>
>
>
`})
	conn := s.ins.CheckNewConnectionOK(c, "cassandra", true, 1, 2, "1.1.1.1:34567", "10.0.0.2:80", "1.1.1.1")
	// note: the second insert command intentionally does not include a keyspace, so that it will only
	// be allowed if we properly propagate the keyspace from the previous use command
	queries := []string{"USE db5", "INSERT INTO t1 (f1, f2, f3) values ('dan', now(), 'Cilium!')"}
	queryMsgBase := []byte{
		0x04,     // version
		0x0,      // flags, (uint8)
		0x0, 0x5, // stream-id (uint16) (test request uses 0x0005 as stream ID)
		0x07,               // opcode query (uint8)
		0x0, 0x0, 0x0, 0x0, // length of request - must be set
		// Query Req
		0x0, 0x0, 0x0, 0x0, // length of query (int) - must be set
		// query string goes here
	}
	data := make([][]byte, len(queries))
	for i := 0; i < len(queries); i++ {
		queryLen := len(queries[i])
		queryMsg := append(queryMsgBase, []byte(queries[i])...)
		// this works as long as query is less than 251 bytes
		queryMsg[8] = byte(4 + queryLen) // low byte of the frame body length
		queryMsg[12] = byte(queryLen)    // low byte of the query-string length
		data[i] = queryMsg
	}
	conn.CheckOnDataOK(c, false, false, &data, []byte{},
		proxylib.PASS, len(data[0]),
		proxylib.PASS, len(data[1]),
		proxylib.MORE, 9)
	// use command will not show up in access log, so only expect one msg
	s.checkAccessLogs(c, 1, 0)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package proxylib
import (
"fmt"
"net"
"strconv"
"time"
"github.com/sirupsen/logrus"
cilium "github.com/cilium/proxy/go/cilium/api"
)
// A parser sees data from the underlying stream in both directions
// (original, connection open direction and the opposite, the reply
// direction). Each call to the filter returns an ordered set of
// operations to be performed on the data in that direction. Any data
// left over after the returned operations must be buffered by the
// caller and passed in again when more data has been received on the
// connection.
// InjectBuf is a pointer to a slice header for an inject buffer allocated by
// the proxylib caller. As data is placed into the buffer, the length
// of the buffer in the slice header is increased correspondingly. To make
// the injected data visible to the caller we need to pass the slice header
// by reference rather than by value, hence the pointer in the type.
// As the caller is typically in a different memory management domain (not
// subject to Go runtime garbage collection), the underlying buffer may never
// be expanded or otherwise reallocated.
type InjectBuf *[]byte
// Connection holds the connection metadata that is used both for
// policy enforcement and access logging.
type Connection struct {
	Instance   *Instance   // Holder of Policy protocol and access logging clients
	Id         uint64      // Unique connection ID allocated by the caller
	Ingress    bool        // 'true' for ingress, 'false' for egress
	SrcId      uint32      // Source security ID, may be mapped from the source IP address
	DstId      uint32      // Destination security ID, may be mapped from the destination IP address
	SrcAddr    string      // Source IP address in "a.b.c.d:port" or "[A:...:C]:port" format
	DstAddr    string      // Original destination IP address
	PolicyName string      // Identifies which policy instance applies to this connection
	Port       uint32      // (original) destination port number in numeric format
	ParserName string      // Name of the parser
	Parser     interface{} // Parser instance used on this connection
	Reader     Reader      // Buffering reader used with ReaderParser-style parsers
	OrigBuf    InjectBuf   // Buffer for injected frames in original direction
	ReplyBuf   InjectBuf   // Buffer for injected frames in reply direction
}
// NewConnection creates a Connection for the given protocol and metadata.
// It returns a non-nil error value (UNKNOWN_PARSER, INVALID_ADDRESS, or
// POLICY_DROP) and a nil Connection if the connection cannot be set up.
func NewConnection(instance *Instance, proto string, connectionId uint64, ingress bool, srcId, dstId uint32, srcAddr, dstAddr, policyName string, origBuf, replyBuf *[]byte) (error, *Connection) {
	// Look up the parser factory registered for this protocol.
	factory := GetParserFactory(proto)
	if factory == nil {
		return UNKNOWN_PARSER, nil
	}
	// Extract and validate the destination port from "host:port".
	_, portStr, err := net.SplitHostPort(dstAddr)
	if err != nil {
		return INVALID_ADDRESS, nil
	}
	port64, err := strconv.ParseUint(portStr, 10, 32)
	if err != nil || port64 == 0 {
		return INVALID_ADDRESS, nil
	}
	conn := &Connection{
		Instance:   instance,
		Id:         connectionId,
		Ingress:    ingress,
		SrcId:      srcId,
		DstId:      dstId,
		SrcAddr:    srcAddr,
		DstAddr:    dstAddr,
		PolicyName: policyName,
		Port:       uint32(port64),
		ParserName: proto,
		OrigBuf:    origBuf,
		ReplyBuf:   replyBuf,
	}
	// The factory may reject the connection based on its metadata.
	if conn.Parser = factory.Create(conn); conn.Parser == nil {
		return POLICY_DROP, nil
	}
	return nil, conn
}
// advanceInput consumes 'bytes' bytes from the front of 'input', dropping any
// fully-consumed slices, and returns what remains (possibly an empty slice).
func advanceInput(input [][]byte, bytes int) [][]byte {
	for bytes > 0 && len(input) > 0 {
		if avail := len(input[0]); bytes >= avail {
			// First slice fully consumed; move on to the next one.
			bytes -= avail
			input = input[1:] // may result in an empty slice
		} else {
			// Partial consumption: trim the front of the first slice.
			input[0] = input[0][bytes:]
			bytes = 0
		}
	}
	return input
}
// OnData runs the connection's parser over the buffered input 'data' and
// fills 'filterOps' with (operation, byte-count) pairs for the datapath.
// Supports both plain Parser and ReaderParser implementations. A parser
// panic is converted into an access-log entry and a PARSER_ERROR result,
// which causes the connection to be dropped.
func (connection *Connection) OnData(reply, endStream bool, data *[][]byte, filterOps *[][2]int64) (res FilterResult) {
	defer func() {
		// Recover from any possible parser datapath panics
		if r := recover(); r != nil {
			// Log the Panic into accesslog
			connection.Log(cilium.EntryType_Denied,
				&cilium.LogEntry_GenericL7{
					GenericL7: &cilium.L7LogEntry{
						Proto: connection.ParserName,
						Fields: map[string]string{
							// "status" is shown in Cilium monitor
							"status": fmt.Sprintf("Panic: %s", r),
						},
					},
				})
			res = PARSER_ERROR // Causes the connection to be dropped
		}
	}()
	if parser, ok := connection.Parser.(Parser); ok {
		input := *data
		// Loop until `filterOps` becomes full, or parser is done with the data.
		for len(*filterOps) < cap(*filterOps) {
			op, bytes := parser.OnData(reply, endStream, input)
			if op == NOP {
				break // No operations after NOP
			}
			// A zero byte count with a non-NOP op is a parser bug.
			if bytes == 0 {
				return PARSER_ERROR
			}
			*filterOps = append(*filterOps, [2]int64{int64(op), int64(bytes)})
			if op == MORE {
				// Need more data before can parse ahead.
				// Parser will see the unused data again in the next call, which will take place
				// after there are at least 'bytes' of additional data to parse.
				break
			}
			if op == PASS || op == DROP {
				input = advanceInput(input, bytes)
				// Loop back to parser even if have no more data to allow the parser to
				// inject frames at the end of the input.
			}
			// Injection does not advance input data, but instructs the datapath to
			// send data the parser has placed in the inject buffer. We need to stop processing
			// if inject buffer becomes full as the parser in this case can't inject any more
			// data.
			if op == INJECT && connection.IsInjectBufFull(reply) {
				// return if inject buffer becomes full
				break
			}
		}
	} else if parser, ok := connection.Parser.(ReaderParser); ok {
		connection.Reader = NewReader(*data, endStream)
		// Loop until `filterOps` becomes full, or parser is done with the data.
		for len(*filterOps) < cap(*filterOps) {
			op, bytes := parser.OnData(reply, &connection.Reader)
			if op == NOP {
				break // No operations after NOP
			}
			if bytes == 0 {
				return PARSER_ERROR
			}
			*filterOps = append(*filterOps, [2]int64{int64(op), int64(bytes)})
			if op == MORE {
				// Need more data before can parse ahead.
				// Parser will see the unused data again in the next call, which will take place
				// after there are at least 'bytes' of additional data to parse.
				break
			}
			// Get the current read count && reset for the next round
			read := connection.Reader.Reset()
			if op == PASS || op == DROP {
				// Advance input if needed
				if bytes > read {
					connection.Reader.AdvanceInput(bytes - read)
				}
				// Loop back to parser even if have no more data to allow the parser to
				// inject frames at the end of the input.
			}
			// Injection does not advance input data, but instructs the datapath to
			// send data the parser has placed in the inject buffer. We need to stop processing
			// if inject buffer becomes full as the parser in this case can't inject any more
			// data.
			if op == INJECT && connection.IsInjectBufFull(reply) {
				// return if inject buffer becomes full
				break
			}
		}
	}
	return OK
}
// Matches returns true if the given L7 request is allowed by the policy that
// applies to this connection.
func (connection *Connection) Matches(l7 interface{}) bool {
	logrus.Debugf("proxylib: Matching policy on connection %v", connection)
	// The "remote" end is the source for ingress, the destination for egress.
	remote := connection.SrcId
	if !connection.Ingress {
		remote = connection.DstId
	}
	return connection.Instance.PolicyMatches(connection.PolicyName, connection.Ingress, connection.Port, remote, l7)
}
// getInjectBuf returns the pointer to the inject buffer slice header for the
// indicated direction (reply or original).
func (connection *Connection) getInjectBuf(reply bool) InjectBuf {
	if !reply {
		return connection.OrigBuf
	}
	return connection.ReplyBuf
}
// Inject buffers data to be injected into the connection at the point of INJECT.
// Returns the number of bytes actually buffered.
func (connection *Connection) Inject(reply bool, data []byte) int {
	buf := connection.getInjectBuf(reply)
	// append data to C-provided buffer
	offset := len(*buf)
	// copy never writes past the buffer's capacity, which the caller owns.
	n := copy((*buf)[offset:cap(*buf)], data)
	*buf = (*buf)[:offset+n] // update the buffer length
	logrus.Debugf("proxylib: Injected %d bytes: %s (given: %s)", n, string((*buf)[offset:offset+n]), string(data))
	// return the number of bytes injected. This may be less than the length of `data` if
	// the buffer becomes full.
	// Parser may opt dropping the connection via parser error in this case!
	return n
}
// IsInjectBufFull returns true if the inject buffer for the indicated
// direction is full, i.e. no more data can be injected.
func (connection *Connection) IsInjectBufFull(reply bool) bool {
	buf := connection.getInjectBuf(reply)
	return len(*buf) == cap(*buf)
}
// Log emits an access-log entry of the given type for this connection,
// filling in the connection metadata and attaching the L7-specific payload.
func (conn *Connection) Log(entryType cilium.EntryType, l7 cilium.IsLogEntry_L7) {
	pblog := &cilium.LogEntry{
		Timestamp:             uint64(time.Now().UnixNano()),
		IsIngress:             conn.Ingress,
		EntryType:             entryType,
		PolicyName:            conn.PolicyName,
		SourceSecurityId:      conn.SrcId,
		DestinationSecurityId: conn.DstId,
		SourceAddress:         conn.SrcAddr,
		DestinationAddress:    conn.DstAddr,
		L7:                    l7,
	}
	conn.Instance.Log(pblog)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package proxylib
import (
"fmt"
"sync"
"sync/atomic"
"github.com/golang/protobuf/proto"
"github.com/sirupsen/logrus"
cilium "github.com/cilium/proxy/go/cilium/api"
envoy_service_discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
)
// PolicyClient is a client connection to the policy (NPDS) server.
type PolicyClient interface {
	Close()
	Path() string
}

// AccessLogger delivers access-log entries to the access log server.
type AccessLogger interface {
	Log(pblog *cilium.LogEntry)
	Close()
	Path() string
}

// PolicyUpdater applies a policy update received as an xDS discovery response.
type PolicyUpdater interface {
	PolicyUpdate(resp *envoy_service_discovery.DiscoveryResponse) error
}
// Instance is a proxylib library instance tying together the access logger,
// the policy client, and the currently published policy map.
type Instance struct {
	id           uint64       // unique instance ID from the package-level counter
	openCount    uint64       // reference count, managed by OpenInstance/CloseInstance
	nodeID       string       // node identification used for the policy client
	accessLogger AccessLogger // destination for access-log entries
	policyClient PolicyClient // NPDS client feeding policy updates
	policyMap    atomic.Value // holds PolicyMap
}
var (
	// mutex protects instances
	mutex sync.RWMutex
	// Key uint64 is a monotonically increasing instance ID
	instances map[uint64]*Instance = make(map[uint64]*Instance)
	// Last instance ID used
	instanceId uint64 = 0
)
// NewInstance allocates a new library instance with the given node ID and
// access logger. If nodeID is empty, a default one derived from the new
// instance ID is used. The instance starts with an empty policy map and an
// open count of 1.
//
// NOTE(review): instanceId is incremented here without holding 'mutex'.
// OpenInstance calls this with the lock held, but direct callers (e.g. tests)
// do not — confirm NewInstance is never invoked concurrently.
func NewInstance(nodeID string, accessLogger AccessLogger) *Instance {
	instanceId++
	if nodeID == "" {
		nodeID = fmt.Sprintf("host~127.0.0.2~libcilium-%d~localdomain", instanceId)
	}
	ins := &Instance{
		id:           instanceId,
		openCount:    1,
		nodeID:       nodeID,
		accessLogger: accessLogger,
	}
	ins.setPolicyMap(newPolicyMap())
	return ins
}
// OpenInstance creates a new instance or finds an existing one with equivalent
// parameters (node ID, xDS path, and access log path), incrementing its open
// count in the latter case. Returns the instance id.
func OpenInstance(nodeID string, xdsPath string, newPolicyClient func(path, nodeID string, updater PolicyUpdater) PolicyClient,
	accessLogPath string, newAccessLogger func(accessLogPath string) AccessLogger,
) uint64 {
	mutex.Lock()
	defer mutex.Unlock()
	// Check if have an instance with these params already
	for id, old := range instances {
		oldXdsPath := ""
		if old.policyClient != nil {
			oldXdsPath = old.policyClient.Path()
		}
		oldAccessLogPath := ""
		if old.accessLogger != nil {
			oldAccessLogPath = old.accessLogger.Path()
		}
		// An empty nodeID matches any existing instance's node ID.
		if (nodeID == "" || old.nodeID == nodeID) && xdsPath == oldXdsPath && accessLogPath == oldAccessLogPath {
			old.openCount++
			logrus.Debugf("Opened existing library instance %d, open count: %d", id, old.openCount)
			return id
		}
	}
	ins := NewInstance(nodeID, newAccessLogger(accessLogPath))
	// policy client needs the instance so we set it after instance has been created
	ins.policyClient = newPolicyClient(xdsPath, ins.nodeID, ins)
	instances[instanceId] = ins
	logrus.Debugf("Opened new library instance %d", instanceId)
	return instanceId
}
// FindInstance returns the instance with the given id, or nil if none exists.
func FindInstance(id uint64) *Instance {
	mutex.RLock()
	ins := instances[id]
	mutex.RUnlock()
	return ins
}
// CloseInstance decrements the open count of the instance with the given id,
// tearing it down (closing the policy client and access logger and removing
// it from the registry) when the count reaches zero.
// Returns the new open count.
func CloseInstance(id uint64) uint64 {
	mutex.Lock()
	defer mutex.Unlock()
	count := uint64(0)
	if ins, ok := instances[id]; ok {
		ins.openCount--
		count = ins.openCount
		if count == 0 {
			if ins.policyClient != nil {
				ins.policyClient.Close()
			}
			if ins.accessLogger != nil {
				ins.accessLogger.Close()
			}
			delete(instances, id)
		}
		logrus.Debugf("CloseInstance(%d): Remaining open count: %d", id, count)
	} else {
		logrus.Debugf("CloseInstance(%d): Not found (closed already?)", id)
	}
	return count
}
// getPolicyMap returns the currently published PolicyMap.
func (ins *Instance) getPolicyMap() PolicyMap {
	return ins.policyMap.Load().(PolicyMap)
}

// setPolicyMap atomically publishes a new PolicyMap.
func (ins *Instance) setPolicyMap(newMap PolicyMap) {
	ins.policyMap.Store(newMap)
}
// PolicyMatches returns true if the policy identified by endpointPolicyName
// allows the given L7 request for the given direction, port, and remote ID.
func (ins *Instance) PolicyMatches(endpointPolicyName string, ingress bool, port, remoteId uint32, l7 interface{}) bool {
	// Policy maps are never modified once published
	policy, ok := ins.getPolicyMap()[endpointPolicyName]
	if !ok {
		logrus.Debugf("NPDS: Policy for %s not found", endpointPolicyName)
		return false
	}
	return policy.Matches(ingress, port, remoteId, l7)
}
// PolicyUpdate updates the PolicyMap from a protobuf discovery response.
// PolicyMap is only ever changed if the whole update is successful; any
// panic from policy construction is converted into a returned error.
func (ins *Instance) PolicyUpdate(resp *envoy_service_discovery.DiscoveryResponse) (err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(error); !ok {
				err = fmt.Errorf("NPDS: Panic: %v", r)
			}
		}
	}()
	logrus.Debugf("NPDS: Updating policy for version %s", resp.VersionInfo)
	oldMap := ins.getPolicyMap()
	newMap := newPolicyMap()
	for _, any := range resp.Resources {
		if any.TypeUrl != resp.TypeUrl {
			return fmt.Errorf("NPDS: Mismatching TypeUrls: %s != %s", any.TypeUrl, resp.TypeUrl)
		}
		var config cilium.NetworkPolicy
		if err = proto.Unmarshal(any.Value, &config); err != nil {
			return fmt.Errorf("NPDS: Policy unmarshal error: %v", err)
		}
		ips := config.GetEndpointIps()
		if len(ips) == 0 {
			return fmt.Errorf("NPDS: Policy has no endpoint_ips")
		}
		for _, ip := range ips {
			logrus.Debugf("NPDS: Endpoint IP: %s", ip)
		}
		// Locate the old version, if any
		oldPolicy, found := oldMap[ips[0]]
		if found {
			// Check if the new policy is the same as the old one
			if proto.Equal(&config, oldPolicy.protobuf) {
				logrus.Debugf("NPDS: New policy for Endpoint %d is equal to the old one, no need to change", config.GetEndpointId())
				// Reuse the existing policy instance for all its IPs.
				for _, ip := range ips {
					newMap[ip] = oldPolicy
				}
				continue
			}
		}
		// Validate new config
		if err = config.Validate(); err != nil {
			return fmt.Errorf("NPDS: Policy validation error for Endpoint %d: %v", config.GetEndpointId(), err)
		}
		// Create new PolicyInstance, may panic. Takes ownership of 'config'.
		newPolicy := newPolicyInstance(&config)
		for _, ip := range ips {
			newMap[ip] = newPolicy
		}
	}
	// Store the new policy map
	ins.setPolicyMap(newMap)
	logrus.Debugf("NPDS: Policy Update completed for instance %d: %v", ins.id, newMap)
	return
}
// Log forwards an access log entry to the instance's access logger.
// NOTE(review): no nil check on ins.accessLogger here, unlike CloseInstance;
// presumably an instance always has a logger when Log is called — confirm.
func (ins *Instance) Log(pblog *cilium.LogEntry) {
	ins.accessLogger.Log(pblog)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package proxylib
import (
"github.com/sirupsen/logrus"
)
// Parser is a parser instance used for each connection. OnData will be called from a single thread only.
type Parser interface {
	// OnData is called when input is available on the underlying connection. The Parser
	// instance is only ever used for processing data of a single connection, which allows
	// the parser instance to keep connection specific state. All OnData() calls for a
	// single connection (both directions) are made from a single thread, so that
	// no locking is needed for the parser instance if no other goroutines need to access
	// the parser instance. (Note that any L7 policy protocol rule parsing happens in
	// other goroutine so any such parsing should not access parser instances directly.)
	//
	// OnData() parameters are as follows:
	// 'reply' is 'false' for the original direction of the connection, 'true' otherwise.
	// 'endStream' is true if there is no more data after 'data' in this direction.
	// 'data' is the available data in the current direction. The datapath buffers
	//        partial frames as instructed by the operations returned by the parser
	//        so that the 'data' always starts on a frame boundary. That is, whenever
	//        the parser returns `MORE` indicating it needs more input, the bytes
	//        not 'PASS'ed or 'DROP'ped are retained in a datapath buffer and those
	//        same bytes are passed to the parser again when more input is available.
	//        'data' may be an empty slice, but the slices contained are never empty.
	//
	// OnData() returns an operation and the number of bytes ('N') the operation applies to.
	// The possible values for 'op' are:
	// 'MORE'   - Data currently in 'data' is to be retained by the datapath and passed
	//            again to OnData() after 'N' bytes more data is available.
	// 'PASS'   - Allow 'N' bytes.
	// 'DROP'   - Drop 'N' bytes and call OnData() again for the remaining data.
	// 'INJECT' - Insert 'N' bytes of data placed into the inject buffer in to the
	//            data stream in this direction.
	// 'NOP'    - Do nothing, to be used when it is known that no more input
	//            is to be expected.
	// 'ERROR'  - Protocol parsing failed and the connection should be closed.
	//
	// OnData() is called again after 'PASS', 'DROP', and 'INJECT' with the remaining
	// data even if none remains.
	OnData(reply, endStream bool, data [][]byte) (op OpType, N int)
}
// ReaderParser is an alternate parser interface; an instance is used for each
// connection. OnData will be called from a single thread only.
type ReaderParser interface {
	// OnData is called when input is available on the underlying connection. The parser
	// instance is only ever used for processing data of a single connection, which allows
	// the parser instance to keep connection specific state. All OnData() calls for a
	// single connection (both directions) are made from a single thread, so that
	// no locking is needed for the parser instance if no other goroutines need to access
	// the parser instance. (Note that any L7 policy protocol rule parsing happens in
	// other goroutine so any such parsing should not access parser instances directly.)
	//
	// OnData() parameters are as follows:
	// 'reply'  is 'false' for the original direction of the connection, 'true' otherwise.
	// 'reader' provides sequential access to the buffered input in the current
	//          direction. The datapath buffers partial frames as instructed by the
	//          operations returned by the parser so that the input always starts on
	//          a frame boundary. That is, whenever the parser returns `MORE`
	//          indicating it needs more input, the bytes not 'PASS'ed or 'DROP'ped
	//          are retained in a datapath buffer and those same bytes are made
	//          available to the parser again when more input is available.
	//
	// OnData() returns an operation and the number of bytes ('N') the operation applies to.
	// The possible values for 'op' are:
	// 'MORE'   - Data currently buffered is to be retained by the datapath and passed
	//            again to OnData() after 'N' bytes more data is available.
	// 'PASS'   - Allow 'N' bytes.
	// 'DROP'   - Drop 'N' bytes and call OnData() again for the remaining data.
	// 'INJECT' - Insert 'N' bytes of data placed into the inject buffer in to the
	//            data stream in this direction.
	// 'NOP'    - Do nothing, to be used when it is known that no more input
	//            is to be expected.
	// 'ERROR'  - Protocol parsing failed and the connection should be closed.
	//
	// OnData() is called again after 'PASS', 'DROP', and 'INJECT' with the remaining
	// data even if none remains.
	OnData(reply bool, reader *Reader) (op OpType, N int)
}
// ParserFactory creates Parser (or ReaderParser) instances for new
// connections.
type ParserFactory interface {
	Create(connection *Connection) interface{} // must be thread safe!
}
// parserFactories maps registered parser names to their factories.
// const after initialization: populated only by RegisterParserFactory from
// parser init() functions while still single-threaded, read-only afterwards.
var parserFactories map[string]ParserFactory = make(map[string]ParserFactory)
// RegisterParserFactory adds a protocol parser factory to the map of known parsers.
// This is called from parser init() functions while we are still single-threaded,
// so no locking is performed here.
func RegisterParserFactory(name string, parserFactory ParserFactory) {
	logrus.Debugf("proxylib: Registering L7 parser: %v", name)
	parserFactories[name] = parserFactory
}
// GetParserFactory returns the parser factory registered under 'name', or
// nil if no parser with that name has been registered.
func GetParserFactory(name string) ParserFactory {
	factory := parserFactories[name]
	return factory
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package proxylib
import (
"fmt"
"reflect"
"strings"
"github.com/sirupsen/logrus"
cilium "github.com/cilium/proxy/go/cilium/api"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)
// L7NetworkPolicyRule is the interface each L7 rule implements. Matches
// receives the parsed L7 request (as passed to PolicyMap.Matches) and
// reports whether this rule allows it.
type L7NetworkPolicyRule interface {
	Matches(interface{}) bool
}
// L7RuleParser takes the protobuf and converts the one of relevant for the given L7 to an array
// of L7 rules. A packet matches if the 'Matches' method of any of these rules matches the
// 'l7' interface passed by the L7 implementation to PolicyMap.Matches() as the last parameter.
type L7RuleParser func(rule *cilium.PortNetworkPolicyRule) []L7NetworkPolicyRule
// l7RuleParsers maps L7 policy type names to their rule-parsing functions.
// const after initialization: populated only by RegisterL7RuleParser from
// parser init() functions while still single-threaded, read-only afterwards.
var l7RuleParsers map[string]L7RuleParser = make(map[string]L7RuleParser)
// RegisterL7RuleParser adds an L7 policy protocol parser to the map of known L7 policy parsers.
// This is called from parser init() functions while we are still single-threaded,
// so no locking is performed here.
func RegisterL7RuleParser(l7PolicyTypeName string, parserFunc L7RuleParser) {
	logrus.Debugf("NPDS: Registering L7 rule parser: %s", l7PolicyTypeName)
	l7RuleParsers[l7PolicyTypeName] = parserFunc
}
// ParseError reports a fatal policy-parsing problem by panicking with an
// error that wraps 'reason' and the offending 'config'. The panic is
// deliberate: policy update code recovers from it and rejects the whole
// configuration change gracefully.
func ParseError(reason string, config interface{}) {
	err := fmt.Errorf("NPDS: %s (config: %v)", reason, config)
	panic(err)
}
// PortNetworkPolicyRule is the parsed form of a single per-port policy rule:
// the set of remote identities it applies to, whether it denies, and any L7
// rules to evaluate when it allows.
type PortNetworkPolicyRule struct {
	Deny    bool                  // explicit deny rule
	Remotes map[uint32]struct{}   // remote identities; empty set applies to all remotes
	L7Rules []L7NetworkPolicyRule // only used when not denied
}
// newPortNetworkPolicyRule creates a PortNetworkPolicyRule from its protobuf
// form. It returns the parsed rule, the resolved L7 protocol name ("" if the
// rule has no L7 part), and an 'ok' flag. 'ok' is false either for
// Envoy-internal filter names (prefix "envoy.", silently skipped) or for an
// unknown L7 protocol name; the caller must then drop all traffic on the port.
func newPortNetworkPolicyRule(config *cilium.PortNetworkPolicyRule) (PortNetworkPolicyRule, string, bool) {
	rule := PortNetworkPolicyRule{
		Deny:    config.GetDeny(),
		Remotes: make(map[uint32]struct{}, len(config.RemotePolicies)),
	}
	// 'action' is only used for the debug log lines below.
	action := "Allowing"
	if rule.Deny {
		action = "Denying"
	}
	for _, remote := range config.GetRemotePolicies() {
		logrus.Debugf("NPDS::PortNetworkPolicyRule: %s remote %d", action, remote)
		rule.Remotes[remote] = struct{}{}
	}
	// Each parser registers a parsing function to parse its L7 rules.
	// The registered name must match 'l7_proto', if included in the message,
	// or one of the oneof type names.
	l7Name := config.L7Proto
	if l7Name == "" {
		// Fall back to the concrete Go type name of the L7 oneof field.
		typeOf := reflect.TypeOf(config.L7)
		if typeOf != nil {
			l7Name = typeOf.Elem().Name()
		}
	}
	if strings.HasPrefix(l7Name, "envoy.") {
		return rule, "", false // Silently drop Envoy filter traffic to this port if forwarded to proxylib
	}
	if l7Name != "" {
		l7Parser, ok := l7RuleParsers[l7Name]
		if ok {
			if logrus.IsLevelEnabled(logrus.DebugLevel) {
				logrus.Debugf("NPDS::PortNetworkPolicyRule: Calling L7Parser %s on %v", l7Name, config.String())
			}
			rule.L7Rules = l7Parser(config)
		} else {
			logrus.Debugf("NPDS::PortNetworkPolicyRule: Unknown L7 (%s), should drop everything.", l7Name)
		}
		// Unknown parsers are expected, but will result in drop-all policy
		return rule, l7Name, ok
	}
	return rule, "", true // No L7 is ok
}
// Matches evaluates this rule against a remote identity and a parsed L7
// request. The two results distinguish three outcomes:
//   - (true, false):  the rule explicitly allows the request
//   - (false, true):  the rule explicitly denies the request
//   - (false, false): the rule neither allows nor denies (no match)
func (p *PortNetworkPolicyRule) Matches(remoteId uint32, l7 interface{}) (allowed, denied bool) {
	// Remote ID must match if we have any.
	if len(p.Remotes) > 0 {
		_, found := p.Remotes[remoteId]
		if !found {
			if logrus.IsLevelEnabled(logrus.DebugLevel) {
				logrus.Debugf("NPDS::PortNetworkPolicyRule: No L3 match on (%v)", *p)
			}
			// no remote ID match, does not allow or deny explicitly
			return false, false
		}
		if p.Deny {
			// Explicit deny, not allowed even if another rule would allow.
			return false, true
		}
	} else if p.Deny {
		// Deny with empty remotes denies all remotes explicitly
		return false, true
	}
	// At this point the rule is an allow rule whose L3 part matched;
	// L7 rules, if any, decide the outcome.
	if len(p.L7Rules) > 0 {
		for _, rule := range p.L7Rules {
			if rule.Matches(l7) {
				if logrus.IsLevelEnabled(logrus.DebugLevel) {
					logrus.Debugf("NPDS::PortNetworkPolicyRule: L7 rule matches (%v)", *p)
				}
				return true, false
			}
		}
		// None of the L7 rules matched: no explicit allow or deny.
		return false, false
	}
	// Empty set matches any payload
	if logrus.IsLevelEnabled(logrus.DebugLevel) {
		logrus.Debugf("NPDS::PortNetworkPolicyRule: Empty L7Rules matches (%v)", *p)
	}
	return true, false
}
// PortNetworkPolicyRules holds all parsed rules applying to a single port.
type PortNetworkPolicyRules struct {
	Rules []PortNetworkPolicyRule
}
// newPortNetworkPolicyRules parses the per-port rules from their protobuf
// form. Returns false (and an empty rule set) if any rule references an
// unknown L7 parser, in which case the caller must drop all traffic on the
// port. Panics (via ParseError) if rules on the same port mix different L7
// protocol types.
// NOTE(review): the 'port' parameter is currently unused in this body.
func newPortNetworkPolicyRules(config []*cilium.PortNetworkPolicyRule, port uint32) (PortNetworkPolicyRules, bool) {
	rules := PortNetworkPolicyRules{
		Rules: make([]PortNetworkPolicyRule, 0, len(config)),
	}
	if len(config) == 0 {
		logrus.Debugf("NPDS::PortNetworkPolicyRules: No rules, will allow everything.")
	}
	// All rules on one port must agree on a single L7 protocol; track the
	// first one seen and reject mismatches.
	var firstTypeName string
	for _, rule := range config {
		newRule, typeName, ok := newPortNetworkPolicyRule(rule)
		if !ok {
			// Unknown L7 parser, must drop all traffic
			return PortNetworkPolicyRules{}, false
		}
		if typeName != "" {
			if firstTypeName == "" {
				firstTypeName = typeName
			} else if typeName != firstTypeName {
				ParseError("Mismatching L7 types on the same port", config)
			}
		}
		rules.Rules = append(rules.Rules, newRule)
	}
	return rules, true
}
// Matches reports whether the rule set allows the request. An empty rule set
// allows everything. Otherwise every rule is evaluated: any explicit deny
// wins immediately, and the request is allowed only if at least one rule
// explicitly allowed it and none denied it.
func (p *PortNetworkPolicyRules) Matches(remoteId uint32, l7 interface{}) bool {
	// Empty set matches any payload from anyone
	if len(p.Rules) == 0 {
		if logrus.IsLevelEnabled(logrus.DebugLevel) {
			logrus.Debugf("NPDS::PortNetworkPolicyRules: No Rules; matches (%v)", p)
		}
		return true
	}
	anyAllowed := false
	for i := range p.Rules {
		allowed, denied := p.Rules[i].Matches(remoteId, l7)
		if denied {
			// An explicit deny overrides any allow.
			return false
		}
		anyAllowed = anyAllowed || allowed
	}
	if !anyAllowed {
		return false
	}
	if logrus.IsLevelEnabled(logrus.DebugLevel) {
		logrus.Debugf("NPDS::PortNetworkPolicyRules(remoteId=%d): rule matches (%v)", remoteId, p)
	}
	return true
}
// PortNetworkPolicies maps port numbers to their rule sets for one traffic
// direction. Port 0 acts as a wildcard (see Matches).
type PortNetworkPolicies struct {
	Rules map[uint32]PortNetworkPolicyRules
}
// newPortNetworkPolicies parses the per-port policies for one direction
// ('dir' is "ingress" or "egress", used only for logging). UDP policies are
// ignored; duplicate ports and non-TCP protocols cause a panic via
// ParseError. Ports whose rules reference an unsupported L7 parser are
// skipped entirely, which makes them drop-all (see Matches).
func newPortNetworkPolicies(config []*cilium.PortNetworkPolicy, dir string) PortNetworkPolicies {
	policy := PortNetworkPolicies{
		Rules: make(map[uint32]PortNetworkPolicyRules, len(config)),
	}
	for _, rule := range config {
		// Ignore UDP policies
		if rule.GetProtocol() == core.SocketAddress_UDP {
			continue
		}
		port := rule.GetPort()
		// Duplicate check happens before the protocol check so that a
		// duplicated port is reported as such.
		if _, found := policy.Rules[port]; found {
			ParseError(fmt.Sprintf("Duplicate port number %d in (rule: %v)", port, rule), config)
		}
		if rule.GetProtocol() != core.SocketAddress_TCP {
			ParseError(fmt.Sprintf("Invalid transport protocol %v", rule.GetProtocol()), config)
		}
		// Skip the port if not 'ok'
		rules, ok := newPortNetworkPolicyRules(rule.GetRules(), port)
		if ok {
			logrus.Debugf("NPDS::PortNetworkPolicies(): installed %s TCP policy for port %d", dir, port)
			policy.Rules[port] = rules
		} else {
			logrus.Debugf("NPDS::PortNetworkPolicies(): Skipped %s port due to unsupported L7: %d", dir, port)
		}
	}
	return policy
}
// Matches reports whether traffic on 'port' from 'remoteId' with L7 request
// 'l7' is allowed. The exact port is checked first, then the wildcard port 0.
// Traffic is dropped when neither matches, including when no policy exists
// for the port at all.
func (p *PortNetworkPolicies) Matches(port, remoteId uint32, l7 interface{}) bool {
	rules, found := p.Rules[port]
	if found {
		if rules.Matches(remoteId, l7) {
			if logrus.IsLevelEnabled(logrus.DebugLevel) {
				logrus.Debugf("NPDS::PortNetworkPolicies(port=%d, remoteId=%d): rule matches (%v)", port, remoteId, p)
			}
			return true
		}
	}
	// No exact port match, try wildcard
	rules, foundWc := p.Rules[0]
	if foundWc {
		if rules.Matches(remoteId, l7) {
			if logrus.IsLevelEnabled(logrus.DebugLevel) {
				logrus.Debugf("NPDS::PortNetworkPolicies(port=*, remoteId=%d): rule matches (%v)", remoteId, p)
			}
			return true
		}
	}
	// No policy for the port was found. Cilium always creates a policy for redirects it
	// creates, so the host proxy never gets here.
	// TODO: Change back to false only when non-bpf datapath is supported?
	// logrus.Debugf("NPDS::PortNetworkPolicies(port=%d, remoteId=%d): allowing traffic on port for which there is no policy, assuming L3/L4 has passed it! (%v)", port, remoteId, p)
	// return !(found || foundWc)
	if !(found || foundWc) {
		logrus.Debugf("NPDS::PortNetworkPolicies(port=%d, remoteId=%d): Dropping traffic on port for which there is no policy! (%v)", port, remoteId, p)
	}
	return false
}
// PolicyInstance is the parsed network policy of one endpoint, split by
// direction. The original protobuf is retained for equality checks against
// subsequent updates (see PolicyUpdate).
type PolicyInstance struct {
	protobuf *cilium.NetworkPolicy
	Ingress  PortNetworkPolicies
	Egress   PortNetworkPolicies
}
// newPolicyInstance parses a protobuf NetworkPolicy into a PolicyInstance.
// May panic (via ParseError) on invalid port policies; callers recover.
// Takes ownership of 'config'.
func newPolicyInstance(config *cilium.NetworkPolicy) *PolicyInstance {
	logrus.Debugf("NPDS::PolicyInstance: Inserting policy for %v", config.EndpointIps)
	return &PolicyInstance{
		protobuf: config,
		Ingress:  newPortNetworkPolicies(config.GetIngressPerPortPolicies(), "ingress"),
		Egress:   newPortNetworkPolicies(config.GetEgressPerPortPolicies(), "egress"),
	}
}
// Matches reports whether the endpoint policy allows the request, selecting
// the ingress or egress per-port policies by the 'ingress' flag.
func (p *PolicyInstance) Matches(ingress bool, port, remoteId uint32, l7 interface{}) bool {
	if logrus.IsLevelEnabled(logrus.DebugLevel) {
		logrus.Debugf("NPDS::PolicyInstance::Matches(ingress: %v, port: %d, remoteId: %d, l7: %v (policy: %s)", ingress, port, remoteId, l7, p.protobuf.String())
	}
	policies := &p.Egress
	if ingress {
		policies = &p.Ingress
	}
	return policies.Matches(port, remoteId, l7)
}
// PolicyMap holds network policies keyed by endpoint IPs. Maps are never
// modified once published; updates replace the whole map (see PolicyUpdate).
type PolicyMap map[string]*PolicyInstance
// newPolicyMap returns a new, empty PolicyMap.
func newPolicyMap() PolicyMap {
	return PolicyMap{}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package proxylib
import (
"io"
)
type Reader struct {
buf [][]byte // buffer that shrinks as data is being read
read int // Number of byte read since last reset
endStream bool // connection is known to end (in this direction) after the current input
}
func NewReader(input [][]byte, endStream bool) Reader {
return Reader{
buf: input,
endStream: endStream,
}
}
func (r *Reader) Reset() int {
read := r.read
r.read = 0
return read
}
func (r *Reader) Length() int {
length := 0
for i := 0; i < len(r.buf); i++ {
length += len(r.buf[i])
}
return length
}
func (r *Reader) PeekFull(p []byte) (n int, err error) {
n = 0
slice := 0
index := 0
for n < len(p) && slice < len(r.buf) {
bytes := len(r.buf[slice][index:])
nc := copy(p[n:], r.buf[slice][index:])
if nc == bytes {
// next slice please
slice++
index = 0
} else {
// move ahead in the same slice
index += nc
}
n += nc
}
if n < len(p) {
return n, io.EOF
}
return n, nil
}
func (r *Reader) Read(p []byte) (n int, err error) {
n = 0
for n < len(p) && len(r.buf) > 0 {
nc := copy(p[n:], r.buf[0])
if nc == len(r.buf[0]) {
// next slice please
r.buf = r.buf[1:]
} else {
// move ahead in the same slice
r.buf[0] = r.buf[0][nc:]
}
n += nc
}
if n == 0 {
return 0, io.EOF
}
r.read += n
return n, nil
}
// Skip bytes in input, or exhaust the input.
func (r *Reader) AdvanceInput(bytes int) {
for bytes > 0 && len(r.buf) > 0 {
rem := len(r.buf[0]) // this much data left in the first slice
if bytes < rem {
r.buf[0] = r.buf[0][bytes:] // skip 'bytes' bytes
return
} else { // go to the beginning of the next unit
bytes -= rem
r.buf = r.buf[1:] // may result in an empty slice
}
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package proxylib
import (
. "github.com/cilium/checkmate"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
"github.com/sirupsen/logrus"
cilium "github.com/cilium/proxy/go/cilium/api"
envoy_service_discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
)
// LogFatal is the fatal-error hook used by the test helpers below; it is a
// variable so tests can override it instead of terminating the process.
var LogFatal = func(format string, args ...interface{}) {
	logrus.Fatalf(format, args...)
}
// CheckInsertPolicyText inserts the given text-format policies and asserts
// that the insertion succeeds.
func (ins *Instance) CheckInsertPolicyText(c *C, version string, policies []string) {
	err := ins.InsertPolicyText(version, policies, "")
	c.Assert(err, IsNil)
}
// InsertPolicyText parses each text-format cilium.NetworkPolicy in
// 'policies', wraps them in a DiscoveryResponse, and applies it via
// PolicyUpdate. 'expectFail' names the stage expected to fail ("unmarshal",
// "marshal", or "update"); a failure at any other stage aborts via LogFatal.
// The error of the failing stage (or of PolicyUpdate) is returned.
// NOTE(review): uses the legacy github.com/golang/protobuf text API
// (proto.UnmarshalText), which is deprecated in newer protobuf releases.
func (ins *Instance) InsertPolicyText(version string, policies []string, expectFail string) error {
	typeUrl := "type.googleapis.com/cilium.NetworkPolicy"
	resources := make([]*any.Any, 0, len(policies))
	for _, policy := range policies {
		pb := new(cilium.NetworkPolicy)
		err := proto.UnmarshalText(policy, pb)
		if err != nil {
			if expectFail != "unmarshal" {
				LogFatal("Policy UnmarshalText failed: %v", err)
			}
			return err
		}
		logrus.Debugf("Text -> proto.Message: %s -> %v", policy, pb)
		// Re-encode to wire format for transport inside an Any.
		data, err := proto.Marshal(pb)
		if err != nil {
			if expectFail != "marshal" {
				LogFatal("Policy marshal failed: %v", err)
			}
			return err
		}
		resources = append(resources, &any.Any{
			TypeUrl: typeUrl,
			Value:   data,
		})
	}
	msg := &envoy_service_discovery.DiscoveryResponse{
		VersionInfo: version,
		Canary:      false,
		TypeUrl:     typeUrl,
		Nonce:       "randomNonce1",
		Resources:   resources,
	}
	err := ins.PolicyUpdate(msg)
	if err != nil {
		if expectFail != "update" {
			LogFatal("Policy Update failed: %v", err)
		}
	}
	return err
}
// connectionID is a monotonically increasing id handed to each test
// connection. NOTE(review): incremented without synchronization; assumes
// test connections are created from a single goroutine — confirm.
var connectionID uint64
// CheckNewConnectionOK creates a new test connection and asserts that the
// creation succeeded, returning the non-nil connection.
func (ins *Instance) CheckNewConnectionOK(c *C, proto string, ingress bool, srcId, dstId uint32, srcAddr, dstAddr, policyName string) *Connection {
	err, conn := ins.CheckNewConnection(c, proto, ingress, srcId, dstId, srcAddr, dstAddr, policyName)
	c.Assert(err, IsNil)
	c.Assert(conn, Not(IsNil))
	return conn
}
// CheckNewConnection creates a new test Connection with fresh 1KiB original-
// and reply-direction buffers and the next connection id. The (error,
// *Connection) result order mirrors NewConnection and is part of the
// existing test API.
func (ins *Instance) CheckNewConnection(c *C, proto string, ingress bool, srcId, dstId uint32, srcAddr, dstAddr, policyName string) (error, *Connection) {
	connectionID++
	bufSize := 1024
	origBuf := make([]byte, 0, bufSize)
	replyBuf := make([]byte, 0, bufSize)
	return NewConnection(ins, proto, connectionID, ingress, srcId, dstId, srcAddr, dstAddr, policyName, &origBuf, &replyBuf)
}
// CheckOnDataOK feeds data to the connection's parser and asserts the
// expected operations, expecting an OK filter result.
func (conn *Connection) CheckOnDataOK(c *C, reply, endStream bool, data *[][]byte, expReplyBuf []byte, expOps ...interface{}) {
	conn.CheckOnData(c, reply, endStream, data, OK, expReplyBuf, expOps...)
}
// CheckOnData feeds 'data' to the connection's parser via OnData and asserts
// the filter result, the produced (op, length) pairs against the flat
// 'expOps' list (alternating OpType and int), and the contents of the reply
// inject buffer. Both inject buffers are emptied afterwards, simulating the
// datapath forwarding the injected data.
func (conn *Connection) CheckOnData(c *C, reply, endStream bool, data *[][]byte, expResult FilterResult, expReplyBuf []byte, expOps ...interface{}) {
	// Each expected operation occupies two entries in expOps: OpType, length.
	ops := make([][2]int64, 0, len(expOps)/2)
	res := conn.OnData(reply, endStream, data, &ops)
	c.Check(res, Equals, expResult)
	c.Check(len(ops), Equals, len(expOps)/2, Commentf("Unexpected number of filter operations"))
	for i, op := range ops {
		if i*2+1 < len(expOps) {
			expOp, ok := expOps[i*2].(OpType)
			c.Assert(ok, Equals, true, Commentf("Invalid expected operation type"))
			c.Check(op[0], Equals, int64(expOp), Commentf("Unexpected filter operation"))
			expN, ok := expOps[i*2+1].(int)
			c.Assert(ok, Equals, true, Commentf("Invalid expected operation length (must be int)"))
			c.Check(op[1], Equals, int64(expN), Commentf("Unexpected operation length"))
		}
	}
	// Verify and drain the reply-direction inject buffer.
	buf := conn.ReplyBuf
	c.Check(*buf, DeepEquals, expReplyBuf, Commentf("Inject buffer mismatch"))
	*buf = (*buf)[:0] // make empty again
	// Clear the same-direction inject buffer, simulating the datapath forwarding the injected data
	injectBuf := conn.getInjectBuf(reply)
	*injectBuf = (*injectBuf)[:0]
	logrus.Debugf("proxylib test helper: Cleared inject buf, used %d/%d", len(*injectBuf), cap(*injectBuf))
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package proxylib
import "fmt"
// OpType mirrors enum FilterOpType in types.h.
type OpType int64

const (
	MORE OpType = iota
	PASS
	DROP
	INJECT
	ERROR

	// Internal types not exposed to Caller
	NOP OpType = 256
)

// OpError mirrors enum FilterOpError in types.h.
type OpError int64

const (
	ERROR_INVALID_OP_LENGTH OpError = iota + 1
	ERROR_INVALID_FRAME_TYPE
	ERROR_INVALID_FRAME_LENGTH
)

// String returns the symbolic name of the operation, or "UNKNOWN_OP" for
// values outside the defined set.
func (op OpType) String() string {
	names := map[OpType]string{
		MORE:   "MORE",
		PASS:   "PASS",
		DROP:   "DROP",
		INJECT: "INJECT",
		ERROR:  "ERROR",
		NOP:    "NOP",
	}
	if name, ok := names[op]; ok {
		return name
	}
	return "UNKNOWN_OP"
}

// String returns the symbolic name of the operation error, or
// "UNKNOWN_OP_ERROR" for values outside the defined set.
func (opErr OpError) String() string {
	names := map[OpError]string{
		ERROR_INVALID_OP_LENGTH:    "ERROR_INVALID_OP_LENGTH",
		ERROR_INVALID_FRAME_TYPE:   "ERROR_INVALID_FRAME_TYPE",
		ERROR_INVALID_FRAME_LENGTH: "ERROR_INVALID_FRAME_LENGTH",
	}
	if name, ok := names[opErr]; ok {
		return name
	}
	return "UNKNOWN_OP_ERROR"
}
// FilterResult mirrors enum FilterResult in types.h.
type FilterResult int

const (
	OK FilterResult = iota
	POLICY_DROP
	PARSER_ERROR
	UNKNOWN_PARSER
	UNKNOWN_CONNECTION
	INVALID_ADDRESS
	INVALID_INSTANCE
	UNKNOWN_ERROR
)

// Error implements the error interface for FilterResult, returning the
// symbolic name of the result, or its decimal value if outside the defined
// set.
func (r FilterResult) Error() string {
	names := map[FilterResult]string{
		OK:                 "OK",
		POLICY_DROP:        "POLICY_DROP",
		PARSER_ERROR:       "PARSER_ERROR",
		UNKNOWN_PARSER:     "UNKNOWN_PARSER",
		UNKNOWN_CONNECTION: "UNKNOWN_CONNECTION",
		INVALID_ADDRESS:    "INVALID_ADDRESS",
		INVALID_INSTANCE:   "INVALID_INSTANCE",
		UNKNOWN_ERROR:      "UNKNOWN_ERROR",
	}
	if name, ok := names[r]; ok {
		return name
	}
	return fmt.Sprintf("%d", r)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package test
import (
"errors"
"io"
"net"
"os"
"path/filepath"
"sync"
"syscall"
"time"
"github.com/golang/protobuf/proto"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
cilium "github.com/cilium/proxy/go/cilium/api"
)
// AccessLogServer is a test server that accepts access log connections on a
// unix domain socket and publishes the entry type of each received log
// message on the Logs channel.
type AccessLogServer struct {
	Path     string                // filesystem path of the unix domain socket
	Logs     chan cilium.EntryType // entry types of received log messages
	done     chan struct{}         // closed by Close() to signal shutdown
	listener *net.UnixListener
	mu       sync.Mutex // protects conns
	conns    []*net.UnixConn
}
// Close shuts down the server: it signals shutdown via the done channel,
// closes the listener and all accepted connections, and removes the unix
// domain socket from the filesystem. Safe to call on a nil receiver.
func (s *AccessLogServer) Close() {
	if s == nil {
		return
	}
	close(s.done)
	s.listener.Close()
	s.mu.Lock()
	for _, conn := range s.conns {
		conn.Close()
	}
	s.mu.Unlock()
	os.Remove(s.Path)
}
// isClosing reports whether Close() has been called, i.e. whether the done
// channel has been closed. Non-blocking.
func (s *AccessLogServer) isClosing() bool {
	select {
	case <-s.done:
		return true
	default:
		return false
	}
}
// Clear empties the access log server buffer, counting the passes and drops.
// It drains the Logs channel until no new entry arrives for 10ms.
//
// Fix: the named results were declared as (passed, drops int) while the body
// declared and returned a separate local 'passes', leaving the result name
// 'passed' unused and misleading; the results are now named to match what is
// actually counted and the redundant locals are gone.
func (s *AccessLogServer) Clear() (passes, drops int) {
	empty := false
	for !empty {
		select {
		case entryType := <-s.Logs:
			if entryType == cilium.EntryType_Denied {
				drops++
			} else {
				passes++
			}
		case <-time.After(10 * time.Millisecond):
			// No entry within the grace period; consider the buffer drained.
			empty = true
		}
	}
	return passes, drops
}
// StartAccessLogServer starts the access log server. It listens on a
// "unixpacket" socket named 'accessLogName' under Tmpdir, accepts
// connections in a background goroutine, and serves each connection in its
// own goroutine. 'bufSize' sets the capacity of the Logs channel. The accept
// goroutine exits when Close() is called. Any setup failure is fatal.
func StartAccessLogServer(accessLogName string, bufSize int) *AccessLogServer {
	accessLogPath := filepath.Join(Tmpdir, accessLogName)
	server := &AccessLogServer{
		Path: accessLogPath,
		Logs: make(chan cilium.EntryType, bufSize),
		done: make(chan struct{}),
	}

	// Create the access log listener
	os.Remove(accessLogPath) // Remove/Unlink the old unix domain socket, if any.

	var err error
	server.listener, err = net.ListenUnix("unixpacket", &net.UnixAddr{Name: accessLogPath, Net: "unixpacket"})
	if err != nil {
		logrus.Fatalf("Failed to open access log listen socket at %s: %v", accessLogPath, err)
	}
	server.listener.SetUnlinkOnClose(true)

	// Make the socket accessible by non-root Envoy proxies.
	if err = os.Chmod(accessLogPath, 0777); err != nil {
		logrus.Fatalf("Failed to change mode of access log listen socket at %s: %v", accessLogPath, err)
	}
	logrus.Debug("Starting Access Log Server")
	go func() {
		for {
			// Each Envoy listener opens a new connection over the Unix domain socket.
			// Multiple worker threads serving the listener share that same connection
			uc, err := server.listener.AcceptUnix()
			if err != nil {
				// These errors are expected when we are closing down
				if server.isClosing() ||
					errors.Is(err, net.ErrClosed) ||
					errors.Is(err, syscall.EINVAL) {
					break
				}
				logrus.WithError(err).Warn("Failed to accept access log connection")
				continue
			}
			// Re-check shutdown in case Close() raced with the accept.
			if server.isClosing() {
				break
			}
			logrus.Debug("Accepted access log connection")

			// Track the connection so Close() can terminate it.
			server.mu.Lock()
			server.conns = append(server.conns, uc)
			server.mu.Unlock()
			// Serve this access log socket in a goroutine, so we can serve multiple
			// connections concurrently.
			go server.accessLogger(uc)
		}
	}()
	return server
}
// isEOF returns true if the error message ends in "EOF". ReadMsgUnix returns extra info in the beginning.
func isEOF(err error) bool {
strerr := err.Error()
errlen := len(strerr)
return errlen >= 3 && strerr[errlen-3:] == io.EOF.Error()
}
// accessLogger serves one access log connection: it reads datagram messages,
// unmarshals each as a cilium.LogEntry, and publishes the entry type on
// s.Logs. Truncated or unparseable messages are discarded with a warning.
// Returns (and closes the connection) on read error or EOF.
func (s *AccessLogServer) accessLogger(conn *net.UnixConn) {
	defer func() {
		logrus.Debug("Closing access log connection")
		conn.Close()
	}()

	buf := make([]byte, 4096)
	for {
		n, _, flags, _, err := conn.ReadMsgUnix(buf, nil)
		if err != nil {
			// EOF and shutdown-time errors are expected; only log others.
			if !isEOF(err) && !s.isClosing() {
				logrus.WithError(err).Error("Error while reading from access log connection")
			}
			break
		}
		if flags&unix.MSG_TRUNC != 0 {
			// Message was larger than 'buf'; drop it rather than parse a
			// partial protobuf.
			logrus.Warning("Discarded truncated access log message")
			continue
		}
		pblog := cilium.LogEntry{}
		err = proto.Unmarshal(buf[:n], &pblog)
		if err != nil {
			logrus.WithError(err).Warning("Discarded invalid access log message")
			continue
		}
		logrus.Debugf("Access log message: %s", pblog.String())
		s.Logs <- pblog.EntryType
	}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package test
import (
"os"
"github.com/sirupsen/logrus"
)
// Tmpdir is the temporary directory used by the test helpers in this
// package (e.g. as the location for access log sockets).
var Tmpdir string

// init creates the shared temporary test directory. Fatal on failure.
// Fix: the original logrus.Fatal dropped the underlying error; it is now
// included in the fatal message so the cause is visible.
func init() {
	var err error
	Tmpdir, err = os.MkdirTemp("", "cilium_envoy_go_test")
	if err != nil {
		logrus.Fatalf("Failed to create a temporary directory for testing: %v", err)
	}
}